commit ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree b2d64bc10158fdd5497876388cd68142ca374ed3
parent Initial commit.
author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/Makefile                9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h              2537
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c          5141
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h          1403
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c          2584
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h           207
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h         2218
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c      3738
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h       400
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h    40
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h          6048
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h          787
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h      938
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c        14065
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h          547
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c        15451
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h       170
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h          7776
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c    3183
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c           6563
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h           1546
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c        3201
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h         633
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c        2007
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h         566
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c         2314
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h          473
27 files changed, 84545 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
new file mode 100644
index 0000000000..2523cfc752
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Broadcom 10-Gigabit ethernet driver
+#
+
+obj-$(CONFIG_BNX2X) += bnx2x.o
+
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o bnx2x_self_test.o
+bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
new file mode 100644
index 0000000000..e2a4e1088b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -0,0 +1,2537 @@
+/* bnx2x.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+#ifndef BNX2X_H
+#define BNX2X_H
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/pci_regs.h>
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+
+/* compilation time flags */
+
+/* define this to make the driver freeze on error to allow getting debug info
+ * (you will need to reboot afterwards) */
+/* #define BNX2X_STOP_ON_ERROR */
+
+/* FIXME: Delete the DRV_MODULE_VERSION below, but please be warned
+ * that it is not an easy task because such change has all chances
+ * to break this driver due to amount of abuse of in-kernel interfaces
+ * between modules and FW.
+ *
+ * DO NOT UPDATE DRV_MODULE_VERSION below.
+ */
+#define DRV_MODULE_VERSION "1.713.36-0"
+#define BNX2X_BC_VER 0x040200
+
+#if defined(CONFIG_DCB)
+#define BCM_DCBNL
+#endif
+
+#include "bnx2x_hsi.h"
+
+#include "../cnic_if.h"
+
+#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
+
+#include <linux/mdio.h>
+
+#include "bnx2x_reg.h"
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
+#include "bnx2x_link.h"
+#include "bnx2x_sp.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+enum bnx2x_int_mode {
+ BNX2X_INT_MODE_MSIX,
+ BNX2X_INT_MODE_INTX,
+ BNX2X_INT_MODE_MSI
+};
+
+/* error/debug prints */
+
+#define DRV_MODULE_NAME "bnx2x"
+
+/* for messages that are currently off */
+#define BNX2X_MSG_OFF 0x0
+#define BNX2X_MSG_MCP 0x0010000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS 0x0020000 /* was: NETIF_MSG_TIMER */
+#define BNX2X_MSG_NVM 0x0040000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_DMAE 0x0080000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_IOV 0x0800000
+#define BNX2X_MSG_PTP 0x1000000
+#define BNX2X_MSG_IDLE 0x2000000 /* used for idle check */
+#define BNX2X_MSG_ETHTOOL 0x4000000
+#define BNX2X_MSG_DCB 0x8000000
+
+/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
+#define DP(__mask, fmt, ...) \
+do { \
+ if (unlikely(bp->msg_enable & (__mask))) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_CONT(__mask, fmt, ...) \
+do { \
+ if (unlikely(bp->msg_enable & (__mask))) \
+ pr_cont(fmt, ##__VA_ARGS__); \
+} while (0)
+
+/* errors debug print */
+#define BNX2X_DBG_ERR(fmt, ...) \
+do { \
+ if (unlikely(netif_msg_probe(bp))) \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* for errors (never masked) */
+#define BNX2X_ERR(fmt, ...) \
+do { \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define BNX2X_ERROR(fmt, ...) \
+ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+/* before we have a dev->name use dev_info() */
+#define BNX2X_DEV_INFO(fmt, ...) \
+do { \
+ if (unlikely(netif_msg_probe(bp))) \
+ dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int);
+#ifdef BNX2X_STOP_ON_ERROR
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_panic_dump(bp, true); \
+} while (0)
+#else
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_panic_dump(bp, false); \
+} while (0)
+#endif
+
+#define bnx2x_mc_addr(ha) ((ha)->addr)
+#define bnx2x_uc_addr(ha) ((ha)->addr)
+
+#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff))
+#define U64_HI(x) ((u32)(((u64)(x)) >> 32))
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
+
+#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
+#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
+
+#define REG_WR_RELAXED(bp, offset, val) \
+ writel_relaxed((u32)val, REG_ADDR(bp, offset))
+
+#define REG_WR16_RELAXED(bp, offset, val) \
+ writew_relaxed((u16)val, REG_ADDR(bp, offset))
+
+#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
+#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
+#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
+
+#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
+#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
+
+#define REG_RD_DMAE(bp, offset, valp, len32) \
+ do { \
+ bnx2x_read_dmae(bp, offset, len32);\
+ memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
+ } while (0)
+
+#define REG_WR_DMAE(bp, offset, valp, len32) \
+ do { \
+ memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
+ offset, len32); \
+ } while (0)
+
+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+ REG_WR_DMAE(bp, offset, valp, len32)
+
+#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
+ do { \
+ memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
+ bnx2x_write_big_buf_wb(bp, addr, len32); \
+ } while (0)
+
+#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
+ offsetof(struct shmem_region, field))
+#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
+#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
+
+#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
+ offsetof(struct shmem2_region, field))
+#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
+#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
+ offsetof(struct mf_cfg, field))
+#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
+ offsetof(struct mf2_cfg, field))
+
+#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
+ MF_CFG_ADDR(bp, field), (val))
+#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
+
+#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
+ (SHMEM2_RD((bp), size) > \
+ offsetof(struct shmem2_region, field)))
+
+#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+
+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS 3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS 7
+
+/* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+/* Special clients parameters */
+
+/* SB indices */
+/* FCoE L2 */
+#define BNX2X_FCOE_L2_RX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
+
+#define BNX2X_FCOE_L2_TX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
+
+/**
+ * CIDs and CLIDs:
+ * CLIDs below is a CLID for func 0, then the CLID for other
+ * functions will be calculated by the formula:
+ *
+ * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+enum {
+ BNX2X_ISCSI_ETH_CL_ID_IDX,
+ BNX2X_FCOE_ETH_CL_ID_IDX,
+ BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
+};
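
As a worked illustration of the CLID formula above, here is a minimal standalone sketch (plain C, not driver code; the two special clients mirror the enum above, while the real base CLIDs come from the HSI):

    #include <stdio.h>

    enum { ISCSI_CL_ID_IDX, FCOE_CL_ID_IDX, NUM_SPECIAL_CLIENTS };

    /* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X */
    static int func_clid(int func, int func0_clid)
    {
            return func * NUM_SPECIAL_CLIENTS + func0_clid;
    }

    int main(void)
    {
            for (int func = 0; func < 4; func++)
                    printf("func %d: iscsi clid %d, fcoe clid %d\n", func,
                           func_clid(func, ISCSI_CL_ID_IDX),
                           func_clid(func, FCOE_CL_ID_IDX));
            return 0;
    }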
+
+/* use a value high enough to be above all the PFs, which has least significant
+ * nibble as 8, so when cnic needs to come up with a CID for UIO to use to
+ * calculate doorbell address according to old doorbell configuration scheme
+ * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number.
+ * We must avoid coming up with cid 8 for iscsi since according to this method
+ * the designated UIO cid will come out 0 and it has a special handling for that
+ * case which doesn't suit us. Therefore we will ceiling to the closest cid
+ * which has least significant nibble 8, and if it is 8 we will move forward
+ * to 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
+ (bp)->max_cos)
+/* number of cids traversed by UIO's DPM addition to the doorbell */
+#define UIO_DPM 8
+/* roundup to DPM offset */
+#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+ UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
+ (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
+ (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
+ (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted - need this value for cid allocation */
+#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
+ BNX2X_1st_NON_L2_ETH_CID(bp))
+ /* iSCSI L2 */
+#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
+ /* FCoE L2 */
+#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
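
To make the rounding concrete, here is a standalone sketch of the macros above (plain C; the first free cid is passed in directly, which is an assumption for illustration -- in the driver it is BNX2X_1st_NON_L2_ETH_CID(bp)):

    #include <stdio.h>

    #define UIO_DPM 8

    /* simplified mirror of UIO_ROUNDUP() */
    static unsigned int uio_roundup(unsigned int first_cid)
    {
            return ((first_cid + UIO_DPM - 1) / UIO_DPM) * UIO_DPM;
    }

    /* simplified mirror of UIO_DPM_ALIGN() */
    static unsigned int uio_dpm_align(unsigned int first_cid)
    {
            unsigned int up = uio_roundup(first_cid);

            return up + (up + UIO_DPM) % (UIO_DPM * 2);
    }

    /* simplified mirror of BNX2X_CNIC_START_ETH_CID() */
    static unsigned int cnic_start_cid(unsigned int first_cid)
    {
            unsigned int aligned = uio_dpm_align(first_cid);

            /* cid 8 would make UIO derive the special-cased cid 0 */
            return aligned + (UIO_DPM * 2) * (aligned == UIO_DPM);
    }

    int main(void)
    {
            /* e.g. 6 eth queues * 3 CoS = 18 L2 cids -> start cid 0x18 */
            printf("first free cid 18 -> 0x%x\n", cnic_start_cid(18));
            /* the cid-8 special case also lands on 0x18 */
            printf("first free cid 4  -> 0x%x\n", cnic_start_cid(4));
            return 0;
    }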
+
+#define CNIC_SUPPORT(bp) ((bp)->cnic_support)
+#define CNIC_ENABLED(bp) ((bp)->cnic_enabled)
+#define CNIC_LOADED(bp) ((bp)->cnic_loaded)
+#define FCOE_INIT(bp) ((bp)->fcoe_init)
+
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
+#define SM_RX_ID 0
+#define SM_TX_ID 1
+
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX 1
+#define FIRST_TX_COS_INDEX 0
+
+/* rules for calculating the cids of tx-only connections */
+#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+ (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* fp index inside class of service range */
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+ ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* Indices for the transmission queues array:
+ * txdata for RSS i CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if it exists) is at location max cos * num of RSS
+ * txdata for FWD (if it exists) is one location after FCoE
+ * txdata for OOO (if it exists) is one location after FWD
+ */
+enum {
+ FCOE_TXQ_IDX_OFFSET,
+ FWD_TXQ_IDX_OFFSET,
+ OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+
+/* fast path */
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to the kmalloc() data only,
+ * skbs are built only after the hardware has filled the frame.
+ */
+struct sw_rx_bd {
+ u8 *data;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u16 first_bd;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD (1<<0)
+#define BNX2X_HAS_SECOND_PBD (1<<1)
+};
+
+struct sw_rx_page {
+ struct page *page;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned int offset;
+};
+
+union db_prod {
+ struct doorbell_set_prod data;
+ u32 raw;
+};
+
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
+ ETH_MAX_AGGREGATION_QUEUES_E1 :\
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT 16
+#define DROPLESS_FC_HEADROOM 100
+
+/* MC hsi */
+#define BCM_PAGE_SHIFT 12
+#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
+
+#define PAGES_PER_SGE_SHIFT 0
+#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SHIFT 12
+#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
+#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
+#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
+ SGE_PAGES), 0xffff)
+
+/* SGE ring related macros */
+#define NUM_RX_SGE_PAGES 2
+#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
+#define NEXT_PAGE_SGE_DESC_CNT 2
+#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
+/* RX_SGE_CNT is promised to be a power of 2 */
+#define RX_SGE_MASK (RX_SGE_CNT - 1)
+#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE (NUM_RX_SGE - 1)
+#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
+ (MAX_RX_SGE_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+ (x) + 1)
+#define RX_SGE(x) ((x) & MAX_RX_SGE)
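
NEXT_SGE_IDX() advances a ring index while hopping over the link ("next page") descriptors that close each page. A standalone sketch, assuming sizeof(struct eth_rx_sge) == 8 so that RX_SGE_CNT == 512 for a 4K page:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_SGE_CNT              512     /* assumed: 4096 / 8 */
    #define NEXT_PAGE_SGE_DESC_CNT  2
    #define MAX_RX_SGE_CNT          (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
    #define RX_SGE_MASK             (RX_SGE_CNT - 1)

    static uint16_t next_sge_idx(uint16_t x)
    {
            /* at the last usable slot of a page, jump past the link slots */
            if ((x & RX_SGE_MASK) == (MAX_RX_SGE_CNT - 1))
                    return x + 1 + NEXT_PAGE_SGE_DESC_CNT;
            return x + 1;
    }

    int main(void)
    {
            uint16_t idx = 508;

            for (int i = 0; i < 3; i++) {
                    printf("%u -> ", idx);
                    idx = next_sge_idx(idx);
            }
            /* prints 508 -> 509 -> 512 -> 513: slots 510/511 are skipped */
            printf("%u\n", idx);
            return 0;
    }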
+
+/*
+ * The number of required SGEs is the sum of two:
+ * 1. The number of possibly opened aggregations (the next packet for
+ *    these aggregations will probably consume an SGE immediately)
+ * 2. The rest of the BRB blocks divided by 2 (a block will consume a new
+ *    SGE only after placement on the BD for a new TPA aggregation)
+ *
+ * Takes into account the NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
+ (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+ MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
+ NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+/* Manipulate a bit vector defined as an array of u64 */
+
+/* Number of bits in one sge_mask array element */
+#define BIT_VEC64_ELEM_SZ 64
+#define BIT_VEC64_ELEM_SHIFT 6
+#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+ do { \
+ el = ((el) | ((u64)0x1 << (bit))); \
+ } while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+ do { \
+ el = ((el) & (~((u64)0x1 << (bit)))); \
+ } while (0)
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+ __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+ (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+ ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
+
+/* Creates a bitmask of all ones in the less significant bits.
+   idx - index of the most significant bit in the created mask */
+#define BIT_VEC64_ONES_MASK(idx) \
+ (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0))
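
A standalone usage sketch of these bit-vector helpers (set and test reduced to expression form, types narrowed to uint64_t):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    #define BIT_VEC64_ELEM_SHIFT   6
    #define BIT_VEC64_ELEM_MASK    ((u64)63)

    #define BIT_VEC64_SET_BIT(vec64, idx) \
            ((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] |= \
             (u64)1 << ((idx) & BIT_VEC64_ELEM_MASK))
    #define BIT_VEC64_TEST_BIT(vec64, idx) \
            (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
              ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)

    int main(void)
    {
            u64 mask[2] = { 0, 0 };         /* a 128-bit vector */

            BIT_VEC64_SET_BIT(mask, 70);    /* lands in element 1, bit 6 */
            printf("bit 70: %d, bit 69: %d, elem1: 0x%llx\n",
                   (int)BIT_VEC64_TEST_BIT(mask, 70),
                   (int)BIT_VEC64_TEST_BIT(mask, 69),
                   (unsigned long long)mask[1]);
            return 0;
    }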
+
+/*******************************************************/
+
+/* Number of u64 elements in SGE mask array */
+#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
+#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
+
+union host_hc_status_block {
+ /* pointer to fp status block e1x */
+ struct host_hc_status_block_e1x *e1x_sb;
+ /* pointer to fp status block e2 */
+ struct host_hc_status_block_e2 *e2_sb;
+};
+
+struct bnx2x_agg_info {
+ /*
+	 * The first aggregation buffer is a data buffer; the following ones are pages.
+ * We will preallocate the data buffer for each aggregation when
+ * we open the interface and will replace the BD at the consumer
+ * with this one when we receive the TPA_START CQE in order to
+ * keep the Rx BD ring consistent.
+ */
+ struct sw_rx_bd first_buf;
+ u8 tpa_state;
+#define BNX2X_TPA_START 1
+#define BNX2X_TPA_STOP 2
+#define BNX2X_TPA_ERROR 3
+ u8 placement_offset;
+ u16 parsing_flags;
+ u16 vlan_tag;
+ u16 len_on_bd;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
+ u16 gro_size;
+ u16 full_page;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct bnx2x_fp_txdata {
+
+ struct sw_tx_bd *tx_buf_ring;
+
+ union eth_tx_bd_types *tx_desc_ring;
+ dma_addr_t tx_desc_mapping;
+
+ u32 cid;
+
+ union db_prod tx_db;
+
+ u16 tx_pkt_prod;
+ u16 tx_pkt_cons;
+ u16 tx_bd_prod;
+ u16 tx_bd_cons;
+
+ unsigned long tx_pkt;
+
+ __le16 *tx_cons_sb;
+
+ int txq_index;
+ struct bnx2x_fastpath *parent_fp;
+ int tx_ring_size;
+};
+
+enum bnx2x_tpa_mode_t {
+ TPA_MODE_DISABLED,
+ TPA_MODE_LRO,
+ TPA_MODE_GRO
+};
+
+struct bnx2x_alloc_pool {
+ struct page *page;
+ unsigned int offset;
+};
+
+struct bnx2x_fastpath {
+ struct bnx2x *bp; /* parent */
+
+ struct napi_struct napi;
+
+ union host_hc_status_block status_blk;
+ /* chip independent shortcuts into sb structure */
+ __le16 *sb_index_values;
+ __le16 *sb_running_index;
+ /* chip independent shortcut into rx_prods_offset memory */
+ u32 ustorm_rx_prods_offset;
+
+ u32 rx_buf_size;
+ u32 rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */
+ dma_addr_t status_blk_mapping;
+
+ enum bnx2x_tpa_mode_t mode;
+
+ u8 max_cos; /* actual number of active tx coses */
+ struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
+
+ struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
+ struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
+
+ struct eth_rx_bd *rx_desc_ring;
+ dma_addr_t rx_desc_mapping;
+
+ union eth_rx_cqe *rx_comp_ring;
+ dma_addr_t rx_comp_mapping;
+
+ /* SGE ring */
+ struct eth_rx_sge *rx_sge_ring;
+ dma_addr_t rx_sge_mapping;
+
+ u64 sge_mask[RX_SGE_MASK_LEN];
+
+ u32 cid;
+
+ __le16 fp_hc_idx;
+
+ u8 index; /* number in fp array */
+ u8 rx_queue; /* index for skb_record */
+ u8 cl_id; /* eth client id */
+ u8 cl_qzone_id;
+ u8 fw_sb_id; /* status block number in FW */
+ u8 igu_sb_id; /* status block number in HW */
+
+ u16 rx_bd_prod;
+ u16 rx_bd_cons;
+ u16 rx_comp_prod;
+ u16 rx_comp_cons;
+ u16 rx_sge_prod;
+ /* The last maximal completed SGE */
+ u16 last_max_sge;
+ __le16 *rx_cons_sb;
+
+ /* TPA related */
+ struct bnx2x_agg_info *tpa_info;
+#ifdef BNX2X_STOP_ON_ERROR
+ u64 tpa_queue_used;
+#endif
+ /* The size is calculated using the following:
+ sizeof name field from netdev structure +
+ 4 ('-Xx-' string) +
+ 4 (for the digits and to make it DWORD aligned) */
+#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+ char name[FP_NAME_SIZE];
+
+ struct bnx2x_alloc_pool page_pool;
+};
+
+#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
+
+#define FCOE_IDX_OFFSET 0
+
+#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+ FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata_ptr[FIRST_TX_COS_INDEX] \
+ ->var)
+
+#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
+#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
+
+/* MC hsi */
+#define MAX_FETCH_BD 13 /* HW max BDs per packet */
+#define RX_COPY_THRESH 92
+
+#define NUM_TX_RINGS 16
+#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
+#define NEXT_PAGE_TX_DESC_CNT 1
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
+#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD (NUM_TX_BD - 1)
+#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
+ (MAX_TX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+ (x) + 1)
+#define TX_BD(x) ((x) & MAX_TX_BD)
+#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
+
+/* number of NEXT_PAGE descriptors may be required during placement */
+#define NEXT_CNT_PER_TX_PKT(bds) \
+ (((bds) + MAX_TX_DESC_CNT - 1) / \
+ MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
+/* max BDs per tx packet w/o next_pages:
+ * START_BD - describes the packet
+ * START_BD (split) - includes the unpaged data segment for GSO
+ * PARSING_BD - for TSO and CSUM data
+ * PARSING_BD2 - for encapsulation data
+ * Frag BDs - describe the pages for frags
+ */
+#define BDS_PER_TX_PKT 4
+#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
+/* max BDs per tx packet including next pages */
+#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
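
As a worked example, assuming MAX_SKB_FRAGS == 17 and sizeof(union eth_tx_bd_types) == 16 (so TX_DESC_CNT == 4096/16 == 256 and MAX_TX_DESC_CNT == 255): MAX_BDS_PER_TX_PKT = 17 + 4 = 21, NEXT_CNT_PER_TX_PKT(21) = DIV_ROUND_UP(21, 255) * 1 = 1, and therefore MAX_DESC_PER_TX_PKT = 22.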
+
+/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
+#define NUM_RX_RINGS 8
+#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define NEXT_PAGE_RX_DESC_CNT 2
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
+#define RX_DESC_MASK (RX_DESC_CNT - 1)
+#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD (NUM_RX_BD - 1)
+#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+
+/* dropless fc calculations for BDs
+ *
+ * The number of BDs should be the same as the number of buffers in the BRB:
+ * the low threshold takes into account the NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ BRB_SIZE(bp)
+#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+ MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp) (NUM_BD_REQ + \
+ NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
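
To make the thresholds concrete, a standalone sketch computing BD_TH_LO/BD_TH_HI for an E3 part. The numeric inputs are assumptions for illustration (BRB_SIZE == 1024, MAX_AGG_QS == 64, MAX_SPQ_PENDING == 8, and an 8-byte eth_rx_bd so RX_DESC_CNT == 512), not values pulled from the HSI headers here:

    #include <stdio.h>

    #define BRB_SIZE                1024    /* assumed E3 value */
    #define MAX_AGG_QS              64      /* assumed E1H/E2/E3 value */
    #define MAX_SPQ_PENDING         8       /* assumed */
    #define FW_DROP_LEVEL           (3 + MAX_SPQ_PENDING + MAX_AGG_QS)
    #define DROPLESS_FC_HEADROOM    100
    #define RX_DESC_CNT             512     /* assumed: 4096 / 8 */
    #define NEXT_PAGE_RX_DESC_CNT   2
    #define MAX_RX_DESC_CNT         (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)

    int main(void)
    {
            int num_bd_req = BRB_SIZE;
            int num_bd_pg_req =
                    (num_bd_req + MAX_RX_DESC_CNT - 1) / MAX_RX_DESC_CNT;
            int bd_th_lo = num_bd_req +
                    num_bd_pg_req * NEXT_PAGE_RX_DESC_CNT + FW_DROP_LEVEL;

            /* with these inputs: BD_TH_LO=1105, BD_TH_HI=1205 */
            printf("BD_TH_LO=%d BD_TH_HI=%d\n",
                   bd_th_lo, bd_th_lo + DROPLESS_FC_HEADROOM);
            return 0;
    }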
+
+#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
+
+#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
+ ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+ ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+ MIN_RX_AVAIL))
+
+#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
+ (MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+ (x) + 1)
+#define RX_BD(x) ((x) & MAX_RX_BD)
+
+/*
+ * Since a CQE is X times bigger than a BD entry, we have to allocate X times
+ * more pages for the CQ ring in order to keep it balanced with the BD ring
+ */
+#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
+#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define NEXT_PAGE_RCQ_DESC_CNT 1
+#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
+#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
+ (MAX_RCQ_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+ (x) + 1)
+#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
+
+/* dropless fc calculations for RCQs
+ *
+ * The number of RCQs should be the same as the number of buffers in the BRB:
+ * the low threshold takes into account the NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+ MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
+ NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+/* This is needed for determining last_max */
+#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
+
+#define BNX2X_SWCID_SHIFT 17
+#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
+
+/* used on a CID received from the HW */
+#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK)
+#define CQE_CMD(x) (le32_to_cpu(x) >> \
+ COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
+ le32_to_cpu((bd)->addr_lo))
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
+
+#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
+#define DOORBELL_RELAXED(bp, cid, val) \
+ writel_relaxed((u32)(val), (bp)->doorbells + ((bp)->db_size * (cid)))
+
+/* TX CSUM helpers */
+#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
+ skb->csum_offset)
+#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
+ skb->csum_offset))
+
+#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
+
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 (1 << 0)
+#define XMIT_CSUM_V6 (1 << 1)
+#define XMIT_CSUM_TCP (1 << 2)
+#define XMIT_GSO_V4 (1 << 3)
+#define XMIT_GSO_V6 (1 << 4)
+#define XMIT_CSUM_ENC_V4 (1 << 5)
+#define XMIT_CSUM_ENC_V6 (1 << 6)
+#define XMIT_GSO_ENC_V4 (1 << 7)
+#define XMIT_GSO_ENC_V6 (1 << 8)
+
+#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
+
+/* stuff added to make the code fit 80Col */
+#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
+
+#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+ (((le16_to_cpu(flags) & \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
+ == PRS_FLAG_OVERETH_IPV4)
+#define BNX2X_RX_SUM_FIX(cqe) \
+ BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
+
+#define FP_USB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_c, func)
+
+#define HC_INDEX_ETH_RX_CQ_CONS 1
+
+#define HC_INDEX_OOO_TX_CQ_CONS 4
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+
+#define BNX2X_RX_SB_INDEX \
+ (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
+
+#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
+
+#define BNX2X_TX_SB_INDEX_COS0 \
+ (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
+
+/* end of fast path */
+
+/* common */
+
+struct bnx2x_common {
+
+ u32 chip_id;
+/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
+
+#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
+#define CHIP_NUM_57710 0x164e
+#define CHIP_NUM_57711 0x164f
+#define CHIP_NUM_57711E 0x1650
+#define CHIP_NUM_57712 0x1662
+#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57712_VF 0x166f
+#define CHIP_NUM_57713 0x1651
+#define CHIP_NUM_57713E 0x1652
+#define CHIP_NUM_57800 0x168a
+#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57800_VF 0x16a9
+#define CHIP_NUM_57810 0x168e
+#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57810_VF 0x16af
+#define CHIP_NUM_57811 0x163d
+#define CHIP_NUM_57811_MF 0x163e
+#define CHIP_NUM_57811_VF 0x163f
+#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
+#define CHIP_NUM_57840_4_10 0x16a1
+#define CHIP_NUM_57840_2_20 0x16a2
+#define CHIP_NUM_57840_MF 0x16a4
+#define CHIP_NUM_57840_VF 0x16ad
+#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
+#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
+#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
+#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF)
+#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF)
+#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF)
+#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
+#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF)
+#define CHIP_IS_57840(bp) \
+ ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
+#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
+#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
+#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
+ CHIP_IS_57711E(bp))
+#define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \
+ CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57811_VF(bp))
+#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
+ CHIP_IS_57712_MF(bp) || \
+ CHIP_IS_57712_VF(bp))
+#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
+ CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57800_VF(bp) || \
+ CHIP_IS_57810(bp) || \
+ CHIP_IS_57810_MF(bp) || \
+ CHIP_IS_57810_VF(bp) || \
+ CHIP_IS_57811xx(bp) || \
+ CHIP_IS_57840(bp) || \
+ CHIP_IS_57840_MF(bp) || \
+ CHIP_IS_57840_VF(bp))
+#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
+#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
+
+#define CHIP_REV_SHIFT 12
+#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT)
+/* assume maximum 5 revisions */
+#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000)
+/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
+#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
+ !(CHIP_REV_VAL(bp) & 0x00001000))
+/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
+#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
+ (CHIP_REV_VAL(bp) & 0x00001000))
+
+#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
+ ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
+
+#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
+#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
+#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+ (CHIP_REV_SHIFT + 1)) \
+ << CHIP_REV_SHIFT)
+#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \
+ CHIP_REV_SIM(bp) :\
+ CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Ax))
+/* This define is used in two main places:
+ * 1. In the early stages of nic_load, to know whether to configure Parser/Searcher
+ * in nic-only mode or in offload mode. Offload mode is configured if either the
+ * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
+ * registered for this port (which means that the user wants storage services).
+ * 2. During cnic-related load, to know if offload mode is already configured in
+ * the HW or needs to be configured.
+ * Since the transition from nic-mode to offload-mode in HW causes traffic
+ * corruption, nic-mode is configured only on ports on which storage services
+ * were never requested.
+ */
+#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
+
+ int flash_size;
+#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT 30000
+#define BNX2X_NVRAM_PAGE_SIZE 256
+
+ u32 shmem_base;
+ u32 shmem2_base;
+ u32 mf_cfg_base;
+ u32 mf2_cfg_base;
+
+ u32 hw_config;
+
+ u32 bc_ver;
+
+ u8 int_block;
+#define INT_BLOCK_HC 0
+#define INT_BLOCK_IGU 1
+#define INT_BLOCK_MODE_NORMAL 0
+#define INT_BLOCK_MODE_BW_COMP 2
+#define CHIP_INT_MODE_IS_NBC(bp) \
+ (!CHIP_IS_E1x(bp) && \
+ !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
+
+ u8 chip_port_mode;
+#define CHIP_4_PORT_MODE 0x0
+#define CHIP_2_PORT_MODE 0x1
+#define CHIP_PORT_MODE_NONE 0x2
+#define CHIP_MODE(bp) (bp->common.chip_port_mode)
+#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+ u32 boot_mode;
+};
+
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+
+#define MAX_IGU_ATTN_ACK_TO 100
+/* end of common */
+
+/* port */
+
+struct bnx2x_port {
+ u32 pmf;
+
+ u32 link_config[LINK_CONFIG_SIZE];
+
+ u32 supported[LINK_CONFIG_SIZE];
+
+ u32 advertising[LINK_CONFIG_SIZE];
+
+ u32 phy_addr;
+
+ /* used to synchronize phy accesses */
+ struct mutex phy_mutex;
+
+ u32 port_stx;
+
+ struct nig_stats old_nig_stats;
+};
+
+/* end of port */
+
+#define STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+
+/* slow path */
+#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
+#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
+#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
+#define BNX2X_VF_ID_INVALID 0xFF
+
+/* the number of VF CIDS multiplied by the number of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
+ * status block, represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However, special L2 queues such
+ * as the FCoE queue do not require an FP-SB, and other components like
+ * the CNIC may consume an FP-SB, reducing the number of possible L2 queues.
+ *
+ * If the maximum number of FP-SBs available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
+ *    regular L2 queues is Y = X - 1
+ * b. In MF mode the actual number of L2 queues is Y = (X - 1) / MF_factor
+ * c. If the FCoE L2 queue is supported, the actual number of L2 queues
+ *    is Y + 1
+ * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
+ *    slow-path interrupts) or Y + 2 if CNIC is supported (one additional
+ *    FP interrupt context for the CNIC)
+ * e. The number of HW contexts (CID count) is always X, or X + 1 if the
+ *    FCoE L2 queue is supported. The cid for the FCoE L2 queue is always X.
+ * (A worked budget is sketched just below this comment.)
+ */
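
A minimal standalone sketch of rules (a)-(e) above for the single-function case (MF_factor == 1); illustrative only, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    struct fp_budget { int l2_queues; int msix_vectors; int cids; };

    static struct fp_budget budget(int x, bool cnic, bool fcoe)
    {
            struct fp_budget b;
            int y = cnic ? x - 1 : x;                 /* rule (a) */

            b.l2_queues = y + (fcoe ? 1 : 0);         /* rule (c) */
            b.msix_vectors = y + 1 + (cnic ? 1 : 0);  /* rule (d) */
            b.cids = x + (fcoe ? 1 : 0);              /* rule (e) */
            return b;
    }

    int main(void)
    {
            /* e.g. X = 16 (FP_SB_MAX_E1x), CNIC and FCoE both present */
            struct fp_budget b = budget(16, true, true);

            /* prints: L2 queues 16, MSIX vectors 17, CIDs 17 */
            printf("L2 queues %d, MSIX vectors %d, CIDs %d\n",
                   b.l2_queues, b.msix_vectors, b.cids);
            return 0;
    }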
+
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x 16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
+
+union cdu_context {
+ struct eth_context eth;
+ char pad[1024];
+};
+
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#define CNIC_ISCSI_CID_MAX 256
+#define CNIC_FCOE_CID_MAX 2048
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
+#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+
+#define QM_ILT_PAGE_SZ_HW 0
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
+#define QM_CID_ROUND 1024
+
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW 0
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
+#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
+ BNX2X_VF_CIDS + \
+ CNIC_ISCSI_CID_MAX)
+#define TM_ILT_SZ (8 * TM_CONN_NUM)
+#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW 0
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
+#define SRC_HASH_BITS 10
+#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ SRC_ILT_SZ
+#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
+#define MAX_DMAE_C 8
+
+/* DMA memory not used in fastpath */
+struct bnx2x_slowpath {
+ union {
+ struct mac_configuration_cmd e1x;
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
+ struct tstorm_eth_mac_filter_config e1x;
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct mac_configuration_cmd e1;
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ struct eth_rss_update_ramrod_data rss_rdata;
+
+ /* Queue State related ramrods are always sent under rtnl_lock */
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
+ } q_rdata;
+
+ union {
+ struct function_start_data func_start;
+ /* pfc configuration for DCBX ramrod */
+ struct flow_control_configuration pfc_config;
+ } func_rdata;
+
+	/* afex ramrods cannot be part of the func_rdata union because these
+	 * events might arrive in parallel to other events from func_rdata.
+	 * Therefore, had they been defined in the same union, the data
+	 * could get corrupted.
+	 */
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
+
+ /* used by dmae command executer */
+ struct dmae_command dmae[MAX_DMAE_C];
+
+ u32 stats_comp;
+ union mac_stats mac_stats;
+ struct nig_stats nig_stats;
+ struct host_port_stats port_stats;
+ struct host_func_stats func_stats;
+
+ u32 wb_comp;
+ u32 wb_data[4];
+
+ union drv_info_to_mcp drv_info_to_mcp;
+};
+
+#define bnx2x_sp(bp, var) (&bp->slowpath->var)
+#define bnx2x_sp_mapping(bp, var) \
+ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
+
+/* attn group wiring */
+#define MAX_DYNAMIC_ATTN_GRPS 8
+
+struct attn_route {
+ u32 sig[5];
+};
+
+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+struct hw_context {
+ union cdu_context *vcxt;
+ dma_addr_t cxt_mapping;
+ size_t size;
+};
+
+/* forward */
+struct bnx2x_ilt;
+
+struct bnx2x_vfdb;
+
+enum bnx2x_recovery_state {
+ BNX2X_RECOVERY_DONE,
+ BNX2X_RECOVERY_INIT,
+ BNX2X_RECOVERY_WAIT,
+ BNX2X_RECOVERY_FAILED,
+ BNX2X_RECOVERY_NIC_LOADING
+};
+
+/*
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
+ */
+#define NUM_EQ_PAGES 1
+#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
+ (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
+
+#define BNX2X_EQ_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_EQ_CONS])
+
+/* This is the data used to create a link report message. We keep the data
+ * used for the last link report in order to prevent reporting the same
+ * link parameters twice.
+ */
+struct bnx2x_link_report_data {
+ u16 line_speed; /* Effective line speed */
+ unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+ BNX2X_LINK_REPORT_FD, /* Full DUPLEX */
+ BNX2X_LINK_REPORT_LINK_DOWN,
+ BNX2X_LINK_REPORT_RX_FC_ON,
+ BNX2X_LINK_REPORT_TX_FC_ON,
+};
+
+enum {
+ BNX2X_PORT_QUERY_IDX,
+ BNX2X_PF_QUERY_IDX,
+ BNX2X_FCOE_QUERY_IDX,
+ BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+ struct stats_query_header hdr;
+ struct stats_query_entry query[FP_SB_MAX_E1x+
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
+};
+
+struct bnx2x_fw_stats_data {
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
+ struct fcoe_statistics_params fcoe;
+ struct per_queue_stats queue_stats[];
+};
+
+/* Public slow path states */
+enum sp_rtnl_flag {
+ BNX2X_SP_RTNL_SETUP_TC,
+ BNX2X_SP_RTNL_TX_TIMEOUT,
+ BNX2X_SP_RTNL_FAN_FAILURE,
+ BNX2X_SP_RTNL_AFEX_F_UPDATE,
+ BNX2X_SP_RTNL_ENABLE_SRIOV,
+ BNX2X_SP_RTNL_VFPF_MCAST,
+ BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_SP_RTNL_RX_MODE,
+ BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_GET_DRV_VERSION,
+ BNX2X_SP_RTNL_UPDATE_SVID,
+};
+
+enum bnx2x_iov_flag {
+ BNX2X_IOV_HANDLE_VF_MSG,
+ BNX2X_IOV_HANDLE_FLR,
+};
+
+struct bnx2x_prev_path_list {
+ struct list_head list;
+ u8 bus;
+ u8 slot;
+ u8 path;
+ u8 aer;
+ u8 undi;
+};
+
+struct bnx2x_sp_objs {
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
+};
+
+struct bnx2x_fp_stats {
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
+enum {
+ SUB_MF_MODE_UNKNOWN = 0,
+ SUB_MF_MODE_UFP,
+ SUB_MF_MODE_NPAR1_DOT_5,
+ SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+ struct list_head link;
+ u16 vid;
+ bool hw;
+};
+
+enum bnx2x_udp_port_type {
+ BNX2X_UDP_PORT_VXLAN,
+ BNX2X_UDP_PORT_GENEVE,
+ BNX2X_UDP_PORT_MAX,
+};
+
+struct bnx2x {
+ /* Fields used in the tx and intr/napi performance paths
+ * are grouped together in the beginning of the structure
+ */
+ struct bnx2x_fastpath *fp;
+ struct bnx2x_sp_objs *sp_objs;
+ struct bnx2x_fp_stats *fp_stats;
+ struct bnx2x_fp_txdata *bnx2x_txq;
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u16 db_size;
+
+ u8 pf_num; /* absolute PF number */
+ u8 pfid; /* per-path PF number */
+ int base_fw_ndsb; /**/
+#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp) (bp->pfid & 1)
+#define BP_FUNC(bp) (bp->pfid)
+#define BP_ABS_FUNC(bp) (bp->pf_num)
+#define BP_VN(bp) ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp) (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
+ (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+
+#ifdef CONFIG_BNX2X_SRIOV
+ /* protects vf2pf mailbox from simultaneous access */
+ struct mutex vf2pf_mutex;
+ /* vf pf channel mailbox contains request and response buffers */
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ dma_addr_t vf2pf_mbox_mapping;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* bulletin board for messages from pf to vf */
+ union pf_vf_bulletin *pf2vf_bulletin;
+ dma_addr_t pf2vf_bulletin_mapping;
+
+ union pf_vf_bulletin shadow_bulletin;
+ struct pf_vf_bulletin_content old_bulletin;
+
+ u16 requested_nr_virtfn;
+#endif /* CONFIG_BNX2X_SRIOV */
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ const struct iro *iro_arr;
+#define IRO (bp->iro_arr)
+
+ enum bnx2x_recovery_state recovery_state;
+ int is_leader;
+ struct msix_entry *msix_table;
+
+ int tx_ring_size;
+
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE (ETH_ZLEN - ETH_HLEN)
+#define ETH_MAX_PACKET_SIZE ETH_DATA_LEN
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+/* TCP with Timestamp Option (32) + IPv6 (40) */
+#define ETH_MAX_TPA_HEADER_SIZE 72
+
+	/* Max supported alignment is 256 (shift 8); the minimal alignment
+	 * shift of 6 is optimal for 57xxx HW performance
+	 */
+#define BNX2X_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
+
+	/* FW uses 2 cache lines alignment for start packet and size
+	 *
+	 * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+	 * at the end of skb->data, to avoid wasting a full cache line.
+	 * This reduces memory use (skb->truesize).
+	 */
+#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END \
+ max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
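
A standalone sketch evaluating the alignment macros above, assuming a 64-byte cache line (L1_CACHE_SHIFT == 6) and taking SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320 as an illustrative value (it varies by architecture and kernel version):

    #include <stdio.h>

    #define L1_CACHE_SHIFT          6       /* assumed 64-byte cache line */
    /* max(6, min(8, L1_CACHE_SHIFT)) */
    #define BNX2X_RX_ALIGN_SHIFT    ((L1_CACHE_SHIFT < 6) ? 6 : \
                                     (L1_CACHE_SHIFT > 8) ? 8 : L1_CACHE_SHIFT)
    #define SHINFO_ALIGNED          320UL   /* illustrative SKB_DATA_ALIGN() */

    int main(void)
    {
            unsigned long start = 1UL << BNX2X_RX_ALIGN_SHIFT;
            unsigned long end = start > SHINFO_ALIGNED ? start : SHINFO_ALIGNED;

            /* with these inputs: START=64, END=320 */
            printf("FW_RX_ALIGN_START=%lu FW_RX_ALIGN_END=%lu\n", start, end);
            return 0;
    }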
+
+ struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID 16
+#define DEF_SB_ID HC_SP_SB_ID
+ __le16 def_idx;
+ __le16 def_att_idx;
+ u32 attn_state;
+ struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
+
+ /* slow path ring */
+ struct eth_spe *spq;
+ dma_addr_t spq_mapping;
+ u16 spq_prod_idx;
+ struct eth_spe *spq_prod_bd;
+ struct eth_spe *spq_last_bd;
+ __le16 *dsb_sp_prod;
+ atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
+ /* used to synchronize spq accesses */
+ spinlock_t spq_lock;
+
+ /* event queue */
+ union event_ring_elem *eq_ring;
+ dma_addr_t eq_mapping;
+ u16 eq_prod;
+ u16 eq_cons;
+ __le16 *eq_cons_sb;
+ atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
+
+ /* Counter for marking that there is a STAT_QUERY ramrod pending */
+ u16 stats_pending;
+ /* Counter for completed statistics ramrods */
+ u16 stats_comp;
+
+ /* End of fields used in the performance code paths */
+
+ int panic;
+ int msg_enable;
+
+ u32 flags;
+#define PCIX_FLAG (1 << 0)
+#define PCI_32BIT_FLAG (1 << 1)
+#define ONE_PORT_FLAG (1 << 2)
+#define NO_WOL_FLAG (1 << 3)
+#define USING_MSIX_FLAG (1 << 5)
+#define USING_MSI_FLAG (1 << 6)
+#define DISABLE_MSI_FLAG (1 << 7)
+#define NO_MCP_FLAG (1 << 9)
+#define MF_FUNC_DIS (1 << 11)
+#define OWN_CNIC_IRQ (1 << 12)
+#define NO_ISCSI_OOO_FLAG (1 << 13)
+#define NO_ISCSI_FLAG (1 << 14)
+#define NO_FCOE_FLAG (1 << 15)
+#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define TX_SWITCHING (1 << 18)
+#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
+#define USING_SINGLE_MSIX_FLAG (1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
+#define IS_VF_FLAG (1 << 22)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 23)
+#define HAS_PHYS_PORT_ID (1 << 24)
+#define PTP_SUPPORTED (1 << 26)
+#define TX_TIMESTAMPING_EN (1 << 27)
+
+#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
+
+#ifdef CONFIG_BNX2X_SRIOV
+#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
+#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
+#else
+#define IS_VF(bp) false
+#define IS_PF(bp) true
+#endif
+
+#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
+#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+
+ u8 cnic_support;
+ bool cnic_enabled;
+ bool cnic_loaded;
+ struct cnic_eth_dev *(*cnic_probe)(struct net_device *);
+
+ bool nic_stopped;
+
+ /* Flag that indicates that we can start looking for FCoE L2 queue
+ * completions in the default status block.
+ */
+ bool fcoe_init;
+
+ int mrrs;
+
+ struct delayed_work sp_task;
+ struct delayed_work iov_task;
+
+ atomic_t interrupt_occurred;
+ struct delayed_work sp_rtnl_task;
+
+ struct delayed_work period_task;
+ struct timer_list timer;
+ int current_interval;
+
+ u16 fw_seq;
+ u16 fw_drv_pulse_wr_seq;
+ u32 func_stx;
+
+ struct link_params link_params;
+ struct link_vars link_vars;
+ u32 link_cnt;
+ struct bnx2x_link_report_data last_reported_link;
+ bool force_link_down;
+
+ struct mdio_if_info mdio;
+
+ struct bnx2x_common common;
+ struct bnx2x_port port;
+
+ struct cmng_init cmng;
+
+ u32 mf_config[E1HVN_MAX];
+ u32 mf_ext_config;
+ u32 path_has_ovlan; /* E3 */
+ u16 mf_ov;
+ u8 mf_mode;
+#define IS_MF(bp) (bp->mf_mode != 0)
+#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
+#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
+ u8 mf_sub_mode;
+#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \
+ bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp) (IS_MF_SD(bp) && \
+ bp->mf_sub_mode == SUB_MF_MODE_BD)
+
+ u8 wol;
+
+ int rx_ring_size;
+
+ u16 tx_quick_cons_trip_int;
+ u16 tx_quick_cons_trip;
+ u16 tx_ticks_int;
+ u16 tx_ticks;
+
+ u16 rx_quick_cons_trip_int;
+ u16 rx_quick_cons_trip;
+ u16 rx_ticks_int;
+ u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
+
+ u32 lin_cnt;
+
+ u16 state;
+#define BNX2X_STATE_CLOSED 0
+#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
+#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
+#define BNX2X_STATE_OPEN 0x3000
+#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
+#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
+
+#define BNX2X_STATE_DIAG 0xe000
+#define BNX2X_STATE_ERROR 0xf000
+
+#define BNX2X_MAX_PRIORITY 8
+ int num_queues;
+ uint num_ethernet_queues;
+ uint num_cnic_queues;
+ int disable_tpa;
+
+ u32 rx_mode;
+#define BNX2X_RX_MODE_NONE 0
+#define BNX2X_RX_MODE_NORMAL 1
+#define BNX2X_RX_MODE_ALLMULTI 2
+#define BNX2X_RX_MODE_PROMISC 3
+#define BNX2X_MAX_MULTICAST 64
+
+ u8 igu_dsb_id;
+ u8 igu_base_sb;
+ u8 igu_sb_cnt;
+ u8 min_msix_vec_cnt;
+
+ u32 igu_base_addr;
+ dma_addr_t def_status_blk_mapping;
+
+ struct bnx2x_slowpath *slowpath;
+ dma_addr_t slowpath_mapping;
+
+ /* Mechanism protecting the drv_info_to_mcp */
+ struct mutex drv_info_mutex;
+ bool drv_info_mng_owner;
+
+ /* Total number of FW statistics requests */
+ u8 fw_stats_num;
+
+ /*
+ * This is a memory buffer that will contain both statistics
+ * ramrod request and data.
+ */
+ void *fw_stats;
+ dma_addr_t fw_stats_mapping;
+
+ /*
+ * FW statistics request shortcut (points at the
+ * beginning of fw_stats buffer).
+ */
+ struct bnx2x_fw_stats_req *fw_stats_req;
+ dma_addr_t fw_stats_req_mapping;
+ int fw_stats_req_sz;
+
+ /*
+ * FW statistics data shortcut (points at the beginning of
+ * fw_stats buffer + fw_stats_req_sz).
+ */
+ struct bnx2x_fw_stats_data *fw_stats_data;
+ dma_addr_t fw_stats_data_mapping;
+ int fw_stats_data_sz;
+
+	/* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
+	 * context size we need 32 ILT entries.
+	 */
+#define ILT_MAX_L2_LINES 32
+ struct hw_context context[ILT_MAX_L2_LINES];
+
+ struct bnx2x_ilt *ilt;
+#define BP_ILT(bp) ((bp)->ilt)
+#define ILT_MAX_LINES 256
+/*
+ * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
+ * to CNIC.
+ */
+#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
+
+/*
+ * Maximum CID count that might be required by the bnx2x:
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
+ */
+
+#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
+#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
+#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
+ ILT_PAGE_CIDS))
+
+ int qm_cid_count;
+
+ bool dropless_fc;
+
+ void *t2;
+ dma_addr_t t2_mapping;
+ struct cnic_ops __rcu *cnic_ops;
+ void *cnic_data;
+ u32 cnic_tag;
+ struct cnic_eth_dev cnic_eth_dev;
+ union host_hc_status_block cnic_sb;
+ dma_addr_t cnic_sb_mapping;
+ struct eth_spe *cnic_kwq;
+ struct eth_spe *cnic_kwq_prod;
+ struct eth_spe *cnic_kwq_cons;
+ struct eth_spe *cnic_kwq_last;
+ u16 cnic_kwq_pending;
+ u16 cnic_spq_pending;
+ u8 fip_mac[ETH_ALEN];
+ struct mutex cnic_mutex;
+ struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+ /* Start index of the "special" (CNIC related) L2 clients */
+ u8 cnic_base_cl_id;
+
+ int dmae_ready;
+ /* used to synchronize dmae accesses */
+ spinlock_t dmae_lock;
+
+ /* used to protect the FW mail box */
+ struct mutex fw_mb_mutex;
+
+ /* used to synchronize stats collecting */
+ int stats_state;
+
+ /* used for synchronization of concurrent threads statistics handling */
+ struct semaphore stats_lock;
+
+ /* used by dmae command loader */
+ struct dmae_command stats_dmae;
+ int executer_idx;
+
+ u16 stats_counter;
+ struct bnx2x_eth_stats eth_stats;
+ struct host_func_stats func_stats;
+ struct bnx2x_eth_stats_old eth_stats_old;
+ struct bnx2x_net_stats_old net_stats_old;
+ struct bnx2x_fw_port_stats_old fw_stats_old;
+ bool stats_init;
+
+ struct z_stream_s *strm;
+ void *gunzip_buf;
+ dma_addr_t gunzip_mapping;
+ int gunzip_outlen;
+#define FW_BUF_SIZE 0x8000
+#define GUNZIP_BUF(bp) (bp->gunzip_buf)
+#define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
+#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
+
+ struct raw_op *init_ops;
+ /* Init blocks offsets inside init_ops */
+ u16 *init_ops_offsets;
+ /* Data blob - has 32 bit granularity */
+ u32 *init_data;
+ u32 init_mode_flags;
+#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags)
+ /* Zipped PRAM blobs - raw data */
+ const u8 *tsem_int_table_data;
+ const u8 *tsem_pram_data;
+ const u8 *usem_int_table_data;
+ const u8 *usem_pram_data;
+ const u8 *xsem_int_table_data;
+ const u8 *xsem_pram_data;
+ const u8 *csem_int_table_data;
+ const u8 *csem_pram_data;
+#define INIT_OPS(bp) (bp->init_ops)
+#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
+#define INIT_DATA(bp) (bp->init_data)
+#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
+#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
+#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
+#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
+#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
+#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
+#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
+#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+
+#define PHY_FW_VER_LEN 20
+ char fw_ver[32];
+ const struct firmware *firmware;
+
+ struct bnx2x_vfdb *vfdb;
+#define IS_SRIOV(bp) ((bp)->vfdb)
+
+ /* DCB support on/off */
+ u16 dcb_state;
+#define BNX2X_DCB_STATE_OFF 0
+#define BNX2X_DCB_STATE_ON 1
+
+ /* DCBX engine mode */
+ int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF 0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
+#define BNX2X_DCBX_ENABLED_INVALID (-1)
+
+ bool dcbx_mode_uset;
+
+ struct bnx2x_config_dcbx_params dcbx_config_params;
+ struct bnx2x_dcbx_port_params dcbx_port_params;
+ int dcb_version;
+
+ /* CAM credit pools */
+ struct bnx2x_credit_pool_obj vlans_pool;
+
+ struct bnx2x_credit_pool_obj macs_pool;
+
+ /* RX_MODE object */
+ struct bnx2x_rx_mode_obj rx_mode_obj;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* Function State controlling object */
+ struct bnx2x_func_sp_obj func_obj;
+
+ unsigned long sp_state;
+
+ /* operation indication for the sp_rtnl task */
+ unsigned long sp_rtnl_state;
+
+ /* Indication of the IOV tasks */
+ unsigned long iov_task_state;
+
+ /* DCBX Negotiation results */
+ struct dcbx_features dcbx_local_feat;
+ u32 dcbx_error;
+
+#ifdef BCM_DCBNL
+ struct dcbx_features dcbx_remote_feat;
+ u32 dcbx_remote_flags;
+#endif
+ /* AFEX: store default vlan used */
+ int afex_def_vlan_tag;
+ enum mf_cfg_afex_vlan_mode afex_vlan_mode;
+ u32 pending_max;
+
+ /* multiple tx classes of service */
+ u8 max_cos;
+
+ /* priority to cos mapping */
+ u8 prio_to_cos[8];
+
+ int fp_array_size;
+ u32 dump_preset_idx;
+
+ u8 phys_port_id[ETH_ALEN];
+
+ /* PTP related context */
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct work_struct ptp_task;
+ struct cyclecounter cyclecounter;
+ struct timecounter timecounter;
+ bool timecounter_init_done;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ bool hwtstamp_ioctl_called;
+ u16 tx_type;
+ u16 rx_filter;
+
+ struct bnx2x_link_report_data vf_link_vars;
+ struct list_head vlan_reg;
+ u16 vlan_cnt;
+ u16 vlan_credit;
+ bool accept_any_vlan;
+
+ /* Vxlan/Geneve related information */
+ u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+
+#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0)
+ u32 fw_cap;
+
+ u32 fw_major;
+ u32 fw_minor;
+ u32 fw_rev;
+ u32 fw_eng;
+};
+
+/* Tx queues may be fewer than or equal to Rx queues */
+extern int num_queues;
+#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
+ (bp)->num_cnic_queues)
+#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
+
+#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
+
+#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp)
+/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
+
+#define RSS_IPV4_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+struct bnx2x_func_init_params {
+ /* dma */
+ bool spq_active;
+ dma_addr_t spq_map;
+ u16 spq_prod;
+
+ u16 func_id; /* abs fid */
+ u16 pf_id;
+};
+
+#define for_each_cnic_queue(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_eth_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_nondefault_eth_queue(bp, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+/* Skip forwarding FP */
+#define for_each_valid_rx_queue(bp, var) \
+ for ((var) = 0; \
+ (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+ BNX2X_NUM_ETH_QUEUES(bp)); \
+ (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_rx_queue_cnic(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_rx_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+/* Skip OOO FP */
+#define for_each_valid_tx_queue(bp, var) \
+ for ((var) = 0; \
+ (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+ BNX2X_NUM_ETH_QUEUES(bp)); \
+ (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_tx_queue_cnic(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_tx_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_nondefault_queue(bp, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_cos_in_tx_queue(fp, var) \
+ for ((var) = 0; (var) < (fp)->max_cos; (var)++)
+
+/* skip rx queue
+ * if FCoE L2 support is disabled and this is the FCoE L2 queue
+ */
+#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/* skip tx queue
+ * if FCoE L2 support is disabled and this is the FCoE L2 queue
+ */
+#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
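+
+/* Illustrative use of the iterators above (a sketch, not driver code;
+ * bnx2x_hypothetical_fp_init() is a made-up helper):
+ *
+ *	int i;
+ *
+ *	for_each_eth_queue(bp, i)
+ *		bnx2x_hypothetical_fp_init(bp, i);
+ *
+ * The FCoE-aware variants expand to the same loop with an embedded
+ * "if (skip_queue(bp, i)) continue; else", so the loop body binds to
+ * the else and skipped indices are silently passed over.
+ */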
+
+/*self test*/
+int bnx2x_idle_chk(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp: driver handle
+ * @mac: MAC to configure
+ * @obj: MAC object handle
+ * @set: if 'true' add a new MAC, otherwise - delete
+ * @mac_type: the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if the operation has been successfully scheduled, and a negative value if
+ * the requested operation has failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags);
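+
+/* A minimal usage sketch (illustrative, not lifted from the driver; mac_obj
+ * is whatever MAC object the caller owns):
+ *
+ *	unsigned long ramrod_flags = 0;
+ *	int rc;
+ *
+ *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ *	rc = bnx2x_set_mac_one(bp, mac, mac_obj, true,
+ *			       BNX2X_ETH_MAC, &ramrod_flags);
+ */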
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags);
+
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp: driver handle
+ * @mac_obj: MAC object handle
+ * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp: if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if the operation has been successfully scheduled, and a negative value if
+ * the requested operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp);
+
+/* Init Function API */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+void bnx2x_read_mf_cfg(struct bnx2x *bp);
+
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
+
+/* dmae */
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32);
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type);
+
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u8 src_type, u8 dst_type);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp);
+
+/* FLR related routines */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt);
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type);
+void bnx2x_update_coalesce(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
+
+bool bnx2x_port_after_undi(struct bnx2x *bp);
+
+static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
+ int wait)
+{
+ u32 val;
+
+ do {
+ val = REG_RD(bp, reg);
+ if (val == expected)
+ break;
+ ms -= wait;
+ msleep(wait);
+
+ } while (ms > 0);
+
+ return val;
+}
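+
+/* Usage sketch (SOME_STATUS_REG is a stand-in register name):
+ *
+ *	val = reg_poll(bp, SOME_STATUS_REG, 1, 1000, 50);
+ *	if (val != 1)
+ *		BNX2X_ERR("poll timed out, last value 0x%x\n", val);
+ *
+ * polls every 50 ms for up to 1000 ms and returns the last value read.
+ */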
+
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ bool is_pf);
+
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define ILOG2(x) (ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES (3072)
+/* In the 57710/11 we use the whole table since we have 8 functions.
+ * In the 57712 we have only 4 functions, but use the same size per function,
+ * so only half of the table is in use.
+ */
+#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
+/*
+ * The physical address is shifted right by 12 bits and a 1=valid bit is
+ * added at the 53rd bit; then, since this is a wide register(TM),
+ * we split it into two 32 bit writes.
+ */
+#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
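+
+/* Worked example (illustrative): for a DMA address of 0x0000001234567000,
+ * ONCHIP_ADDR1() yields (addr >> 12) & 0xFFFFFFFF = 0x01234567 and
+ * ONCHIP_ADDR2() yields (1 << 20) | (addr >> 44) = 0x00100000, i.e. the
+ * valid bit set with no high-order address bits in this case.
+ */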
+
+/* load/unload mode */
+#define LOAD_NORMAL 0
+#define LOAD_OPEN 1
+#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
+#define UNLOAD_NORMAL 0
+#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
+
+/* DMAE command defines */
+#define DMAE_TIMEOUT -1
+#define DMAE_PCI_ERROR -2 /* E2 and onward */
+#define DMAE_NOT_RDY -3
+#define DMAE_PCI_ERR_FLAG 0x80000000
+
+#define DMAE_SRC_PCI 0
+#define DMAE_SRC_GRC 1
+
+#define DMAE_DST_NONE 0
+#define DMAE_DST_PCI 1
+#define DMAE_DST_GRC 2
+
+#define DMAE_COMP_PCI 0
+#define DMAE_COMP_GRC 1
+
+/* E2 and onward - PCI error handling in the completion */
+
+#define DMAE_COMP_REGULAR 0
+#define DMAE_COM_SET_ERR 1
+
+#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
+ DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
+ DMAE_COMMAND_SRC_SHIFT)
+
+#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
+ DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
+ DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
+ DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
+ DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0 0
+#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
+
+#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
+#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
+#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
+
+#define DMAE_SRC_PF 0
+#define DMAE_SRC_VF 1
+
+#define DMAE_DST_PF 0
+#define DMAE_DST_VF 1
+
+#define DMAE_C_SRC 0
+#define DMAE_C_DST 1
+
+#define DMAE_LEN32_RD_MAX 0x80
+#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
+
+#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
+ * indicates error
+ */
+
+#define MAX_DMAE_C_PER_PORT 8
+#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+ BP_VN(bp))
+#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+ E1HVN_MAX)
+
+/* Following is the DMAE channel number allocation for the clients.
+ * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
+ * Driver: 0-3 and 8-11 (for PF dmae operations)
+ * 4 and 12 (for stats requests)
+ */
+#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */
+
+/* PCIE link and speed */
+#define PCICFG_LINK_WIDTH 0x1f00000
+#define PCICFG_LINK_WIDTH_SHIFT 20
+#define PCICFG_LINK_SPEED 0xf0000
+#define PCICFG_LINK_SPEED_SHIFT 16
+
+#define BNX2X_NUM_TESTS_SF 7
+#define BNX2X_NUM_TESTS_MF 3
+#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+ IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
+
+#define BNX2X_PHY_LOOPBACK 0
+#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_EXT_LOOPBACK 2
+#define BNX2X_PHY_LOOPBACK_FAILED 1
+#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_EXT_LOOPBACK_FAILED 3
+#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
+ BNX2X_PHY_LOOPBACK_FAILED)
+
+#define STROM_ASSERT_ARRAY_SIZE 50
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
+ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
+ (x))
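+
+/* Worked example (assuming BNX2X_SWCID_SHIFT is 17, as defined earlier in
+ * this header): port 1, vn 2 and cid 5 compose to
+ * (1 << 23) | (2 << 17) | 5 = 0x840005.
+ */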
+
+#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
+
+#define BNX2X_BTR 4
+#define MAX_SPQ_PENDING 8
+
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of the fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
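+
+/* Plugging in the constants above (a sanity check, not driver code):
+ * T_FAIR_COEF = (32768 + 160000) * 8 * 100 = 154,214,400 - the integration
+ * coefficient from which the actual Tfair is derived.
+ */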
+
+#define ATTN_NIG_FOR_FUNC (1L << 8)
+#define ATTN_SW_TIMER_4_FUNC (1L << 9)
+#define GPIO_2_FUNC (1L << 10)
+#define GPIO_3_FUNC (1L << 11)
+#define GPIO_4_FUNC (1L << 12)
+#define ATTN_GENERAL_ATTN_1 (1L << 13)
+#define ATTN_GENERAL_ATTN_2 (1L << 14)
+#define ATTN_GENERAL_ATTN_3 (1L << 15)
+#define ATTN_GENERAL_ATTN_4 (1L << 13)
+#define ATTN_GENERAL_ATTN_5 (1L << 14)
+#define ATTN_GENERAL_ATTN_6 (1L << 15)
+
+#define ATTN_HARD_WIRED_MASK 0xff00
+#define ATTENTION_ID 4
+
+#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \
+ IS_MF_FCOE_AFEX(bp))
+
+/* stuff added to make the code fit 80Col */
+
+#define BNX2X_PMF_LINK_ASSERT \
+ GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
+
+#define BNX2X_MC_ASSERT_BITS \
+ (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+
+#define BNX2X_MCP_ASSERT \
+ GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+
+#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
+#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
+
+#define HW_INTERRUPT_ASSERT_SET_0 \
+ (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
+#define HW_INTERRUPT_ASSERT_SET_1 \
+ (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
+#define HW_INTERRUPT_ASSERT_SET_2 \
+ (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+
+#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+
+#define MULTI_MASK 0x7f
+
+#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+ offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+ offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+ offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+ offsetof(struct tstorm_def_status_block, segment)
+
+#define BNX2X_SP_DSB_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_DEF_CONS])
+
+#define CAM_IS_INVALID(x) \
+ (GET_FLAG(x.flags, \
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+ (T_ETH_MAC_COMMAND_INVALIDATE))
+
+/* Number of u32 elements in MC hash array */
+#define MC_HASH_SIZE 8
+#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
+ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
+
+#ifndef PXP2_REG_PXP2_INT_STS
+#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
+#endif
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define VENDOR_ID_LEN 4
+
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+#define VF_ACQUIRE_MC_FILTERS 10
+#define VF_ACQUIRE_VLAN_FILTERS 2 /* VLAN0 + 'real' VLAN */
+
+#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
+ (!((me_reg) & ME_REG_VF_ERR)))
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
+
+#define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */
+#define HC_SEG_ACCESS_ATTN 4
+#define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+#define BNX2X_MF_SD_PROTOCOL(bp) \
+ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \
+ (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
+#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
+#define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp))
+
+#define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp))
+
+#define BNX2X_MF_EXT_PROTOCOL_MASK \
+ (MACP_FUNC_CFG_FLAGS_ETHERNET | \
+ MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD | \
+ MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \
+ BNX2X_MF_EXT_PROTOCOL_MASK)
+
+#define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \
+ (BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \
+ (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD)
+
+#define IS_MF_FCOE_AFEX(bp) \
+ (IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))
+
+#define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \
+ (IS_MF_SD(bp) && \
+ (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
+ BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+
+#define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \
+ (IS_MF_SI(bp) && \
+ (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \
+ BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)))
+
+#define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \
+ (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \
+ IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
+
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
+
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
+
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
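+
+/* For instance, with a hypothetical FOO_MASK of 0x30 and FOO_MASK_SHIFT of 4,
+ * SET_FLAG(val, FOO_MASK, 2) clears bits 4-5 of val and sets them to binary
+ * 10, while GET_FLAG(val, FOO_MASK) reads the same two bits back as 2.
+ */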
+
+enum {
+ SWITCH_UPDATE,
+ AFEX_UPDATE,
+};
+
+#define NUM_MACS 8
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
+void bnx2x_init_ptp(struct bnx2x *bp);
+int bnx2x_configure_ptp_filters(struct bnx2x *bp);
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+void bnx2x_register_phc(struct bnx2x *bp);
+
+#define BNX2X_MAX_PHC_DRIFT 31000000
+#define BNX2X_PTP_TX_TIMEOUT
+
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
new file mode 100644
index 0000000000..e9c1e1bb55
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,5141 @@
+/* bnx2x_cmn.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/crash_dump.h>
+#include <net/tcp.h>
+#include <net/gro.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/prefetch.h>
+#include "bnx2x_cmn.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
+ }
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_eth_queue(bp, i) {
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
+ }
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+ /* Reduce memory usage in kdump environment by using only one queue */
+ if (is_kdump_kernel())
+ nq = 1;
+
+ nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+ return nq;
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @from: source FP index
+ * @to: destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpying the entire
+ * source onto the target. Txdata pointers and related
+ * content are updated as well.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+ struct bnx2x_fastpath *from_fp = &bp->fp[from];
+ struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+ struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+ struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+ struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+ int old_max_eth_txqs, new_max_eth_txqs;
+ int old_txdata_index = 0, new_txdata_index = 0;
+ struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
+
+ /* Copy the NAPI object as it has been already initialized */
+ from_fp->napi = to_fp->napi;
+
+ /* Move bnx2x_fastpath contents */
+ memcpy(to_fp, from_fp, sizeof(*to_fp));
+ to_fp->index = to;
+
+ /* Retain the tpa_info of the original `to' version as we don't want
+ * 2 FPs to contain the same tpa_info pointer.
+ */
+ to_fp->tpa_info = old_tpa_info;
+
+ /* move sp_objs contents as well, as their indices match fp ones */
+ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+ /* move fp_stats contents as well, as their indices match fp ones */
+ memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+ /* Update txdata pointers in fp and move txdata content accordingly:
+ * Each fp consumes 'max_cos' txdata structures, so the index should be
+ * decremented by max_cos x delta.
+ */
+
+ old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+ new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+ (bp)->max_cos;
+ if (from == FCOE_IDX(bp)) {
+ old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ }
+
+ memcpy(&bp->bnx2x_txq[new_txdata_index],
+ &bp->bnx2x_txq[old_txdata_index],
+ sizeof(struct bnx2x_fp_txdata));
+ to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
+}
+
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string.
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+{
+ if (IS_PF(bp)) {
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+ phy_fw_ver[0] = '\0';
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ phy_fw_ver, PHY_FW_VER_LEN);
+ strscpy(buf, bp->fw_ver, buf_len);
+ snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ } else {
+ bnx2x_vf_fill_fw_str(bp, buf, buf_len);
+ }
+}
+
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp: driver handle
+ * @delta: number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+ int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Queue pointers cannot be re-set on a per-fp basis, as moving a pointer
+ * backward along the array could cause memory to be overwritten
+ */
+ for (cos = 1; cos < bp->max_cos; cos++) {
+ for (i = 0; i < old_eth_num - delta; i++) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int new_idx = cos * (old_eth_num - delta) + i;
+
+ memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+ sizeof(struct bnx2x_fp_txdata));
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+ }
+ }
+}
+
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
+/* free skb in the packet ring at pos idx
+ * return idx of last bd freed
+ */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ u16 idx, unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
+ struct eth_tx_start_bd *tx_start_bd;
+ struct eth_tx_bd *tx_data_bd;
+ struct sk_buff *skb = tx_buf->skb;
+ u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+ int nbd;
+ u16 split_bd_len = 0;
+
+ /* prefetch skb end pointer to speed up dev_kfree_skb() */
+ prefetch(&skb->end);
+
+ DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ txdata->txq_index, idx, tx_buf, skb);
+
+ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+
+ nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+#ifdef BNX2X_STOP_ON_ERROR
+ if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
+ BNX2X_ERR("BAD nbd!\n");
+ bnx2x_panic();
+ }
+#endif
+ new_cons = nbd + tx_buf->first_bd;
+
+ /* Get the next bd */
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+ /* Skip a parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+ /* Skip second parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
+ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* unmap first bd */
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+ DMA_TO_DEVICE);
+
+ /* now free frags */
+ while (nbd > 0) {
+
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ if (--nbd)
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* release skb */
+ WARN_ON(!skb);
+ if (likely(skb)) {
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ tx_buf->first_bd = 0;
+ tx_buf->skb = NULL;
+
+ return new_cons;
+}
+
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+{
+ struct netdev_queue *txq;
+ u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -1;
+#endif
+
+ txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ sw_cons = txdata->tx_pkt_cons;
+
+ /* Ensure subsequent loads occur after hw_cons */
+ smp_rmb();
+
+ while (sw_cons != hw_cons) {
+ u16 pkt_cons;
+
+ pkt_cons = TX_BD(sw_cons);
+
+ DP(NETIF_MSG_TX_DONE,
+ "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
+ txdata->txq_index, hw_cons, sw_cons, pkt_cons);
+
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+ &pkts_compl, &bytes_compl);
+
+ sw_cons++;
+ }
+
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
+ txdata->tx_pkt_cons = sw_cons;
+ txdata->tx_bd_cons = bd_cons;
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq))) {
+ /* Taking tx_lock() is needed to prevent re-enabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in bnx2x_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
+ (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+ }
+ return 0;
+}
+
+static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
+ u16 idx)
+{
+ u16 last_max = fp->last_max_sge;
+
+ if (SUB_S16(idx, last_max) > 0)
+ fp->last_max_sge = idx;
+}
+
+static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+ u16 sge_len,
+ struct eth_end_agg_rx_cqe *cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 last_max, last_elem, first_elem;
+ u16 delta = 0;
+ u16 i;
+
+ if (!sge_len)
+ return;
+
+ /* First mark all used pages */
+ for (i = 0; i < sge_len; i++)
+ BIT_VEC64_CLEAR_BIT(fp->sge_mask,
+ RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
+
+ DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
+ sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+ /* Here we assume that the last SGE index is the biggest */
+ prefetch((void *)(fp->sge_mask));
+ bnx2x_update_last_max_sge(fp,
+ le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+ last_max = RX_SGE(fp->last_max_sge);
+ last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+ first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
+
+ /* If ring is not full */
+ if (last_elem + 1 != first_elem)
+ last_elem++;
+
+ /* Now update the prod */
+ for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
+ if (likely(fp->sge_mask[i]))
+ break;
+
+ fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+ delta += BIT_VEC64_ELEM_SZ;
+ }
+
+ if (delta > 0) {
+ fp->rx_sge_prod += delta;
+ /* clear page-end entries */
+ bnx2x_clear_sge_mask_next_elems(fp);
+ }
+
+ DP(NETIF_MSG_RX_STATUS,
+ "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
+ fp->last_max_sge, fp->rx_sge_prod);
+}
+
+/* Get Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+ const struct eth_fast_path_rx_cqe *cqe,
+ enum pkt_hash_types *rxhash_type)
+{
+ /* Get Toeplitz hash from CQE */
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
+ enum eth_rss_hash_type htype;
+
+ htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
+ *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
+ return le32_to_cpu(cqe->rss_hash_result);
+ }
+ *rxhash_type = PKT_HASH_TYPE_NONE;
+ return 0;
+}
+
+static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
+ u16 cons, u16 prod,
+ struct eth_fast_path_rx_cqe *cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+ struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+ struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+ dma_addr_t mapping;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+
+ /* print error if current state != stop */
+ if (tpa_info->tpa_state != BNX2X_TPA_STOP)
+ BNX2X_ERR("start of bin not in stop [%d]\n", queue);
+
+ /* Try to map an empty data buffer from the aggregation info */
+ mapping = dma_map_single(&bp->pdev->dev,
+ first_buf->data + NET_SKB_PAD,
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+ /*
+ * ...if it fails - move the skb from the consumer to the producer
+ * and set the current aggregation state to ERROR to drop it
+ * when TPA_STOP arrives.
+ */
+
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ /* Move the BD from the consumer to the producer */
+ bnx2x_reuse_rx_data(fp, cons, prod);
+ tpa_info->tpa_state = BNX2X_TPA_ERROR;
+ return;
+ }
+
+ /* move empty data from pool to prod */
+ prod_rx_buf->data = first_buf->data;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ /* point prod_bd to new data */
+ prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+ /* move partial skb from cons to pool (don't unmap yet) */
+ *first_buf = *cons_rx_buf;
+
+ /* mark bin state as START */
+ tpa_info->parsing_flags =
+ le16_to_cpu(cqe->pars_flags.flags);
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ tpa_info->tpa_state = BNX2X_TPA_START;
+ tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+ tpa_info->placement_offset = cqe->placement_offset;
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
+ if (fp->mode == TPA_MODE_GRO) {
+ u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
+ tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
+ tpa_info->gro_size = gro_size;
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ fp->tpa_queue_used |= (1 << queue);
+ DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
+ fp->tpa_queue_used);
+#endif
+}
+
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
+/**
+ * bnx2x_set_gro_params - compute GRO values
+ *
+ * @skb: packet skb
+ * @parsing_flags: parsing flags from the START CQE
+ * @len_on_bd: total length of the first packet for the
+ * aggregation.
+ * @pkt_len: length of all segments
+ * @num_of_coalesced_segs: count of segments
+ *
+ * The approximate value of the MSS for this aggregation is calculated
+ * using its first packet.
+ * Also computes the number of aggregated segments and the gso_type.
+ */
+static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
+ u16 len_on_bd, unsigned int pkt_len,
+ u16 num_of_coalesced_segs)
+{
+ /* TPA aggregation won't have either IP options or TCP options
+ * other than timestamp or IPv6 extension headers.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+ if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6) {
+ hdrs_len += sizeof(struct ipv6hdr);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
+ hdrs_len += sizeof(struct iphdr);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ /* Check if there was a TCP timestamp; if there was, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
+}
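+
+/* Worked example (not taken from the driver): an IPv4 aggregation with no
+ * timestamp option and len_on_bd of 1514 gives
+ * hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) = 54, so
+ * gso_size = 1514 - 54 = 1460 - the familiar Ethernet MSS.
+ */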
+
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
+{
+ struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+ struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+ struct bnx2x_alloc_pool *pool = &fp->page_pool;
+ dma_addr_t mapping;
+
+ if (!pool->page) {
+ pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+ if (unlikely(!pool->page))
+ return -ENOMEM;
+
+ pool->offset = 0;
+ }
+
+ mapping = dma_map_page(&bp->pdev->dev, pool->page,
+ pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ BNX2X_ERR("Can't map sge\n");
+ return -ENOMEM;
+ }
+
+ sw_buf->page = pool->page;
+ sw_buf->offset = pool->offset;
+
+ dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+ sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+ sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+ pool->offset += SGE_PAGE_SIZE;
+ if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
+ get_page(pool->page);
+ else
+ pool->page = NULL;
+ return 0;
+}
+
+static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct sk_buff *skb,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
+{
+ struct sw_rx_page *rx_pg, old_rx_pg;
+ u32 i, frag_len, frag_size;
+ int err, j, frag_id = 0;
+ u16 len_on_bd = tpa_info->len_on_bd;
+ u16 full_page = 0, gro_size = 0;
+
+ frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
+
+ if (fp->mode == TPA_MODE_GRO) {
+ gro_size = tpa_info->gro_size;
+ full_page = tpa_info->full_page;
+ }
+
+ /* This is needed in order to enable forwarding support */
+ if (frag_size)
+ bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
+ le16_to_cpu(cqe->pkt_len),
+ le16_to_cpu(cqe->num_of_coalesced_segs));
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
+ BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
+ pages, cqe_idx);
+ BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
+ bnx2x_panic();
+ return -EINVAL;
+ }
+#endif
+
+ /* Run through the SGL and compose the fragmented skb */
+ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
+ u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
+
+ /* FW gives the indices of the SGE as if the ring is an array
+ * (meaning that the "next" element will consume 2 indices)
+ */
+ if (fp->mode == TPA_MODE_GRO)
+ frag_len = min_t(u32, frag_size, (u32)full_page);
+ else /* LRO */
+ frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
+
+ rx_pg = &fp->rx_page_ring[sge_idx];
+ old_rx_pg = *rx_pg;
+
+ /* If we fail to allocate a substitute page, we simply stop
+ * where we are and drop the whole packet
+ */
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
+ return err;
+ }
+
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+ /* Add one frag and update the appropriate fields in the skb */
+ if (fp->mode == TPA_MODE_LRO)
+ skb_fill_page_desc(skb, j, old_rx_pg.page,
+ old_rx_pg.offset, frag_len);
+ else { /* GRO */
+ int rem;
+ int offset = 0;
+ for (rem = frag_len; rem > 0; rem -= gro_size) {
+ int len = rem > gro_size ? gro_size : rem;
+ skb_fill_page_desc(skb, frag_id++,
+ old_rx_pg.page,
+ old_rx_pg.offset + offset,
+ len);
+ if (offset)
+ get_page(old_rx_pg.page);
+ offset += len;
+ }
+ }
+
+ skb->data_len += frag_len;
+ skb->truesize += SGE_PAGES;
+ skb->len += frag_len;
+
+ frag_size -= frag_len;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
+{
+ struct sk_buff *skb;
+
+ if (fp->rx_frag_size)
+ skb = build_skb(data, fp->rx_frag_size);
+ else
+ skb = slab_build_skb(data);
+ return skb;
+}
+
+static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
+{
+ if (fp->rx_frag_size)
+ skb_free_frag(data);
+ else
+ kfree(data);
+}
+
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
+{
+ if (fp->rx_frag_size) {
+ /* GFP_KERNEL allocations are used only during initialization */
+ if (unlikely(gfpflags_allow_blocking(gfp_mask)))
+ return (void *)__get_free_page(gfp_mask);
+
+ return napi_alloc_frag(fp->rx_frag_size);
+ }
+
+ return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
+}
+
+#ifdef CONFIG_INET
+static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+}
+
+static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+}
+
+static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
+ void (*gro_func)(struct bnx2x*, struct sk_buff*))
+{
+ skb_reset_network_header(skb);
+ gro_func(bp, skb);
+ tcp_gro_complete(skb);
+}
+#endif
+
+static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ switch (be16_to_cpu(skb->protocol)) {
+ case ETH_P_IP:
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
+ break;
+ case ETH_P_IPV6:
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
+ break;
+ default:
+ netdev_WARN_ONCE(bp->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
+ }
+ }
+#endif
+ skb_record_rx_queue(skb, fp->rx_queue);
+ napi_gro_receive(&fp->napi, skb);
+}
+
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
+{
+ struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+ u8 pad = tpa_info->placement_offset;
+ u16 len = tpa_info->len_on_bd;
+ struct sk_buff *skb = NULL;
+ u8 *new_data, *data = rx_buf->data;
+ u8 old_tpa_state = tpa_info->tpa_state;
+
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+ /* If there was an error during the handling of the TPA_START -
+ * drop this aggregation.
+ */
+ if (old_tpa_state == BNX2X_TPA_ERROR)
+ goto drop;
+
+ /* Try to allocate the new data */
+ new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
+ /* Unmap skb in the pool anyway, as we are going to change
+ * pool entry status to BNX2X_TPA_STOP even if new skb allocation
+ * fails.
+ */
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+ if (likely(new_data))
+ skb = bnx2x_build_skb(fp, data);
+
+ if (likely(skb)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ if (pad + len > fp->rx_buf_size) {
+ BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
+ pad, len, fp->rx_buf_size);
+ bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
+ return;
+ }
+#endif
+
+ skb_reserve(skb, pad + NET_SKB_PAD);
+ skb_put(skb, len);
+ skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
+ skb, cqe, cqe_idx)) {
+ if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
+ bnx2x_gro_receive(bp, fp, skb);
+ } else {
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate new pages - dropping packet!\n");
+ dev_kfree_skb_any(skb);
+ }
+
+ /* put new data in bin */
+ rx_buf->data = new_data;
+
+ return;
+ }
+ if (new_data)
+ bnx2x_frag_free(fp, new_data);
+drop:
+ /* drop the packet and keep the buffer in the bin */
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate or map a new skb - dropping packet!\n");
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
+}
+
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
+{
+ u8 *data;
+ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+ struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+ dma_addr_t mapping;
+
+ data = bnx2x_frag_alloc(fp, gfp_mask);
+ if (unlikely(data == NULL))
+ return -ENOMEM;
+
+ mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+ fp->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ bnx2x_frag_free(fp, data);
+ BNX2X_ERR("Can't map rx data\n");
+ return -ENOMEM;
+ }
+
+ rx_buf->data = data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+ rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+ return 0;
+}
+
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp,
+ struct bnx2x_eth_q_stats *qstats)
+{
+ /* Do nothing if no L4 csum validation was done.
+ * We do not check whether IP csum was validated. For IPv4 we assume
+ * that if the card got as far as validating the L4 csum, it also
+ * validated the IP csum. IPv6 has no IP csum.
+ */
+ if (cqe->fast_path_cqe.status_flags &
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
+ return;
+
+ /* If L4 validation was done, check if an error was found. */
+
+ if (cqe->fast_path_cqe.type_error_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+ qstats->hw_csum_err++;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+ u16 sw_comp_cons, sw_comp_prod;
+ int rx_pkt = 0;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return 0;
+#endif
+ if (budget <= 0)
+ return rx_pkt;
+
+ bd_cons = fp->rx_bd_cons;
+ bd_prod = fp->rx_bd_prod;
+ bd_prod_fw = bd_prod;
+ sw_comp_cons = fp->rx_comp_cons;
+ sw_comp_prod = fp->rx_comp_prod;
+
+ comp_ring_cons = RCQ_BD(sw_comp_cons);
+ cqe = &fp->rx_comp_ring[comp_ring_cons];
+ cqe_fp = &cqe->fast_path_cqe;
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
+
+ while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
+ struct sw_rx_bd *rx_buf = NULL;
+ struct sk_buff *skb;
+ u8 cqe_fp_flags;
+ enum eth_rx_cqe_type cqe_fp_type;
+ u16 len, pad, queue;
+ u8 *data;
+ u32 rxhash;
+ enum pkt_hash_types rxhash_type;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return 0;
+#endif
+
+ bd_prod = RX_BD(bd_prod);
+ bd_cons = RX_BD(bd_cons);
+
+ /* A rmb() is required to ensure that the CQE is not read
+ * before it is written by the adapter DMA. PCI ordering
+ * rules will make sure the other fields are written before
+ * the marker at the end of struct eth_fast_path_rx_cqe
+ * but without rmb() a weakly ordered processor can process
+ * stale data. Without the barrier the TPA state-machine might
+ * enter an inconsistent state and the kernel stack might be
+ * provided with an incorrect packet description - these lead
+ * to various kernel crashes.
+ */
+ rmb();
+
+ cqe_fp_flags = cqe_fp->type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+
+ DP(NETIF_MSG_RX_STATUS,
+ "CQE type %x err %x status %x queue %x vlan %x len %u\n",
+ CQE_TYPE(cqe_fp_flags),
+ cqe_fp_flags, cqe_fp->status_flags,
+ le32_to_cpu(cqe_fp->rss_hash_result),
+ le16_to_cpu(cqe_fp->vlan_tag),
+ le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
+
+ /* is this a slowpath msg? */
+ if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
+ bnx2x_sp_event(fp, cqe);
+ goto next_cqe;
+ }
+
+ rx_buf = &fp->rx_buf_ring[bd_cons];
+ data = rx_buf->data;
+
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ struct bnx2x_agg_info *tpa_info;
+ u16 frag_size, pages;
+#ifdef BNX2X_STOP_ON_ERROR
+ /* sanity check */
+ if (fp->mode == TPA_MODE_DISABLED &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
+ CQE_TYPE(cqe_fp_type));
+#endif
+
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_start on queue %d\n",
+ queue);
+
+ bnx2x_tpa_start(fp, queue,
+ bd_cons, bd_prod,
+ cqe_fp);
+
+ goto next_rx;
+ }
+ queue = cqe->end_agg_cqe.queue_index;
+ tpa_info = &fp->tpa_info[queue];
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
+ tpa_info->len_on_bd;
+
+ if (fp->mode == TPA_MODE_GRO)
+ pages = (frag_size + tpa_info->full_page - 1) /
+ tpa_info->full_page;
+ else
+ pages = SGE_PAGE_ALIGN(frag_size) >>
+ SGE_PAGE_SHIFT;
+
+ bnx2x_tpa_stop(bp, fp, tpa_info, pages,
+ &cqe->end_agg_cqe, comp_ring_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (bp->panic)
+ return 0;
+#endif
+
+ bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
+ goto next_cqe;
+ }
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
+ pad += NET_SKB_PAD;
+ prefetch(data + pad); /* speed up eth_type_trans() */
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+ "ERROR flags %x rx packet %u\n",
+ cqe_fp_flags, sw_comp_cons);
+ bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
+ goto reuse_rx;
+ }
+
+ /* Since we don't have a jumbo ring,
+ * copy small packets if mtu > 1500
+ */
+ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+ (len <= RX_COPY_THRESH)) {
+ skb = napi_alloc_skb(&fp->napi, len);
+ if (skb == NULL) {
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+ "ERROR packet dropped because of alloc failure\n");
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
+ goto reuse_rx;
+ }
+ memcpy(skb->data, data + pad, len);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+ } else {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+ GFP_ATOMIC) == 0)) {
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb = bnx2x_build_skb(fp, data);
+ if (unlikely(!skb)) {
+ bnx2x_frag_free(fp, data);
+ bnx2x_fp_qstats(bp, fp)->
+ rx_skb_alloc_failed++;
+ goto next_rx;
+ }
+ skb_reserve(skb, pad);
+ } else {
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+ "ERROR packet dropped because of alloc failure\n");
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
+reuse_rx:
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+ goto next_rx;
+ }
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ /* Set Toeplitz hash for a non-LRO skb */
+ rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+ skb_set_hash(skb, rxhash, rxhash_type);
+
+ skb_checksum_none_assert(skb);
+
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ bnx2x_csum_validate(skb, cqe, fp,
+ bnx2x_fp_qstats(bp, fp));
+
+ skb_record_rx_queue(skb, fp->rx_queue);
+
+ /* Check if this packet was timestamped */
+ if (unlikely(cqe->fast_path_cqe.type_error_flags &
+ (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
+ bnx2x_set_rx_ts(bp, skb);
+
+ if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+ PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(cqe_fp->vlan_tag));
+
+ napi_gro_receive(&fp->napi, skb);
+next_rx:
+ rx_buf->data = NULL;
+
+ bd_cons = NEXT_RX_IDX(bd_cons);
+ bd_prod = NEXT_RX_IDX(bd_prod);
+ bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
+ rx_pkt++;
+next_cqe:
+ sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+ sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+
+ /* mark CQE as free */
+ BNX2X_SEED_CQE(cqe_fp);
+
+ if (rx_pkt == budget)
+ break;
+
+ comp_ring_cons = RCQ_BD(sw_comp_cons);
+ cqe = &fp->rx_comp_ring[comp_ring_cons];
+ cqe_fp = &cqe->fast_path_cqe;
+ } /* while */
+
+ fp->rx_bd_cons = bd_cons;
+ fp->rx_bd_prod = bd_prod_fw;
+ fp->rx_comp_cons = sw_comp_cons;
+ fp->rx_comp_prod = sw_comp_prod;
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+ fp->rx_sge_prod);
+
+ return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct bnx2x_fastpath *fp = fp_cookie;
+ struct bnx2x *bp = fp->bp;
+ u8 cos;
+
+ DP(NETIF_MSG_INTR,
+ "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
+ fp->index, fp->fw_sb_id, fp->igu_sb_id);
+
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ /* Handle Rx and Tx according to MSI-X vector */
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
+
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
+
+ return IRQ_HANDLED;
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+ mutex_lock(&bp->port.phy_mutex);
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+ mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
+ */
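+		/* maxCfg is a percentage of the line rate in percent-BW
+		 * mode and is in 100 Mbps units in SD mode.
+		 */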
+ if (IS_MF_PERCENT_BW(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ }
+ }
+
+ return line_speed;
+}
+
+/**
+ * bnx2x_fill_report_data - fill link report data to report
+ *
+ * @bp: driver handle
+ * @data: link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static void bnx2x_fill_report_data(struct bnx2x *bp,
+ struct bnx2x_link_report_data *data)
+{
+ memset(data, 0, sizeof(*data));
+
+ if (IS_PF(bp)) {
+ /* Fill the report data: effective line speed */
+ data->line_speed = bnx2x_get_mf_speed(bp);
+
+ /* Link is down */
+ if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+
+ if (!BNX2X_NUM_ETH_QUEUES(bp))
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+
+ /* Full DUPLEX */
+ if (bp->link_vars.duplex == DUPLEX_FULL)
+ __set_bit(BNX2X_LINK_REPORT_FD,
+ &data->link_report_flags);
+
+ /* Rx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &data->link_report_flags);
+
+ /* Tx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &data->link_report_flags);
+ } else { /* VF */
+ *data = bp->vf_link_vars;
+ }
+}
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Calls __bnx2x_link_report() under the same locking scheme
+ * as the link/PHY state management code to ensure consistent
+ * link reporting.
+ */
+void bnx2x_link_report(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ __bnx2x_link_report(bp);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Non-atomic implementation.
+ * Must be called under the phy_lock.
+ */
+void __bnx2x_link_report(struct bnx2x *bp)
+{
+ struct bnx2x_link_report_data cur_data;
+
+ if (bp->force_link_down) {
+ bp->link_vars.link_up = 0;
+ return;
+ }
+
+ /* reread mf_cfg */
+ if (IS_PF(bp) && !CHIP_IS_E1(bp))
+ bnx2x_read_mf_cfg(bp);
+
+ /* Read the current link report info */
+ bnx2x_fill_report_data(bp, &cur_data);
+
+ /* Don't report link down or exactly the same link status twice */
+ if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
+ (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags) &&
+ test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)))
+ return;
+
+ bp->link_cnt++;
+
+	/* We are about to report new link parameters -
+	 * remember the current data for next time.
+	 */
+ memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
+
+ /* propagate status to VFs */
+ if (IS_PF(bp))
+ bnx2x_iov_link_update(bp);
+
+ if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)) {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ return;
+ } else {
+ const char *duplex;
+ const char *flow;
+
+ netif_carrier_on(bp->dev);
+
+ if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
+ &cur_data.link_report_flags))
+ duplex = "full";
+ else
+ duplex = "half";
+
+ /* Handle the FC at the end so that only these flags would be
+ * possibly set. This way we may easily check if there is no FC
+ * enabled.
+ */
+ if (cur_data.link_report_flags) {
+ if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags)) {
+ if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags))
+ flow = "ON - receive & transmit";
+ else
+ flow = "ON - receive";
+ } else {
+ flow = "ON - transmit";
+ }
+ } else {
+ flow = "none";
+ }
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ cur_data.line_speed, duplex, flow);
+ }
+}
+
+static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+ int i;
+
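+	/* The tail of each SGE page is reserved as a "next page" pointer
+	 * chaining the pages into a ring; only those elements are written
+	 * here.
+	 */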
+ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+ struct eth_rx_sge *sge;
+
+ sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+ sge->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+ sge->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+ }
+}
+
+static void bnx2x_free_tpa_pool(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, int last)
+{
+ int i;
+
+ for (i = 0; i < last; i++) {
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+ struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+ u8 *data = first_buf->data;
+
+ if (data == NULL) {
+ DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+ continue;
+ }
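+		/* A DMA mapping is held only while an aggregation is in
+		 * progress (BNX2X_TPA_START); unmap it before freeing.
+		 */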
+ if (tpa_info->tpa_state == BNX2X_TPA_START)
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(first_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+ bnx2x_frag_free(fp, data);
+ first_buf->data = NULL;
+ }
+}
+
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue_cnic(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ fp->rx_bd_cons = 0;
+
+ /* Activate BD ring */
+		/* Warning!
+		 * This will generate an interrupt (to the TSTORM);
+		 * it must only be done after the chip is initialized.
+		 */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
+ }
+}
+
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ u16 ring_prod;
+ int i, j;
+
+ /* Allocate TPA resources */
+ for_each_eth_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ DP(NETIF_MSG_IFUP,
+ "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
+ if (fp->mode != TPA_MODE_DISABLED) {
+ /* Fill the per-aggregation pool */
+ for (i = 0; i < MAX_AGG_QS(bp); i++) {
+ struct bnx2x_agg_info *tpa_info =
+ &fp->tpa_info[i];
+ struct sw_rx_bd *first_buf =
+ &tpa_info->first_buf;
+
+ first_buf->data =
+ bnx2x_frag_alloc(fp, GFP_KERNEL);
+ if (!first_buf->data) {
+ BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
+ j);
+ bnx2x_free_tpa_pool(bp, fp, i);
+ fp->mode = TPA_MODE_DISABLED;
+ break;
+ }
+ dma_unmap_addr_set(first_buf, mapping, 0);
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
+ }
+
+ /* "next page" elements initialization */
+ bnx2x_set_next_page_sgl(fp);
+
+ /* set SGEs bit mask */
+ bnx2x_init_sge_ring_bit_mask(fp);
+
+ /* Allocate SGEs and initialize the ring elements */
+ for (i = 0, ring_prod = 0;
+ i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+ GFP_KERNEL) < 0) {
+ BNX2X_ERR("was only able to allocate %d rx sges\n",
+ i);
+ BNX2X_ERR("disabling TPA for queue[%d]\n",
+ j);
+ /* Cleanup already allocated elements */
+ bnx2x_free_rx_sge_range(bp, fp,
+ ring_prod);
+ bnx2x_free_tpa_pool(bp, fp,
+ MAX_AGG_QS(bp));
+ fp->mode = TPA_MODE_DISABLED;
+ ring_prod = 0;
+ break;
+ }
+ ring_prod = NEXT_SGE_IDX(ring_prod);
+ }
+
+ fp->rx_sge_prod = ring_prod;
+ }
+ }
+
+ for_each_eth_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ fp->rx_bd_cons = 0;
+
+ /* Activate BD ring */
+		/* Warning!
+		 * This will generate an interrupt (to the TSTORM);
+		 * it must only be done after the chip is initialized.
+		 */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
+
+ if (j != 0)
+ continue;
+
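+		/* On E1 the leading queue's RCQ address must also be
+		 * published to USTORM internal memory (note the
+		 * WORKAROUND offset used below).
+		 */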
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+ U64_LO(fp->rx_comp_mapping));
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+ U64_HI(fp->rx_comp_mapping));
+ }
+ }
+}
+
+static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
+{
+ u8 cos;
+ struct bnx2x *bp = fp->bp;
+
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+ unsigned pkts_compl = 0, bytes_compl = 0;
+
+ u16 sw_prod = txdata->tx_pkt_prod;
+ u16 sw_cons = txdata->tx_pkt_cons;
+
+ while (sw_cons != sw_prod) {
+ bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+ &pkts_compl, &bytes_compl);
+ sw_cons++;
+ }
+
+ netdev_tx_reset_queue(
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
+ }
+}
+
+static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_tx_queue_cnic(bp, i) {
+ bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+ }
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i) {
+ bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+ }
+}
+
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ int i;
+
+ /* ring wasn't allocated */
+ if (fp->rx_buf_ring == NULL)
+ return;
+
+ for (i = 0; i < NUM_RX_BD; i++) {
+ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+ u8 *data = rx_buf->data;
+
+ if (data == NULL)
+ continue;
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->data = NULL;
+ bnx2x_frag_free(fp, data);
+ }
+}
+
+static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue_cnic(bp, j) {
+ bnx2x_free_rx_bds(&bp->fp[j]);
+ }
+}
+
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_eth_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ bnx2x_free_rx_bds(fp);
+
+ if (fp->mode != TPA_MODE_DISABLED)
+ bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
+ }
+}
+
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+{
+ bnx2x_free_tx_skbs_cnic(bp);
+ bnx2x_free_rx_skbs_cnic(bp);
+}
+
+void bnx2x_free_skbs(struct bnx2x *bp)
+{
+ bnx2x_free_tx_skbs(bp);
+ bnx2x_free_rx_skbs(bp);
+}
+
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+ /* load old values */
+ u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+ if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+ /* leave all but MAX value */
+ mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+ /* set new MAX value */
+ mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+ & FUNC_MF_CFG_MAX_BW_MASK;
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+ }
+}
+
+/**
+ * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
+ *
+ * @bp: driver handle
+ * @nvecs: number of vectors to be released
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
+{
+ int i, offset = 0;
+
+ if (nvecs == offset)
+ return;
+
+ /* VFs don't have a default SB */
+ if (IS_PF(bp)) {
+ free_irq(bp->msix_table[offset].vector, bp->dev);
+ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+ bp->msix_table[offset].vector);
+ offset++;
+ }
+
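+	/* The CNIC slot is skipped rather than freed here; the cnic
+	 * module requests that vector itself.
+	 */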
+ if (CNIC_SUPPORT(bp)) {
+ if (nvecs == offset)
+ return;
+ offset++;
+ }
+
+ for_each_eth_queue(bp, i) {
+ if (nvecs == offset)
+ return;
+ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
+ i, bp->msix_table[offset].vector);
+
+ free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
+ }
+}
+
+void bnx2x_free_irq(struct bnx2x *bp)
+{
+ if (bp->flags & USING_MSIX_FLAG &&
+ !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+ int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
+
+ /* vfs don't have a default status block */
+ if (IS_PF(bp))
+ nvecs++;
+
+ bnx2x_free_msix_irqs(bp, nvecs);
+ } else {
+ free_irq(bp->dev->irq, bp->dev);
+ }
+}
+
+int bnx2x_enable_msix(struct bnx2x *bp)
+{
+ int msix_vec = 0, i, rc;
+
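+	/* MSI-X table layout: [default SB (PF only)] [CNIC] [ETH queue 0..N-1] */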
+ /* VFs don't have a default status block */
+ if (IS_PF(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
+ }
+
+ /* Cnic requires an msix vector for itself */
+ if (CNIC_SUPPORT(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
+ msix_vec, bp->msix_table[msix_vec].entry);
+ msix_vec++;
+ }
+
+ /* We need separate vectors for ETH queues only (not FCoE) */
+ for_each_eth_queue(bp, i) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
+ msix_vec, msix_vec, i);
+ msix_vec++;
+ }
+
+ DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
+ msix_vec);
+
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+ BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
+ /*
+ * reconfigure number of tx/rx queues according to available
+ * MSI-X vectors
+ */
+ if (rc == -ENOSPC) {
+ /* Get by with single vector */
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+ if (rc < 0) {
+ BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
+ rc);
+ goto no_msix;
+ }
+
+ BNX2X_DEV_INFO("Using single MSI-X vector\n");
+ bp->flags |= USING_SINGLE_MSIX_FLAG;
+
+ BNX2X_DEV_INFO("set number of queues to 1\n");
+ bp->num_ethernet_queues = 1;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+ } else if (rc < 0) {
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+ goto no_msix;
+ } else if (rc < msix_vec) {
+		/* how many fewer vectors did we get? */
+ int diff = msix_vec - rc;
+
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+		/*
+		 * decrease the number of queues by the number of unallocated entries
+		 */
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
+ bp->num_queues);
+ }
+
+ bp->flags |= USING_MSIX_FLAG;
+
+ return 0;
+
+no_msix:
+	/* fall back to INTx if not enough memory */
+ if (rc == -ENOMEM)
+ bp->flags |= DISABLE_MSI_FLAG;
+
+ return rc;
+}
+
+static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+{
+ int i, rc, offset = 0;
+
+ /* no default status block for vf */
+ if (IS_PF(bp)) {
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
+ bp->dev->name, bp->dev);
+ if (rc) {
+ BNX2X_ERR("request sp irq failed\n");
+ return -EBUSY;
+ }
+ }
+
+ if (CNIC_SUPPORT(bp))
+ offset++;
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ bp->dev->name, i);
+
+ rc = request_irq(bp->msix_table[offset].vector,
+ bnx2x_msix_fp_int, 0, fp->name, fp);
+ if (rc) {
+ BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
+ bp->msix_table[offset].vector, rc);
+ bnx2x_free_msix_irqs(bp, offset);
+ return -EBUSY;
+ }
+
+ offset++;
+ }
+
+ i = BNX2X_NUM_ETH_QUEUES(bp);
+ if (IS_PF(bp)) {
+ offset = 1 + CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ } else {
+ offset = CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ }
+ return 0;
+}
+
+int bnx2x_enable_msi(struct bnx2x *bp)
+{
+ int rc;
+
+ rc = pci_enable_msi(bp->pdev);
+ if (rc) {
+ BNX2X_DEV_INFO("MSI is not attainable\n");
+ return -1;
+ }
+ bp->flags |= USING_MSI_FLAG;
+
+ return 0;
+}
+
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+ unsigned long flags;
+ unsigned int irq;
+
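+	/* Only legacy INTx may be shared with other devices; MSI and
+	 * MSI-X vectors are exclusive to this function.
+	 */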
+ if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
+ flags = 0;
+ else
+ flags = IRQF_SHARED;
+
+ if (bp->flags & USING_MSIX_FLAG)
+ irq = bp->msix_table[0].vector;
+ else
+ irq = bp->pdev->irq;
+
+ return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
+}
+
+static int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+ int rc = 0;
+ if (bp->flags & USING_MSIX_FLAG &&
+ !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+ rc = bnx2x_req_msix_irqs(bp);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnx2x_req_irq(bp);
+ if (rc) {
+ BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
+ return rc;
+ }
+ if (bp->flags & USING_MSI_FLAG) {
+ bp->dev->irq = bp->pdev->irq;
+ netdev_info(bp->dev, "using MSI IRQ %d\n",
+ bp->dev->irq);
+ }
+ if (bp->flags & USING_MSIX_FLAG) {
+ bp->dev->irq = bp->msix_table[0].vector;
+ netdev_info(bp->dev, "using MSIX IRQ %d\n",
+ bp->dev->irq);
+ }
+ }
+
+ return 0;
+}
+
+static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i) {
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i) {
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i) {
+ napi_disable(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i) {
+ napi_disable(&bnx2x_fp(bp, i, napi));
+ }
+}
+
+void bnx2x_netif_start(struct bnx2x *bp)
+{
+ if (netif_running(bp->dev)) {
+ bnx2x_napi_enable(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_napi_enable_cnic(bp);
+ bnx2x_int_enable(bp);
+ if (bp->state == BNX2X_STATE_OPEN)
+ netif_tx_wake_all_queues(bp->dev);
+ }
+}
+
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
+{
+ bnx2x_int_disable_sync(bp, disable_hw);
+ bnx2x_napi_disable(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_napi_disable_cnic(bp);
+}
+
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ /* Skip VLAN tag if present */
+ if (ether_type == ETH_P_8021Q) {
+ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+
+ ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ }
+
+ /* If ethertype is FCoE or FIP - use FCoE ring */
+ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+ return bnx2x_fcoe_tx(bp, txq_index);
+ }
+
+ /* select a non-FCoE queue */
+ return netdev_pick_tx(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+}
+
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+ /* RSS queues */
+ bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
+
+ /* override in STORAGE SD modes */
+ if (IS_MF_STORAGE_ONLY(bp))
+ bp->num_ethernet_queues = 1;
+
+ /* Add special queues */
+ bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
+}
+
+/**
+ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
+ *
+ * @bp: Driver handle
+ * @include_cnic: handle cnic case
+ *
+ * We currently support at most 16 Tx queues for each CoS, thus we will
+ * allocate a multiple of 16 for ETH L2 rings according to the value of
+ * bp->max_cos.
+ *
+ * If there is an FCoE L2 queue the appropriate Tx queue will have the next
+ * index after all ETH L2 indices.
+ *
+ * If the actual number of Tx queues (for each CoS) is less than 16 then there
+ * will be holes at the end of each group of 16 ETH L2 indices (0..15,
+ * 16..31, ...) with indices that are not coupled with any real Tx queue.
+ *
+ * The proper configuration of skb->queue_mapping is handled by
+ * bnx2x_select_queue() and __skb_tx_hash().
+ *
+ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
+ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
+ */
+static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
+{
+ int rc, tx, rx;
+
+ tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+ rx = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/* account for FCoE queue */
+ if (include_cnic && !NO_FCOE(bp)) {
+ rx++;
+ tx++;
+ }
+
+ rc = netif_set_real_num_tx_queues(bp->dev, tx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(bp->dev, rx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
+ return rc;
+ }
+
+ DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ tx, rx);
+
+ return rc;
+}
+
+static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ u32 mtu;
+
+ /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+ if (IS_FCOE_IDX(i))
+			/*
+			 * Although no IP frames are expected to arrive on
+			 * this ring, we still want to add
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+ mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ mtu = bp->dev->mtu;
+ fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+ IP_HEADER_ALIGNMENT_PADDING +
+ ETH_OVERHEAD +
+ mtu +
+ BNX2X_FW_RX_ALIGN_END;
+ fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
+ /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
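+		/* rx_frag_size == 0 makes the buffer allocator
+		 * (bnx2x_frag_alloc()) fall back to kmalloc() instead of
+		 * page frags.
+		 */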
+ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
+ fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
+ else
+ fp->rx_frag_size = 0;
+ }
+}
+
+static int bnx2x_init_rss(struct bnx2x *bp)
+{
+ int i;
+ u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Prepare the initial contents for the indirection table if RSS is
+ * enabled
+ */
+ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+ bp->rss_conf_obj.ind_table[i] =
+ bp->fp->cl_id +
+ ethtool_rxfh_indir_default(i, num_eth_queues);
+
+	/*
+	 * For 57710 and 57711 the SEARCHER configuration (rss_keys) is
+	 * per-port, so if explicit configuration is needed, do it only
+	 * for a PMF.
+	 *
+	 * For 57712 and newer, on the other hand, it's a per-function
+	 * configuration.
+	 */
+ return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
+}
+
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable)
+{
+ struct bnx2x_config_rss_params params = {NULL};
+
+	/* Although RSS is meaningless when there is a single HW queue, we
+	 * still need it enabled in order to have HW Rx hash generated.
+ *
+ * if (!is_eth_multi(bp))
+ * bp->multi_mode = ETH_RSS_MODE_DISABLED;
+ */
+
+ params.rss_obj = rss_obj;
+
+ __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ if (enable) {
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* valid only for TUNN_MODE_VXLAN tunnel mode */
+ __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
+ /* valid only for TUNN_MODE_GRE tunnel mode */
+ __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+ }
+ } else {
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ }
+
+ /* Hash bits */
+ params.rss_result_mask = MULTI_MASK;
+
+ memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
+
+ if (config_hash) {
+ /* RSS keys */
+ netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
+ __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+ }
+
+ if (IS_PF(bp))
+ return bnx2x_config_rss(bp, &params);
+ else
+ return bnx2x_vfpf_config_rss(bp, &params);
+}
+
+static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists without sending
+ * ramrods. Must be run when interrupts are disabled.
+ */
+void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
+
+ /***************** Cleanup MACs' object first *************************/
+
+	/* Wait for completion of the requested commands */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Perform a dry cleanup */
+ __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+ /* Clean ETH primary MAC */
+ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+ /* Cleanup UC list */
+ vlan_mac_flags = 0;
+ __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+ /***************** Now clean mcast object *****************************/
+ rparam.mcast_obj = &bp->mcast_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+ /* Add a DEL command... - Since we're doing a driver cleanup only,
+ * we take a lock surrounding both the initial send and the CONTs,
+ * as we don't want a true completion to disrupt us in the middle.
+ */
+ netif_addr_lock_bh(bp->dev);
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
+ rc);
+
+ /* ...and wait until all pending commands are cleared */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ while (rc != 0) {
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+ rc);
+ netif_addr_unlock_bh(bp->dev);
+ return;
+ }
+
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ }
+ netif_addr_unlock_bh(bp->dev);
+}
+
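+/* On normal builds the LOAD_ERROR_EXIT macros jump to the matching error
+ * label for cleanup; with BNX2X_STOP_ON_ERROR they set the panic flag and
+ * return immediately, preserving the failed state for debugging.
+ */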
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ goto label; \
+ } while (0)
+
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+ do { \
+ bp->cnic_loaded = false; \
+ goto label; \
+ } while (0)
+#else /*BNX2X_STOP_ON_ERROR*/
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ (bp)->panic = 1; \
+ return -EBUSY; \
+ } while (0)
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+ do { \
+ bp->cnic_loaded = false; \
+ (bp)->panic = 1; \
+ return -EBUSY; \
+ } while (0)
+#endif /*BNX2X_STOP_ON_ERROR*/
+
+static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
+{
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+}
+
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups, vf_headroom = 0;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
+
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
+ * and fcoe l2 queue) stats + num of queues (which includes another 1
+ * for fcoe l2 queue if applicable)
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
+
+ /* vf stats appear in the request list, but their data is allocated by
+ * the VFs themselves. We don't include them in the bp->fw_stats_num as
+ * it is used to determine where to place the vf stats queries in the
+ * request struct
+ */
+ if (IS_SRIOV(bp))
+ vf_headroom = bnx2x_vf_headroom(bp);
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups =
+ (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
+ 1 : 0));
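+	/* i.e. DIV_ROUND_UP(fw_stats_num + vf_headroom, STATS_QUERY_CMD_COUNT) */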
+
+ DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+ bp->fw_stats_num, vf_headroom, num_groups);
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ /* Data for statistics requests + stats_counter
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+	 * Memory for FCoE offloaded statistics is counted anyway,
+	 * even if it will not be sent.
+ * VF stats are not accounted for here as the data of VF stats is stored
+ * in memory allocated by the VF, not here.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (!bp->fw_stats)
+ goto alloc_mem_err;
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+
+ DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping));
+ DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
+ U64_HI(bp->fw_stats_data_mapping),
+ U64_LO(bp->fw_stats_data_mapping));
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_fw_stats_mem(bp);
+ BNX2X_ERR("Can't allocate FW stats memory\n");
+ return -ENOMEM;
+}
+
+/* send load request to mcp and analyze response */
+static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
+{
+ u32 param;
+
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
+ param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
+
+ if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
+ param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
+
+ /* load request */
+ (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
+
+ /* if mcp fails to respond we must abort */
+ if (!(*load_code)) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ return -EBUSY;
+ }
+
+ /* If mcp refused (e.g. other port is in diagnostic mode) we
+ * must abort
+ */
+ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/* Check whether another PF has already loaded FW to the chip. In
+ * virtualized environments a PF from another VM may have already
+ * initialized the device, including loading FW.
+ */
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
+{
+ /* is another pf loaded on this engine? */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+ u32 loaded_fw;
+
+ /* read loaded FW from chip */
+ loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ loaded_fw_major = loaded_fw & 0xff;
+ loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+ loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+ loaded_fw_eng = (loaded_fw >> 24) & 0xff;
+
+ DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+ loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
+
+ /* abort nic load if version mismatch */
+ if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+ loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+ loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+ loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
+ if (print_err)
+ BNX2X_ERR("loaded FW incompatible. Aborting\n");
+ else
+ BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
+
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
+{
+ int path = BP_PATH(bp);
+
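+	/* bnx2x_load_count[path][0] counts all functions on the path;
+	 * [1 + port] counts functions per port. The first function to
+	 * load does COMMON init, the first on a port does PORT init,
+	 * everyone else does FUNCTION init.
+	 */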
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]++;
+ bnx2x_load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 1)
+ return FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (bnx2x_load_count[path][1 + port] == 1)
+ return FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ return FW_MSG_CODE_DRV_LOAD_FUNCTION;
+}
+
+/* mark PMF if applicable */
+static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
+{
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ bp->port.pmf = 1;
+ /* We need the barrier to ensure the ordering between the
+ * writing to bp->port.pmf here and reading it from the
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ } else {
+ bp->port.pmf = 0;
+ }
+
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+}
+
+static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
+{
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ if (SHMEM2_HAS(bp, afex_driver_support))
+ SHMEM2_WR(bp, afex_driver_support,
+ SHMEM_AFEX_SUPPORTED_VERSION_ONE);
+ }
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
+}
+
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @index: fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ int cos;
+ struct napi_struct orig_napi = fp->napi;
+ struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
+
+ /* bzero bnx2x_fastpath contents */
+ if (fp->tpa_info)
+ memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
+ sizeof(struct bnx2x_agg_info));
+ memset(fp, 0, sizeof(*fp));
+
+ /* Restore the NAPI object as it has been already initialized */
+ fp->napi = orig_napi;
+ fp->tpa_info = orig_tpa_info;
+ fp->bp = bp;
+ fp->index = index;
+ if (IS_ETH_FP(fp))
+ fp->max_cos = bp->max_cos;
+ else
+ /* Special queues support only one CoS */
+ fp->max_cos = 1;
+
+ /* Init txdata pointers */
+ if (IS_FCOE_FP(fp))
+ fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+ if (IS_ETH_FP(fp))
+ for_each_cos_in_tx_queue(fp, cos)
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+ BNX2X_NUM_ETH_QUEUES(bp) + index];
+
+ /* set the tpa flag for each queue. The tpa flag determines the queue
+ * minimal size so it must be set prior to queue memory allocation
+ */
+ if (bp->dev->features & NETIF_F_LRO)
+ fp->mode = TPA_MODE_LRO;
+ else if (bp->dev->features & NETIF_F_GRO_HW)
+ fp->mode = TPA_MODE_GRO;
+ else
+ fp->mode = TPA_MODE_DISABLED;
+
+ /* We don't want TPA if it's disabled in bp
+ * or if this is an FCoE L2 ring.
+ */
+ if (bp->disable_tpa || IS_FCOE_FP(fp))
+ fp->mode = TPA_MODE_DISABLED;
+}
+
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+ u32 cur;
+
+ if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+ return;
+
+ cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+ DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+ cur, state);
+
+ SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
+int bnx2x_load_cnic(struct bnx2x *bp)
+{
+ int i, rc, port = BP_PORT(bp);
+
+ DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
+
+ mutex_init(&bp->cnic_mutex);
+
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+ }
+
+ rc = bnx2x_alloc_fp_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for cnic fps\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ /* Update the number of queues with the cnic queues */
+ rc = bnx2x_set_real_num_queues(bp, 1);
+ if (rc) {
+ BNX2X_ERR("Unable to set real_num_queues including cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ /* Add all CNIC NAPI objects */
+ bnx2x_add_all_napi_cnic(bp);
+ DP(NETIF_MSG_IFUP, "cnic napi added\n");
+ bnx2x_napi_enable_cnic(bp);
+
+ rc = bnx2x_init_hw_func_cnic(bp);
+ if (rc)
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
+
+ bnx2x_nic_init_cnic(bp);
+
+ if (IS_PF(bp)) {
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+
+ /* setup cnic queues */
+ for_each_cnic_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ }
+ }
+ }
+
+ /* Initialize Rx filter. */
+ bnx2x_set_rx_mode_inner(bp);
+
+ /* re-read iscsi info */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
+ bp->cnic_loaded = true;
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+
+ DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
+
+ return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error_cnic2:
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+
+load_error_cnic1:
+ bnx2x_napi_disable_cnic(bp);
+ /* Update the number of queues without the cnic queues */
+ if (bnx2x_set_real_num_queues(bp, 0))
+ BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
+load_error_cnic0:
+ BNX2X_ERR("CNIC-related load failed\n");
+ bnx2x_free_fp_mem_cnic(bp);
+ bnx2x_free_mem_cnic(bp);
+ return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+ int port = BP_PORT(bp);
+ int i, rc = 0, load_code = 0;
+
+ DP(NETIF_MSG_IFUP, "Starting NIC load\n");
+ DP(NETIF_MSG_IFUP,
+ "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't load NIC when there is panic\n");
+ return -EPERM;
+ }
+#endif
+
+ bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+ /* zero the structure w/o any lock, before SP handler is initialized */
+ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags);
+
+ if (IS_PF(bp))
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
+
+	/*
+	 * Zero the fastpath structures while preserving invariants that are
+	 * allocated only once: napi, fp index, max_cos and the bp pointer.
+	 * Also set fp->mode and txdata_ptr.
+	 */
+ DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
+ for_each_queue(bp, i)
+ bnx2x_bz_fp(bp, i);
+ memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
+ bp->num_cnic_queues) *
+ sizeof(struct bnx2x_fp_txdata));
+
+ bp->fcoe_init = false;
+
+ /* Set the receive queues buffer size */
+ bnx2x_set_rx_buf_size(bp);
+
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory\n");
+ return rc;
+ }
+ }
+
+	/* This must be done after alloc mem, since it self-adjusts to the
+	 * amount of memory available for RSS queues
+	 */
+ rc = bnx2x_alloc_fp_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for fps\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+	/* Allocate memory for FW statistics */
+ rc = bnx2x_alloc_fw_stats_mem(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error0);
+
+ /* request pf to initialize status blocks */
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_init(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+	/* Since bnx2x_alloc_mem() may possibly update
+	 * bp->num_queues, bnx2x_set_real_num_queues() should always
+	 * come after it. At this stage cnic queues are not counted.
+	 */
+ rc = bnx2x_set_real_num_queues(bp, 0);
+ if (rc) {
+ BNX2X_ERR("Unable to set real_num_queues\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+	/* Configure multi-CoS mappings in the kernel.
+	 * This configuration may be overridden by a multi-class queue
+	 * discipline or by a DCBX negotiation result.
+	 */
+ bnx2x_setup_tc(bp->dev, bp->max_cos);
+
+ /* Add all NAPI objects */
+ bnx2x_add_all_napi(bp);
+ DP(NETIF_MSG_IFUP, "napi added\n");
+ bnx2x_napi_enable(bp);
+ bp->nic_stopped = false;
+
+ if (IS_PF(bp)) {
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
+ /* if mcp exists send load request and analyze response */
+ if (!BP_NOMCP(bp)) {
+ /* attempt to load pf */
+ rc = bnx2x_nic_load_request(bp, &load_code);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error1);
+
+ /* what did mcp say? */
+ rc = bnx2x_compare_fw_ver(bp, load_code, true);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+ } else {
+ load_code = bnx2x_nic_load_no_mcp(bp, port);
+ }
+
+ /* mark pmf if applicable */
+ bnx2x_nic_load_pmf(bp, load_code);
+
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
+
+ /* Initialize HW */
+ rc = bnx2x_init_hw(bp, load_code);
+ if (rc) {
+ BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+ }
+
+ bnx2x_pre_irq_nic_init(bp);
+
+ /* Connect to IRQs */
+ rc = bnx2x_setup_irqs(bp);
+ if (rc) {
+ BNX2X_ERR("setup irqs failed\n");
+ if (IS_PF(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+
+ /* Init per-function objects */
+ if (IS_PF(bp)) {
+ /* Setup NIC internals and enable interrupts */
+ bnx2x_post_irq_nic_init(bp, load_code);
+
+ bnx2x_init_bp_objs(bp);
+ bnx2x_iov_nic_init(bp);
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
+ bnx2x_nic_load_afex_dcc(bp, load_code);
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+ /* Send LOAD_DONE command to MCP */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp,
+ DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
+
+ /* initialize FW coalescing state machines in RAM */
+ bnx2x_update_coalesce(bp);
+ }
+
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ if (IS_PF(bp))
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+ else /* VF */
+ rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
+ if (rc) {
+ BNX2X_ERR("Queue %d setup failed\n", i);
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
+
+ /* setup rss */
+ rc = bnx2x_init_rss(bp);
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+	/* Now that the clients are configured we are ready to work */
+ bp->state = BNX2X_STATE_OPEN;
+
+ /* Configure a ucast MAC */
+ if (IS_PF(bp))
+ rc = bnx2x_set_eth_mac(bp, true);
+ else /* vf */
+ rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
+ true);
+ if (rc) {
+ BNX2X_ERR("Setting Ethernet MAC failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+ if (IS_PF(bp) && bp->pending_max) {
+ bnx2x_update_max_mf_config(bp, bp->pending_max);
+ bp->pending_max = 0;
+ }
+
+ bp->force_link_down = false;
+ if (bp->port.pmf) {
+ rc = bnx2x_initial_phy_init(bp, load_mode);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
+
+ /* Start fast path */
+
+ /* Re-configure vlan filters */
+ rc = bnx2x_vlan_reconfigure_vid(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+
+ /* Initialize Rx filter. */
+ bnx2x_set_rx_mode_inner(bp);
+
+ if (bp->flags & PTP_SUPPORTED) {
+ bnx2x_register_phc(bp);
+ bnx2x_init_ptp(bp);
+ bnx2x_configure_ptp_filters(bp);
+ }
+ /* Start Tx */
+ switch (load_mode) {
+ case LOAD_NORMAL:
+ /* Tx queue should be only re-enabled */
+ netif_tx_wake_all_queues(bp->dev);
+ break;
+
+ case LOAD_OPEN:
+ netif_tx_start_all_queues(bp->dev);
+ smp_mb__after_atomic();
+ break;
+
+ case LOAD_DIAG:
+ case LOAD_LOOPBACK_EXT:
+ bp->state = BNX2X_STATE_DIAG;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bp->port.pmf)
+ bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
+ else
+ bnx2x__link_status_update(bp);
+
+ /* start the timer */
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+ if (CNIC_ENABLED(bp))
+ bnx2x_load_cnic(bp);
+
+ if (IS_PF(bp))
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ /* mark driver is loaded in shmem2 */
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ val &= ~DRV_FLAGS_MTU_MASK;
+ val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
+ DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
+ /* Wait for all pending SP commands to complete */
+ if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
+ return -EBUSY;
+ }
+
+ /* Update driver data for On-Chip MFW dump. */
+ if (IS_PF(bp))
+ bnx2x_update_mfw_dump(bp);
+
+ /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+ bnx2x_dcbx_init(bp, false);
+
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
+ DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
+
+ return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error3:
+ if (IS_PF(bp)) {
+ bnx2x_int_disable_sync(bp, 1);
+
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+ }
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+load_error2:
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
+
+ bp->port.pmf = 0;
+load_error1:
+ bnx2x_napi_disable(bp);
+ bnx2x_del_all_napi(bp);
+ bp->nic_stopped = true;
+
+ /* clear pf_load status, as it was already set */
+ if (IS_PF(bp))
+ bnx2x_clear_pf_load(bp);
+load_error0:
+ bnx2x_free_fw_stats_mem(bp);
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_mem(bp);
+
+ return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
+}
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
+{
+ u8 rc = 0, cos, i;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
+{
+ int i;
+ bool global = false;
+
+ DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
+ /* mark driver is unloaded in shmem2 */
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
+ if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ (bp->state == BNX2X_STATE_CLOSED ||
+ bp->state == BNX2X_STATE_ERROR)) {
+ /* We can get here if the driver has been unloaded
+ * during parity error recovery and is either waiting for a
+ * leader to complete or for other functions to unload and
+ * then ifdown has been issued. In this case we want to
+ * unload and let other functions to complete a recovery
+ * process.
+ */
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ bp->is_leader = 0;
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+
+ DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
+ BNX2X_ERR("Can't unload in closed or error state\n");
+ return -EINVAL;
+ }
+
+	/* Nothing to do during unload if the previous bnx2x_nic_load()
+	 * did not complete successfully - all resources are already released.
+	 *
+	 * We can get here only after an unsuccessful ndo_* callback, during
+	 * which the dev->IFF_UP flag is still set.
+	 */
+ if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
+ return 0;
+
+	/* It's important to set bp->state to a value different from
+	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+	 */
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+ smp_mb();
+
+ /* indicate to VFs that the PF is going down */
+ bnx2x_iov_channel_down(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+ netdev_reset_tc(bp->dev);
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+ del_timer_sync(&bp->timer);
+
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+ bnx2x_drv_pulse(bp);
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
+ }
+
+	/* Wait until consumers catch up with producers in all queues.
+	 * If we're recovering, the FW can't write to the host, so there is
+	 * no reason to wait for the queues to complete all Tx.
+	 */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_drain_tx_queues(bp);
+
+	/* If VF, indicate to the PF that this function is going down (the PF
+	 * will delete SP elements and clear initializations).
+	 */
+ if (IS_VF(bp)) {
+ bnx2x_clear_vlan_info(bp);
+ bnx2x_vfpf_close_vf(bp);
+ } else if (unload_mode != UNLOAD_RECOVERY) {
+ /* if this is a normal/close unload need to clean up chip*/
+ bnx2x_chip_cleanup(bp, unload_mode, keep_link);
+ } else {
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(bp, unload_mode);
+
+		/* Prevent transactions to the host from the functions on the
+		 * engine that doesn't reset global blocks in case of global
+		 * attention once global blocks are reset and gates are opened
+		 * (the engine whose leader will perform the recovery last).
+		 */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ if (!bp->nic_stopped) {
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, false);
+ }
+
+	/*
+	 * At this stage no more interrupts will arrive, so we may safely clean
+	 * the queueable objects here in case they failed to get cleaned so far.
+	 */
+ if (IS_PF(bp))
+ bnx2x_squeeze_objects(bp);
+
+ /* There should be no more pending SP commands at this stage */
+ bp->sp_state = 0;
+
+ bp->port.pmf = 0;
+
+ /* clear pending work in rtnl task */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_free_skbs_cnic(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ bnx2x_free_fp_mem(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_free_fp_mem_cnic(bp);
+
+ if (IS_PF(bp)) {
+ if (CNIC_LOADED(bp))
+ bnx2x_free_mem_cnic(bp);
+ }
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+ bp->cnic_loaded = false;
+
+ /* Clear driver version indication in shmem */
+ if (IS_PF(bp) && !BP_NOMCP(bp))
+ bnx2x_update_mng_version(bp);
+
+ /* Check if there are pending parity attentions. If there are - set
+ * RECOVERY_IN_PROGRESS.
+ */
+ if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Set RESET_IS_GLOBAL if needed */
+ if (global)
+ bnx2x_set_reset_global(bp);
+ }
+
+ /* The last driver must disable a "close the gate" if there is no
+ * parity attention or "process kill" pending.
+ */
+ if (IS_PF(bp) &&
+ !bnx2x_clear_pf_load(bp) &&
+ bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ bnx2x_disable_close_the_gate(bp);
+
+ DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
+
+ return 0;
+}
+
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+ u16 pmcsr;
+
+ /* If there is no power capability, silently succeed */
+ if (!bp->pdev->pm_cap) {
+ BNX2X_DEV_INFO("No power capability. Breaking.\n");
+ return 0;
+ }
+
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ switch (state) {
+ case PCI_D0:
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
+ ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+ PCI_PM_CTRL_PME_STATUS));
+
+ if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+ /* delay required during transition out of D3hot */
+ msleep(20);
+ break;
+
+ case PCI_D3hot:
+		/* If there are other clients above, don't
+		 * shut down the power
+		 */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
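+		/* PCI_PM_CTRL state field value 3 selects D3hot */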
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= 3;
+
+ if (bp->wol)
+ pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
+ pmcsr);
+
+ /* No more memory access after this point until
+ * device is brought back to D0.
+ */
+ break;
+
+ default:
+ dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * net_device service functions
+ */
+static int bnx2x_poll(struct napi_struct *napi, int budget)
+{
+ struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+ napi);
+ struct bnx2x *bp = fp->bp;
+ int rx_work_done;
+ u8 cos;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic)) {
+ napi_complete(napi);
+ return 0;
+ }
+#endif
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+ bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
+
+ rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
+
+ if (rx_work_done < budget) {
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+ * has been updated when NAPI was scheduled.
+ */
+ if (IS_FCOE_FP(fp)) {
+ napi_complete_done(napi, rx_work_done);
+ } else {
+ bnx2x_update_fpsb_idx(fp);
+ /* bnx2x_has_rx_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (bnx2x_update_fpsb_idx)
+ * prior to this check (bnx2x_has_rx_work) so that
+ * we won't write the "newer" value of the status block
+ * to IGU (if there was a DMA right after
+ * bnx2x_has_rx_work and if there is no rmb, the memory
+ * reading (bnx2x_update_fpsb_idx) may be postponed
+ * to right before bnx2x_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
+
+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ if (napi_complete_done(napi, rx_work_done)) {
+ /* Re-enable interrupts */
+ DP(NETIF_MSG_RX_STATUS,
+ "Update index to %d\n", fp->fp_hc_idx);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+ le16_to_cpu(fp->fp_hc_idx),
+ IGU_INT_ENABLE, 1);
+ }
+ } else {
+ rx_work_done = budget;
+ }
+ }
+ }
+
+ return rx_work_done;
+}
+
+/* we split the first BD into headers and data BDs
+ * to ease the pain of our fellow microcode engineers
+ * we use one mapping for both BDs
+ */
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod)
+{
+ struct eth_tx_start_bd *h_tx_bd = *tx_bd;
+ struct eth_tx_bd *d_tx_bd;
+ dma_addr_t mapping;
+ int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+ /* first fix first BD */
+ h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
+
+ /* now get a new data BD
+ * (after the pbd) and fill it */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+ d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+ mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+ le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+ d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+
+ /* this marks the BD as one that has no individual mapping */
+ tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "TSO split data size is %d (%x:%x)\n",
+ d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+ /* update tx_bd */
+ *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+
+ return bd_prod;
+}
+
+#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
+#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
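+/* Fix up a partial checksum around the transport header: fold the checksum
+ * of the |fix| bytes preceding the header out of the sum (fix > 0), or the
+ * checksum of the -fix bytes starting at the header into it (fix < 0).
+ */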
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+ __sum16 tsum = (__force __sum16) csum;
+
+ if (fix > 0)
+ tsum = ~csum_fold(csum_sub((__force __wsum) csum,
+ csum_partial(t_header - fix, fix, 0)));
+
+ else if (fix < 0)
+ tsum = ~csum_fold(csum_add((__force __wsum) csum,
+ csum_partial(t_header, -fix, 0)));
+
+ return bswab16(tsum);
+}
+
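+/* Classify the skb into XMIT_* flags (plain/csum, IPv4/IPv6, TCP/UDP, GSO
+ * and their encapsulated variants) that drive the BD/PBD setup on xmit.
+ */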
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+ u32 rc;
+ __u8 prot = 0;
+ __be16 protocol;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return XMIT_PLAIN;
+
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ prot = ipv6_hdr(skb)->nexthdr;
+ } else {
+ rc = XMIT_CSUM_V4;
+ prot = ip_hdr(skb)->protocol;
+ }
+
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ rc |= XMIT_CSUM_ENC_V6;
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+ } else {
+ rc |= XMIT_CSUM_ENC_V4;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+ }
+ }
+ if (prot == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+
+ if (skb_is_gso(skb)) {
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
+ }
+
+ return rc;
+}
+
+/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 parsing BDs and the last BD) */
+#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
+
+/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
+/* Check if the packet requires linearization (the packet is too fragmented).
+ * There is no need to check fragmentation if the page size > 8K (there can
+ * be no violation of the FW restrictions in that case).
+ */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+ u32 xmit_type)
+{
+ int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
+ int to_copy = 0, hlen = 0;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
+
+ if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
+ if (xmit_type & XMIT_GSO) {
+ unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+ int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
+ /* Number of windows to check */
+ int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+ int wnd_idx = 0;
+ int frag_idx = 0;
+ u32 wnd_sum = 0;
+
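+			/* FW restriction: every window of wnd_size
+			 * consecutive BDs must carry at least lso_mss
+			 * bytes of payload; if any window falls short,
+			 * the skb must be linearized.
+			 */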
+ /* Headers length */
+ if (xmit_type & XMIT_GSO_ENC)
+ hlen = skb_inner_tcp_all_headers(skb);
+ else
+ hlen = skb_tcp_all_headers(skb);
+
+			/* Amount of data (w/o headers) on the linear part of the SKB */
+ first_bd_sz = skb_headlen(skb) - hlen;
+
+ wnd_sum = first_bd_sz;
+
+ /* Calculate the first sum - it's special */
+ for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+ wnd_sum +=
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
+
+ /* If there was data on linear skb data - check it */
+ if (first_bd_sz > 0) {
+ if (unlikely(wnd_sum < lso_mss)) {
+ to_copy = 1;
+ goto exit_lbl;
+ }
+
+ wnd_sum -= first_bd_sz;
+ }
+
+ /* Others are easier: run through the frag list and
+ check all windows */
+ for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+ wnd_sum +=
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
+
+ if (unlikely(wnd_sum < lso_mss)) {
+ to_copy = 1;
+ break;
+ }
+ wnd_sum -=
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
+ }
+ } else {
+			/* a non-LSO packet this fragmented must always
+			 * be linearized
+			 */
+ to_copy = 1;
+ }
+ }
+
+exit_lbl:
+ if (unlikely(to_copy))
+ DP(NETIF_MSG_TX_QUEUED,
+ "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
+ (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+ skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+ return to_copy;
+}
+#endif
+
+/**
+ * bnx2x_set_pbd_gso - update PBD in GSO case.
+ *
+ * @skb: packet skb
+ * @pbd: parse BD
+ * @xmit_type: xmit flags
+ */
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
+ pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd->ip_id = bswab16(ip_hdr(skb)->id);
+ pbd->tcp_pseudo_csum =
+ bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ } else {
+ pbd->tcp_pseudo_csum =
+ bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ pbd->global_data |=
+ cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
+}
+
+/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
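+	/* Offsets with a _W suffix are passed to the FW in 16-bit words;
+	 * hence the >> 1 on byte offsets below.
+	 */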
+ *parsing_data |=
+ ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_inner_tcp_all_headers(skb);
+ }
+
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
+}
+
+/**
+ * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related
+ */
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_tcp_all_headers(skb);
+ }
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
+}
+
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
+{
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+ if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
+
+ if (!(xmit_type & XMIT_CSUM_TCP))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
+}
+
+/**
+ * bnx2x_set_pbd_csum - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @pbd: parse BD to be updated
+ * @xmit_type: xmit flags
+ */
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+
+ /* for now NS flag is not used in Linux */
+ pbd->global_data =
+ cpu_to_le16(hlen |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+
+ pbd->ip_hlen_w = (skb_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ hlen += pbd->ip_hlen_w;
+
+ /* We support checksum offload for TCP and UDP only */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen += tcp_hdrlen(skb) / 2;
+ else
+ hlen += sizeof(struct udphdr) / 2;
+
+ pbd->total_hlen_w = cpu_to_le16(hlen);
+ hlen = hlen*2;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
+
+ } else {
+ s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "hlen %d fix %d csum before fix %x\n",
+ le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
+
+ /* HW bug: fixup the CSUM */
+ pbd->tcp_pseudo_csum =
+ bnx2x_csum_fix(skb_transport_header(skb),
+ SKB_CS(skb), fix);
+
+ DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+ pbd->tcp_pseudo_csum);
+ }
+
+ return hlen;
+}
+
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd_e2,
+ struct eth_tx_parse_2nd_bd *pbd2,
+ u16 *global_data,
+ u32 xmit_type)
+{
+ u16 hlen_w = 0;
+ u8 outerip_off, outerip_len = 0;
+
+ /* from outer IP to transport */
+ hlen_w = (skb_inner_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ /* transport len */
+ hlen_w += inner_tcp_hdrlen(skb) >> 1;
+
+ pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+ /* outer IP header info */
+ if (xmit_type & XMIT_CSUM_V4) {
+ struct iphdr *iph = ip_hdr(skb);
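+		/* Provide the IP header checksum without the tot_len and
+		 * frag_off contributions; the FW re-adds them per segment
+		 * (hence "csum_wo_len_flags_frag").
+		 */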
+ u32 csum = (__force u32)(~iph->check) -
+ (__force u32)iph->tot_len -
+ (__force u32)iph->frag_off;
+
+ outerip_len = iph->ihl << 1;
+
+ pbd2->fw_ip_csum_wo_len_flags_frag =
+ bswab16(csum_fold((__force __wsum)csum));
+ } else {
+ pbd2->fw_ip_hdr_to_payload_w =
+ hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ pbd_e2->data.tunnel_data.flags |=
+ ETH_TUNNEL_DATA_IPV6_OUTER;
+ }
+
+ pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+ pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+ /* inner IP header info */
+ if (xmit_type & XMIT_CSUM_ENC_V4) {
+ pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_tcpudp_magic(
+ inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ } else {
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_ipv6_magic(
+ &inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+ *global_data |=
+ outerip_off |
+ (outerip_len <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
+ pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
+ }
+}
+
+static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
+{
+ struct ipv6hdr *ipv6;
+
+ if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
+ return;
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else /* XMIT_GSO_V6 */
+ ipv6 = ipv6_hdr(skb);
+
+ if (ipv6->nexthdr == NEXTHDR_IPV6)
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
+/* called with netif_tx_lock
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue()
+ */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ struct netdev_queue *txq;
+ struct bnx2x_fp_txdata *txdata;
+ struct sw_tx_bd *tx_buf;
+ struct eth_tx_start_bd *tx_start_bd, *first_bd;
+ struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
+ struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+ struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ struct eth_tx_parse_2nd_bd *pbd2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
+ u16 pkt_prod, bd_prod;
+ int nbd, txq_index;
+ dma_addr_t mapping;
+ u32 xmit_type = bnx2x_xmit_type(bp, skb);
+ int i;
+ u8 hlen = 0;
+ __le16 pkt_size = 0;
+ struct ethhdr *eth;
+ u8 mac_type = UNICAST_ADDRESS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return NETDEV_TX_BUSY;
+#endif
+
+ txq_index = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, txq_index);
+
+ BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
+
+ txdata = &bp->bnx2x_txq[txq_index];
+
+ /* enable this debug print to view the transmission queue being used
+ DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
+ txq_index, fp_index, txdata_index); */
+
+ /* enable this debug print to view the transmission details
+ DP(NETIF_MSG_TX_QUEUED,
+ "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
+ txdata->cid, fp_index, txdata_index, txdata, fp); */
+
+ if (unlikely(bnx2x_tx_avail(bp, txdata) <
+ skb_shinfo(skb)->nr_frags +
+ BDS_PER_TX_PKT +
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
+ /* Handle special storage cases separately */
+ if (txdata->tx_ring_size == 0) {
+ struct bnx2x_eth_q_stats *q_stats =
+ bnx2x_fp_qstats(bp, txdata->parent_fp);
+ q_stats->driver_filtered_tx_pkt++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ netif_tx_stop_queue(txq);
+ BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+
+ return NETDEV_TX_BUSY;
+ }
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
+ txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
+ skb->len);
+
+ eth = (struct ethhdr *)skb->data;
+
+ /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (is_broadcast_ether_addr(eth->h_dest))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
+ /* First, check if we need to linearize the skb (due to FW
+ restrictions). No need to check fragmentation if page size > 8K
+ (there will be no violation to FW restrictions) */
+ if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+ /* Statistics of linearization */
+ bp->lin_cnt++;
+ if (skb_linearize(skb) != 0) {
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB linearization failed - silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+ /* Map skb linear data for DMA */
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB mapping failed - silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+	/* Please read carefully. First we use one BD which we mark as start,
+	 * then we have a parsing info BD (used for TSO or xsum),
+	 * and only then we have the rest of the TSO BDs.
+	 * (don't forget to mark the last one as last,
+	 * and to unmap only AFTER you write to the BD ...)
+	 * And above all, all PBD sizes are in words - NOT DWORDS!
+	 */
+
+ /* get current pkt produced now - advance it just before sending packet
+ * since mapping of pages may fail and cause packet to be dropped
+ */
+ pkt_prod = txdata->tx_pkt_prod;
+ bd_prod = TX_BD(txdata->tx_bd_prod);
+
+ /* get a tx_buf and first BD
+ * tx_start_bd may be changed during SPLIT,
+ * but first_bd will always stay first
+ */
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+ first_bd = tx_start_bd;
+
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+ bp->eth_stats.ptp_skip_tx_ts++;
+ BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
+ } else if (bp->ptp_tx_skb) {
+ bp->eth_stats.ptp_skip_tx_ts++;
+ netdev_err_once(bp->dev,
+ "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* schedule check for Tx timestamp */
+ bp->ptp_tx_skb = skb_get(skb);
+ bp->ptp_tx_start = jiffies;
+ schedule_work(&bp->ptp_task);
+ }
+ }
+
+ /* header nbd: indirectly zero other flags! */
+ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
+
+ /* remember the first BD of the packet */
+ tx_buf->first_bd = txdata->tx_bd_prod;
+ tx_buf->skb = skb;
+ tx_buf->flags = 0;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "sending pkt %u @%p next_idx %u bd %u @%p\n",
+ pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
+
+ if (skb_vlan_tag_present(skb)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(skb_vlan_tag_get(skb));
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ } else {
+ /* when transmitting in a vf, start bd must hold the ethertype
+ * for fw to enforce it
+ */
+ u16 vlan_tci = 0;
+#ifndef BNX2X_STOP_ON_ERROR
+ if (IS_VF(bp)) {
+#endif
+ /* Still need to consider inband vlan for enforced */
+ if (__vlan_get_tag(skb, &vlan_tci)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+ } else {
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_INBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tci);
+ }
+#ifndef BNX2X_STOP_ON_ERROR
+ } else {
+ /* used by FW for packet accounting */
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
+#endif
+ }
+
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
+ /* turn on parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ if (xmit_type & XMIT_CSUM)
+ bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
+
+ if (!CHIP_IS_E1x(bp)) {
+ pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+
+ if (xmit_type & XMIT_CSUM_ENC) {
+ u16 global_data = 0;
+
+ /* Set PBD in enc checksum offload case */
+ hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+
+ /* turn on 2nd parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+ memset(pbd2, 0, sizeof(*pbd2));
+
+ pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ (skb_inner_network_header(skb) -
+ skb->data) >> 1;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ &global_data,
+ xmit_type);
+
+ pbd2->global_data = cpu_to_le16(global_data);
+
+			/* add additional parsing BD indication to start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS, 1);
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+ }
+
+ bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
+ /* Add the macs to the parsing BD if this is a vf or if
+ * Tx Switching is enabled.
+ */
+ if (IS_VF(bp)) {
+ /* override GRE parameters in BD */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
+ eth->h_source);
+
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
+ } else {
+ if (bp->flags & TX_SWITCHING)
+ bnx2x_set_fw_mac_addr(
+ &pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enforce security is always set in Stop on Error -
+ * source mac should be present in the parsing BD
+ */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
+ eth->h_source);
+#endif
+ }
+
+ SET_FLAG(pbd_e2_parsing_data,
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
+ } else {
+ u16 global_data = 0;
+ pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+ /* Set PBD in checksum offload case */
+ if (xmit_type & XMIT_CSUM)
+ hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
+
+ SET_FLAG(global_data,
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
+ pbd_e1x->global_data |= cpu_to_le16(global_data);
+ }
+
+ /* Setup the data pointer of the first BD of the packet */
+ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+ pkt_size = tx_start_bd->nbytes;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
+ tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
+ le16_to_cpu(tx_start_bd->nbytes),
+ tx_start_bd->bd_flags.as_bitfield,
+ le16_to_cpu(tx_start_bd->vlan_or_ethertype));
+
+ if (xmit_type & XMIT_GSO) {
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "TSO packet len %d hlen %d total len %d tso size %d\n",
+ skb->len, hlen, skb_headlen(skb),
+ skb_shinfo(skb)->gso_size);
+
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ nbd++;
+ bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
+ &tx_start_bd, hlen,
+ bd_prod);
+ }
+ if (!CHIP_IS_E1x(bp))
+ pbd_e2_parsing_data |=
+ (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
+ else
+ bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+ }
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
+ tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+
+ /* Handle fragmented skb */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "Unable to map page - dropping packet...\n");
+
+ /* we need unmap all buffers already mapped
+ * for this SKB;
+ * first_bd->nbd need to be properly updated
+ * before call to bnx2x_free_tx_pkt
+ */
+ first_bd->nbd = cpu_to_le16(nbd);
+ bnx2x_free_tx_pkt(bp, txdata,
+ TX_BD(txdata->tx_pkt_prod),
+ &pkts_compl, &bytes_compl);
+ return NETDEV_TX_OK;
+ }
+
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+ tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+ if (total_pkt_bd == NULL)
+ total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+ tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
+ le16_add_cpu(&pkt_size, skb_frag_size(frag));
+ nbd++;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "frag %d bd @%p addr (%x:%x) nbytes %d\n",
+ i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
+ le16_to_cpu(tx_data_bd->nbytes));
+ }
+
+ DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+
+ /* update with actual num BDs */
+ first_bd->nbd = cpu_to_le16(nbd);
+
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	/* now send a tx doorbell, counting the "next page" BD
+	 * if the packet contains or ends with it
+	 */
+ if (TX_BD_POFF(bd_prod) < nbd)
+ nbd++;
+
+ /* total_pkt_bytes should be set on the first data BD if
+ * it's not an LSO packet and there is more than one
+ * data BD. In this case pkt_size is limited by an MTU value.
+ * However we prefer to set it for an LSO packet (while we don't
+	 * have to) in order to save some CPU cycles in the non-LSO
+	 * case, where they matter much more to us.
+ */
+ if (total_pkt_bd != NULL)
+ total_pkt_bd->total_pkt_bytes = pkt_size;
+
+ if (pbd_e1x)
+ DP(NETIF_MSG_TX_QUEUED,
+ "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
+ pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
+ pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
+ pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
+ le16_to_cpu(pbd_e1x->total_hlen_w));
+ if (pbd_e2)
+ DP(NETIF_MSG_TX_QUEUED,
+ "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
+ pbd_e2,
+ pbd_e2->data.mac_addr.dst_hi,
+ pbd_e2->data.mac_addr.dst_mid,
+ pbd_e2->data.mac_addr.dst_lo,
+ pbd_e2->data.mac_addr.src_hi,
+ pbd_e2->data.mac_addr.src_mid,
+ pbd_e2->data.mac_addr.src_lo,
+ pbd_e2->parsing_data);
+ DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ skb_tx_timestamp(skb);
+
+ txdata->tx_pkt_prod++;
+ /*
+ * Make sure that the BD data is updated before updating the producer
+ * since FW might read the BD right after the producer is updated.
+ * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since the FW
+	 * assumes packets must have BDs.
+ */
+ wmb();
+
+ txdata->tx_db.data.prod += nbd;
+ /* make sure descriptor update is observed by HW */
+ wmb();
+
+ DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
+
+ txdata->tx_bd_prod += nbd;
+
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
+ netif_tx_stop_queue(txq);
+
+		/* the paired memory barrier is in bnx2x_tx_int(); we have to
+		 * keep the ordering of the set_bit() in netif_tx_stop_queue()
+		 * and the read of txdata->tx_bd_cons
+		 */
+ smp_mb();
+
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
+ netif_tx_wake_queue(txq);
+ }
+ txdata->tx_pkt++;
+
+ return NETDEV_TX_OK;
+}
+
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+ int mfw_vn = BP_FW_MB_IDX(bp);
+ u32 tmp;
+
+	/* If the shmem shouldn't affect configuration, use an identity map */
+ if (!IS_MF_BD(bp)) {
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+ c2s_map[i] = i;
+ *c2s_default = 0;
+
+ return;
+ }
+
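+	/* Each shmem dword packs four PCP-to-COS mappings, one per byte */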
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[0] = tmp & 0xff;
+ c2s_map[1] = (tmp >> 8) & 0xff;
+ c2s_map[2] = (tmp >> 16) & 0xff;
+ c2s_map[3] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[4] = tmp & 0xff;
+ c2s_map[5] = (tmp >> 8) & 0xff;
+ c2s_map[6] = (tmp >> 16) & 0xff;
+ c2s_map[7] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
+
+/**
+ * bnx2x_setup_tc - routine to configure net_device for multi tc
+ *
+ * @dev: net device to configure
+ * @num_tc: number of traffic classes to enable
+ *
+ * callback connected to the ndo_setup_tc function pointer
+ */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+ int cos, prio, count, offset;
+
+ /* setup tc must be called under rtnl lock */
+ ASSERT_RTNL();
+
+ /* no traffic classes requested. Aborting */
+ if (!num_tc) {
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+ /* requested to support too many traffic classes */
+ if (num_tc > bp->max_cos) {
+ BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
+ num_tc, bp->max_cos);
+ return -EINVAL;
+ }
+
+ /* declare amount of supported traffic classes */
+ if (netdev_set_num_tc(dev, num_tc)) {
+ BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
+ return -EINVAL;
+ }
+
+ bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
+ /* configure priority to traffic class mapping */
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ int outer_prio = c2s_map[prio];
+
+ netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping priority %d to tc %d\n",
+ outer_prio, bp->prio_to_cos[outer_prio]);
+ }
+
+	/* Use this configuration to differentiate tc0 from other COSes.
+	 * This can be used for ETS or PFC, and saves the effort of setting
+	 * up a multi-class queueing discipline or negotiating DCBX with a
+	 * switch:
+	 * netdev_set_prio_tc_map(dev, 0, 0);
+	 * DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
+	 * for (prio = 1; prio < 16; prio++) {
+	 *	netdev_set_prio_tc_map(dev, prio, 1);
+	 *	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
+	 * } */
+
+ /* configure traffic class to transmission queue mapping */
+ for (cos = 0; cos < bp->max_cos; cos++) {
+ count = BNX2X_NUM_ETH_QUEUES(bp);
+ offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
+ netdev_set_tc_queue(dev, cos, count, offset);
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping tc %d to offset %d count %d\n",
+ cos, offset, count);
+ }
+
+ return 0;
+}
+
+int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tc_mqprio_qopt *mqprio = type_data;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return bnx2x_setup_tc(dev, mqprio->num_tc);
+}
+
+/* called with rtnl_lock */
+int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ BNX2X_ERR("Requested MAC address is not valid\n");
+ return -EINVAL;
+ }
+
+ if (IS_MF_STORAGE_ONLY(bp)) {
+ BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(dev)) {
+ rc = bnx2x_set_eth_mac(bp, false);
+ if (rc)
+ return rc;
+ }
+
+ eth_hw_addr_set(dev, addr->sa_data);
+
+ if (netif_running(dev))
+ rc = bnx2x_set_eth_mac(bp, true);
+
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
+ return rc;
+}
+
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+ union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+ struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+ u8 cos;
+
+ /* Common */
+
+ if (IS_FCOE_IDX(fp_index)) {
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ fp->status_blk_mapping = 0;
+ } else {
+ /* status blocks */
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(sb->e2_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(sb->e1x_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+ }
+
+ /* Rx */
+ if (!skip_rx_queue(bp, fp_index)) {
+ bnx2x_free_rx_bds(fp);
+
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+ bnx2x_fp(bp, fp_index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+ bnx2x_fp(bp, fp_index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+ bnx2x_fp(bp, fp_index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ }
+
+ /* Tx */
+ if (!skip_tx_queue(bp, fp_index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+
+ DP(NETIF_MSG_IFDOWN,
+ "freeing tx memory of fp %d cos %d cid %d\n",
+ fp_index, cos, txdata->cid);
+
+ BNX2X_FREE(txdata->tx_buf_ring);
+ BNX2X_PCI_FREE(txdata->tx_desc_ring,
+ txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ }
+ /* end of fastpath */
+}
+
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+{
+ int i;
+ for_each_cnic_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+ int i;
+ for_each_eth_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
+static void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+ if (!CHIP_IS_E1x(bp)) {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e2_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e2_sb->sb.running_index;
+ } else {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
+ }
+}
+
+/* Returns the number of actually allocated BDs */
+static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+ int rx_ring_size)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 ring_prod, cqe_ring_prod;
+ int i, failure_cnt = 0;
+
+ fp->rx_comp_cons = 0;
+ cqe_ring_prod = ring_prod = 0;
+
+	/* This routine is called only during fp init so
+ * fp->eth_q_stats.rx_skb_alloc_failed = 0
+ */
+ for (i = 0; i < rx_ring_size; i++) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
+ failure_cnt++;
+ continue;
+ }
+ ring_prod = NEXT_RX_IDX(ring_prod);
+ cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+ WARN_ON(ring_prod <= (i - failure_cnt));
+ }
+
+ if (failure_cnt)
+ BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+ i - failure_cnt, fp->index);
+
+ fp->rx_bd_prod = ring_prod;
+ /* Limit the CQE producer by the CQE ring size */
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
+
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+ return i - failure_cnt;
+}
+
+static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+ int i;
+
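+	/* The last CQE of every RCQ page points to the next page, and the
+	 * last page points back to the first, chaining the pages into a ring.
+	 */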
+ for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+ struct eth_rx_cqe_next_page *nextpg;
+
+ nextpg = (struct eth_rx_cqe_next_page *)
+ &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+ nextpg->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+ nextpg->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+ }
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block *sb;
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ int ring_size = 0;
+ u8 cos;
+ int rx_ring_size = 0;
+
+ if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
+ rx_ring_size = MIN_RX_SIZE_NONTPA;
+ bp->rx_ring_size = rx_ring_size;
+ } else if (!bp->rx_ring_size) {
+ rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+ if (CHIP_IS_E3(bp)) {
+ u32 cfg = SHMEM_RD(bp,
+ dev_info.port_hw_config[BP_PORT(bp)].
+ default_cfg);
+
+ /* Decrease ring size for 1G functions */
+ if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SGMII)
+ rx_ring_size /= 10;
+ }
+
+ /* allocate at least number of buffers required by FW */
+ rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA, rx_ring_size);
+
+ bp->rx_ring_size = rx_ring_size;
+ } else /* if rx_ring_size specified - use it */
+ rx_ring_size = bp->rx_ring_size;
+
+ DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
+
+ /* Common */
+ sb = &bnx2x_fp(bp, index, status_blk);
+
+ if (!IS_FCOE_IDX(index)) {
+ /* status blocks */
+ if (!CHIP_IS_E1x(bp)) {
+ sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ if (!sb->e2_sb)
+ goto alloc_mem_err;
+ } else {
+ sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+ if (!sb->e1x_sb)
+ goto alloc_mem_err;
+ }
+ }
+
+ /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+ * set shortcuts for it.
+ */
+ if (!IS_FCOE_IDX(index))
+ set_sb_shortcuts(bp, index);
+
+ /* Tx */
+ if (!skip_tx_queue(bp, index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+
+ DP(NETIF_MSG_IFUP,
+ "allocating tx memory of fp %d cos %d\n",
+ index, cos);
+
+ txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+ sizeof(struct sw_tx_bd),
+ GFP_KERNEL);
+ if (!txdata->tx_buf_ring)
+ goto alloc_mem_err;
+ txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ if (!txdata->tx_desc_ring)
+ goto alloc_mem_err;
+ }
+ }
+
+ /* Rx */
+ if (!skip_rx_queue(bp, index)) {
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ bnx2x_fp(bp, index, rx_buf_ring) =
+ kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_buf_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_desc_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ if (!bnx2x_fp(bp, index, rx_desc_ring))
+ goto alloc_mem_err;
+
+ /* Seed all CQEs by 1s */
+ bnx2x_fp(bp, index, rx_comp_ring) =
+ BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+ if (!bnx2x_fp(bp, index, rx_comp_ring))
+ goto alloc_mem_err;
+
+ /* SGE ring */
+ bnx2x_fp(bp, index, rx_page_ring) =
+ kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+ GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_page_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_sge_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ if (!bnx2x_fp(bp, index, rx_sge_ring))
+ goto alloc_mem_err;
+ /* RX BD ring */
+ bnx2x_set_next_page_rx_bd(fp);
+
+ /* CQ ring */
+ bnx2x_set_next_page_rx_cq(fp);
+
+ /* BDs */
+ ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+ if (ring_size < rx_ring_size)
+ goto alloc_mem_err;
+ }
+
+ return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+ BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+ index, ring_size);
+	/* The FW will drop all packets if the queue is not big enough;
+	 * in these cases we disable the queue.
+	 * The minimum size differs for OOO, TPA and non-TPA queues.
+	 */
+ if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
+ MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
+ /* release memory allocated for this queue */
+ bnx2x_free_fp_mem_at(bp, index);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+{
+ if (!NO_FCOE(bp))
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
+		/* we will fail the load process instead of marking
+ * NO_FCOE_FLAG
+ */
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+ int i;
+
+ /* 1. Allocate FP for leading - fatal if error
+ * 2. Allocate RSS - fix number of queues if error
+ */
+
+ /* leading */
+ if (bnx2x_alloc_fp_mem_at(bp, 0))
+ return -ENOMEM;
+
+ /* RSS */
+ for_each_nondefault_eth_queue(bp, i)
+ if (bnx2x_alloc_fp_mem_at(bp, i))
+ break;
+
+ /* handle memory failures */
+ if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+ int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+ WARN_ON(delta < 0);
+ bnx2x_shrink_eth_fp(bp, delta);
+ if (CNIC_SUPPORT(bp))
+			/* move non-eth FPs next to the last eth FP;
+			 * must be done in that order:
+			 * FCOE_IDX < FWD_IDX < OOO_IDX
+			 */
+
+		/* move the FCoE fp even if NO_FCOE_FLAG is on */
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
+ bp->num_ethernet_queues -= delta;
+ bp->num_queues = bp->num_ethernet_queues +
+ bp->num_cnic_queues;
+ BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+ bp->num_queues + delta, bp->num_queues);
+ }
+
+ return 0;
+}
+
+void bnx2x_free_mem_bp(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->fp_array_size; i++)
+ kfree(bp->fp[i].tpa_info);
+ kfree(bp->fp);
+ kfree(bp->sp_objs);
+ kfree(bp->fp_stats);
+ kfree(bp->bnx2x_txq);
+ kfree(bp->msix_table);
+ kfree(bp->ilt);
+}
+
+int bnx2x_alloc_mem_bp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp;
+ struct msix_entry *tbl;
+ struct bnx2x_ilt *ilt;
+ int msix_table_size = 0;
+ int fp_array_size, txq_array_size;
+ int i;
+
+	/*
+	 * The biggest MSI-X table we might need is the maximum number of
+	 * fastpath IGU SBs plus the default SB (for PF only).
+	 */
+ msix_table_size = bp->igu_sb_cnt;
+ if (IS_PF(bp))
+ msix_table_size++;
+ BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
+
+ /* fp array: RSS plus CNIC related L2 queues */
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
+ bp->fp_array_size = fp_array_size;
+ BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
+
+ fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ goto alloc_err;
+ for (i = 0; i < bp->fp_array_size; i++) {
+ fp[i].tpa_info =
+ kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+ sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+ if (!(fp[i].tpa_info))
+ goto alloc_err;
+ }
+
+ bp->fp = fp;
+
+ /* allocate sp objs */
+ bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
+ GFP_KERNEL);
+ if (!bp->sp_objs)
+ goto alloc_err;
+
+ /* allocate fp_stats */
+ bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
+ GFP_KERNEL);
+ if (!bp->fp_stats)
+ goto alloc_err;
+
+ /* Allocate memory for the transmission queues array */
+ txq_array_size =
+ BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
+ BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
+
+ bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
+ GFP_KERNEL);
+ if (!bp->bnx2x_txq)
+ goto alloc_err;
+
+ /* msix table */
+ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ goto alloc_err;
+ bp->msix_table = tbl;
+
+ /* ilt */
+ ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
+ if (!ilt)
+ goto alloc_err;
+ bp->ilt = ilt;
+
+ return 0;
+alloc_err:
+ bnx2x_free_mem_bp(bp);
+ return -ENOMEM;
+}
+
+int bnx2x_reload_if_running(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (unlikely(!netif_running(dev)))
+ return 0;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = 0;
+ if (bp->link_params.num_phys <= 1)
+ return INT_PHY;
+
+ if (bp->link_vars.link_up) {
+ sel_phy_idx = EXT_PHY1;
+ /* In case link is SERDES, check if the EXT_PHY2 is the one */
+ if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+ (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+ sel_phy_idx = EXT_PHY2;
+ } else {
+
+ switch (bnx2x_phy_selection(&bp->link_params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY2;
+ break;
+ }
+ }
+
+ return sel_phy_idx;
+}
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+	/*
+	 * The selected activated PHY index always refers to the post-swap
+	 * ordering (in case PHY swapping is enabled). So when swapping is
+	 * enabled, we need to reverse the configuration index.
+	 */
+
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ if (sel_phy_idx == EXT_PHY1)
+ sel_phy_idx = EXT_PHY2;
+ else if (sel_phy_idx == EXT_PHY2)
+ sel_phy_idx = EXT_PHY1;
+ }
+ return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
+#ifdef NETDEV_FCOE_WWNN
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ switch (type) {
+ case NETDEV_FCOE_WWNN:
+ *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
+ cp->fcoe_wwn_node_name_lo);
+ break;
+ case NETDEV_FCOE_WWPN:
+ *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
+ cp->fcoe_wwn_port_name_lo);
+ break;
+ default:
+ BNX2X_ERR("Wrong WWN type requested - %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+/* called with rtnl_lock */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
+ return -EPERM;
+ }
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ BNX2X_ERR("Can't perform change MTU during parity recovery\n");
+ return -EAGAIN;
+ }
+
+ /* This does not race with packet allocation
+ * because the actual alloc size is
+ * only updated as part of load
+ */
+ dev->mtu = new_mtu;
+
+ if (!bnx2x_mtu_allows_gro(new_mtu))
+ dev->features &= ~NETIF_F_GRO_HW;
+
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
+ return bnx2x_reload_if_running(dev);
+}
+
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (pci_num_vf(bp->pdev)) {
+ netdev_features_t changed = dev->features ^ features;
+
+ /* Revert the requested changes in features if they
+ * would require internal reload of PF in bnx2x_set_features().
+ */
+ if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
+ features &= ~NETIF_F_RXCSUM;
+ features |= dev->features & NETIF_F_RXCSUM;
+ }
+
+ if (changed & NETIF_F_LOOPBACK) {
+ features &= ~NETIF_F_LOOPBACK;
+ features |= dev->features & NETIF_F_LOOPBACK;
+ }
+ }
+
+ /* TPA requires Rx CSUM offloading */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
+ features &= ~NETIF_F_GRO_HW;
+ if (features & NETIF_F_GRO_HW)
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool bnx2x_reload = false;
+ int rc;
+
+	/* VFs or non-SRIOV PFs should be able to change the loopback feature */
+ if (!pci_num_vf(bp->pdev)) {
+ if (features & NETIF_F_LOOPBACK) {
+ if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+ bnx2x_reload = true;
+ }
+ } else {
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+ bnx2x_reload = true;
+ }
+ }
+ }
+
+ /* Don't care about GRO changes */
+ changes &= ~NETIF_F_GRO;
+
+ if (changes)
+ bnx2x_reload = true;
+
+ if (bnx2x_reload) {
+ if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
+ dev->features = features;
+ rc = bnx2x_reload_if_running(dev);
+ return rc ? rc : 1;
+ }
+ /* else: bnx2x_nic_load() will be called at end of recovery */
+ }
+
+ return 0;
+}
+
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* We want the information of the dump logged,
+ * but calling bnx2x_panic() would kill all chances of recovery.
+ */
+ if (!bp->panic)
+#ifndef BNX2X_STOP_ON_ERROR
+ bnx2x_panic_dump(bp, false);
+#else
+ bnx2x_panic();
+#endif
+
+ /* This allows the netif to be shutdown gracefully before resetting */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
+}
+
+static int __maybe_unused bnx2x_suspend(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return -ENODEV;
+ }
+ bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ if (!netif_running(dev)) {
+ rtnl_unlock();
+ return 0;
+ }
+
+ netif_device_detach(dev);
+
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int __maybe_unused bnx2x_resume(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+ int rc;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return -ENODEV;
+ }
+ bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ BNX2X_ERR("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ rtnl_lock();
+
+ if (!netif_running(dev)) {
+ rtnl_unlock();
+ return 0;
+ }
+
+ netif_device_attach(dev);
+
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+ u32 cid)
+{
+ if (!cxt) {
+ BNX2X_ERR("bad context pointer %p\n", cxt);
+ return;
+ }
+
+ /* ustorm cxt validation */
+ cxt->ustorm_ag_context.cdu_usage =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+ /* xcontext validation */
+ cxt->xstorm_ag_context.cdu_reserved =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+ u8 fw_sb_id, u8 sb_index,
+ u8 ticks)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+ REG_WR8(bp, addr, ticks);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
+}
+
+static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 disable)
+{
+ u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+ u8 flags = REG_RD8(bp, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= enable_flag;
+ REG_WR8(bp, addr, flags);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec)
+{
+ int port = BP_PORT(bp);
+ u8 ticks = usec / BNX2X_BTR;
+
+ storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
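+	/* A zero timeout implicitly disables coalescing for this index */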
+ disable = disable ? 1 : (usec ? 0 : 1);
+ storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+ u32 verbose)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &bp->sp_rtnl_state);
+ smp_mb__after_atomic();
+ DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+ flag);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
new file mode 100644
index 0000000000..d8b1824c33
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,1403 @@
+/* bnx2x_cmn.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_CMN_H
+#define BNX2X_CMN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/irq.h>
+
+#include "bnx2x.h"
+#include "bnx2x_sriov.h"
+
+/* This is used as a replacement for an MCP if it's not present */
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
+
+/************************ Macros ********************************/
+#define BNX2X_PCI_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define BNX2X_FREE(x) \
+ do { \
+ if (x) { \
+ kfree((void *)x); \
+ x = NULL; \
+ } \
+ } while (0)
+
+#define BNX2X_PCI_ALLOC(y, size) \
+({ \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ x; \
+})
+#define BNX2X_PCI_FALLOC(y, size) \
+({ \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) { \
+ memset(x, 0xff, size); \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ } \
+ x; \
+})
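+
+/* Typical allocation pattern using the macros above (a sketch mirroring the
+ * existing calls in bnx2x_alloc_fp_mem_at() in bnx2x_cmn.c; ring, mapping
+ * and size stand in for the caller's variables):
+ *
+ *	ring = BNX2X_PCI_ALLOC(&mapping, size);
+ *	if (!ring)
+ *		goto alloc_mem_err;
+ */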
+
+/*********************** Interfaces ****************************
+ * Functions that need to be implemented by each driver version
+ */
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ * @keep_link: true iff link should be kept up
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
+
+/**
+ * bnx2x_rss - configure RSS parameters in a PF.
+ *
+ * @bp:		driver handle
+ * @rss_obj:	RSS object to use
+ * @config_hash:	re-configure the RSS hash keys configuration
+ * @enable:	enable or disable the RSS configuration
+ */
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp: driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp: driver handle
+ * @fp: pointer to the fastpath structure
+ * @leading:	true iff this is the leading queue
+ *
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp: driver handle
+ * @command: request
+ * @param: request's parameter
+ *
+ * block until there is a reply
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
+
+/**
+ * bnx2x_initial_phy_init - initialize link parameters structure variables.
+ *
+ * @bp: driver handle
+ * @load_mode: current mode
+ */
+int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+
+/**
+ * bnx2x_link_set - configure hw according to link parameters structure.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_link_set(struct bnx2x *bp);
+
+/**
+ * bnx2x_force_link_reset - Forces a link reset, and puts the PHY
+ * in reset as well.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_force_link_reset(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_test - query link status.
+ *
+ * @bp: driver handle
+ * @is_serdes: bool
+ *
+ * Returns 0 if link is UP.
+ */
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
+
+/**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp: driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp: driver handle
+ * @igu_sb_id: SB id
+ * @segment: SB segment
+ * @index: SB index
+ * @op: SB operation
+ * @update: is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update);
+
+/* Disable transactions from chip to host */
+void bnx2x_pf_disable(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
+
+/**
+ * bnx2x__link_status_update - handles link status change.
+ *
+ * @bp: driver handle
+ */
+void bnx2x__link_status_update(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_report - report link status to upper layer.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_link_report(struct bnx2x *bp);
+
+/* Non-atomic version of bnx2x_link_report() */
+void __bnx2x_link_report(struct bnx2x *bp);
+
+/**
+ * bnx2x_get_mf_speed - calculate MF speed.
+ *
+ * @bp: driver handle
+ *
+ * Takes into account current linespeed and MF configuration.
+ */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp);
+
+/**
+ * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
+ *
+ * @irq: irq number
+ * @dev_instance: private instance
+ */
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
+
+/**
+ * bnx2x_interrupt - non MSI-X interrupt handler
+ *
+ * @irq: irq number
+ * @dev_instance: private instance
+ */
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
+
+/**
+ * bnx2x_cnic_notify - send command to cnic driver
+ *
+ * @bp: driver handle
+ * @cmd: command
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+
+/**
+ * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_int_enable - enable HW interrupts.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_int_enable(struct bnx2x *bp);
+
+/**
+ * bnx2x_int_disable_sync - disable interrupts.
+ *
+ * @bp: driver handle
+ * @disable_hw: if true, disable HW interrupts as well.
+ *
+ * This function ensures that no ISRs or SP DPCs (sp_task)
+ * are running after it returns.
+ */
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
+
+/**
+ * bnx2x_nic_init_cnic - init driver internals for cnic.
+ *
+ * @bp: driver handle
+ *
+ * Initializes:
+ * - rings
+ * - status blocks
+ * - etc.
+ */
+void bnx2x_nic_init_cnic(struct bnx2x *bp);
+
+/**
+ * bnx2x_pre_irq_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ *
+ * Initializes:
+ * - fastpath object
+ * - fastpath rings
+ * - etc.
+ */
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
+
+/**
+ * bnx2x_post_irq_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ * @load_code: COMMON, PORT or FUNCTION
+ *
+ * Initializes:
+ * - status blocks
+ * - slowpath rings
+ * - etc.
+ */
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
+/**
+ * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
+/**
+ * bnx2x_alloc_mem - allocate driver's memory.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_alloc_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem_cnic - release driver's memory for cnic.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_free_mem_cnic(struct bnx2x *bp);
+/**
+ * bnx2x_free_mem - release driver's memory.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_free_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_num_queues - set number of queues according to mode.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_set_num_queues(struct bnx2x *bp);
+
+/**
+ * bnx2x_chip_cleanup - cleanup chip internals.
+ *
+ * @bp: driver handle
+ * @unload_mode: COMMON, PORT, FUNCTION
+ * @keep_link: true iff link should be kept up.
+ *
+ * - Cleanup MAC configuration.
+ * - Closes clients.
+ * - etc.
+ */
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
+
+/**
+ * bnx2x_acquire_hw_lock - acquire HW lock.
+ *
+ * @bp: driver handle
+ * @resource: resource bit which was locked
+ */
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_hw_lock - release HW lock.
+ *
+ * @bp: driver handle
+ * @resource: resource bit which was locked
+ */
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_leader_lock - release recovery leader lock
+ *
+ * @bp: driver handle
+ */
+int bnx2x_release_leader_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_eth_mac - configure eth MAC address in the HW
+ *
+ * @bp: driver handle
+ * @set: set or clear
+ *
+ * Configures according to the value in netdev->dev_addr.
+ */
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
+
+/**
+ * bnx2x_set_rx_mode_inner - set MAC filtering configurations.
+ *
+ * @bp: driver handle
+ *
+ * Called with netif_tx_lock from dev_mcast.c.
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
+ */
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
+
+/* Parity errors related */
+void bnx2x_set_pf_load(struct bnx2x *bp);
+bool bnx2x_clear_pf_load(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
+void bnx2x_set_reset_in_progress(struct bnx2x *bp);
+void bnx2x_set_reset_global(struct bnx2x *bp);
+void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_sp_event - handle ramrods completion.
+ *
+ * @fp: fastpath handle for the event
+ * @rr_cqe: eth_rx_cqe
+ */
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
+
+/**
+ * bnx2x_ilt_set_info - prepare ILT configurations.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_ilt_set_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC
+ * and TM.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
+
+/**
+ * bnx2x_dcbx_init - initialize dcbx protocol.
+ *
+ * @bp: driver handle
+ * @update_shmem: whether to update the admin MIB in shmem
+ */
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
+
+/**
+ * bnx2x_set_power_state - set power state to the requested value.
+ *
+ * @bp: driver handle
+ * @state: required state D0 or D3hot
+ *
+ * Currently only D0 and D3hot are supported.
+ */
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/**
+ * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
+ *
+ * @bp: driver handle
+ * @value: new value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
+
+/* dev_close main block */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
+
+/* dev_open main block */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+/* hard_xmit callback */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/* setup_tc callback */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi);
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
+int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
+
+/* select_queue callback */
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod,
+ u16 rx_sge_prod)
+{
+ struct ustorm_eth_rx_producers rx_prods = {0};
+ u32 i;
+
+ /* Update producers */
+ rx_prods.bd_prod = bd_prod;
+ rx_prods.cqe_prod = rx_comp_prod;
+ rx_prods.sge_prod = rx_sge_prod;
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW
+	 * assumes BDs must have buffers.
+ */
+ wmb();
+
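+	/* Copy the producers to the chip as consecutive 32-bit words */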
+ for (i = 0; i < sizeof(rx_prods)/4; i++)
+ REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
+ ((u32 *)&rx_prods)[i]);
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+ fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
+/* reload helper */
+int bnx2x_reload_if_running(struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+
+/* NAPI poll Tx part */
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
+
+extern const struct dev_pm_ops bnx2x_pm_ops;
+
+/* Release IRQ vectors */
+void bnx2x_free_irq(struct bnx2x *bp);
+
+void bnx2x_free_fp_mem(struct bnx2x *bp);
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
+void bnx2x_free_skbs(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_netif_start(struct bnx2x *bp);
+int bnx2x_load_cnic(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msix - set msix configuration.
+ *
+ * @bp: driver handle
+ *
+ * fills msix_table, requests vectors, updates num_queues
+ * according to number of available vectors.
+ */
+int bnx2x_enable_msix(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
+ *
+ * @bp: driver handle
+ */
+int bnx2x_enable_msi(struct bnx2x *bp);
+
+/**
+ * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
+ *
+ * @bp: driver handle
+ */
+int bnx2x_alloc_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem_bp - release memory outside the main driver structure
+ *
+ * @bp: driver handle
+ */
+void bnx2x_free_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_change_mtu - change mtu netdev callback
+ *
+ * @dev: net device
+ * @new_mtu: requested mtu
+ *
+ */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+
+#ifdef NETDEV_FCOE_WWNN
+/**
+ * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
+ *
+ * @dev: net_device
+ * @wwn: output buffer
+ * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
+ *
+ */
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
+#endif
+
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
+
+/**
+ * bnx2x_tx_timeout - tx timeout netdev callback
+ *
+ * @dev: net device
+ * @txqueue: index of the timed-out tx queue
+ */
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue);
+
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ *
+ * @bp: driver handle
+ * @c2s_map: output buffer; must have BNX2X_MAX_PRIORITY entries
+ * @c2s_default: entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+{
+ barrier(); /* status block is written to by the chip */
+ fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
+}
+
+static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
+ u8 segment, u16 index, u8 op,
+ u8 update, u32 igu_addr)
+{
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr);
+ REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
+
+ /* Make sure that ACK is written */
+ barrier();
+}
+
+static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
+ u8 storm, u16 index, u8 op, u8 update)
+{
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_INT_ACK);
+ struct igu_ack_register igu_ack;
+
+ igu_ack.status_block_index = index;
+ igu_ack.sb_id_and_flags =
+ ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+ (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+ (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+ (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+ REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
+
+ /* Make sure that ACK is written */
+ barrier();
+}
+
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
+ u16 index, u8 op, u8 update)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
+ else {
+ u8 segment;
+
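+		/* In backward-compatible mode the storm id doubles as the
+		 * IGU segment; otherwise non-default SBs use the default
+		 * segment and only attentions on the default SB use the
+		 * attention segment.
+		 */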
+ if (CHIP_INT_MODE_IS_BC(bp))
+ segment = storm;
+ else if (igu_sb_id != bp->igu_dsb_id)
+ segment = IGU_SEG_ACCESS_DEF;
+ else if (storm == ATTENTION_ID)
+ segment = IGU_SEG_ACCESS_ATTN;
+ else
+ segment = IGU_SEG_ACCESS_DEF;
+ bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
+ }
+}
+
+static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
+{
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_SIMD_MASK);
+ u32 result = REG_RD(bp, hc_addr);
+
+ barrier();
+ return result;
+}
+
+static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
+{
+ u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
+ u32 result = REG_RD(bp, igu_addr);
+
+ DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
+ result, igu_addr);
+
+ barrier();
+ return result;
+}
+
+static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+ barrier();
+ if (bp->common.int_block == INT_BLOCK_HC)
+ return bnx2x_hc_ack_int(bp);
+ else
+ return bnx2x_igu_ack_int(bp);
+}
+
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
+{
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
+}
+
+static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata)
+{
+ s16 used;
+ u16 prod;
+ u16 cons;
+
+ prod = txdata->tx_bd_prod;
+ cons = txdata->tx_bd_cons;
+
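+	/* prod and cons are free-running counters; a signed 16-bit
+	 * subtraction yields the number of used BDs even across
+	 * wraparound.
+	 */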
+ used = SUB_S16(prod, cons);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ WARN_ON(used < 0);
+ WARN_ON(used > txdata->tx_ring_size);
+ WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
+#endif
+
+ return (s16)(txdata->tx_ring_size) - used;
+}
+
+static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
+{
+ u16 hw_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ return hw_cons != txdata->tx_pkt_cons;
+}
+
+static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+ u8 cos;
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+ return true;
+ return false;
+}
+
+#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
+#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
+static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+ u16 cons;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
+
+ cons = RCQ_BD(fp->rx_comp_cons);
+ cqe = &fp->rx_comp_ring[cons];
+ cqe_fp = &cqe->fast_path_cqe;
+ return BNX2X_IS_CQE_COMPLETED(cqe_fp);
+}
+
+/**
+ * bnx2x_tx_disable - disable tx from the stack's point of view
+ *
+ * @bp: driver handle
+ */
+static inline void bnx2x_tx_disable(struct bnx2x *bp)
+{
+ netif_tx_disable(bp->dev);
+ netif_carrier_off(bp->dev);
+}
+
+static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
+{
+ struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+ struct page *page = sw_buf->page;
+ struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+
+ /* Skip "next page" elements */
+ if (!page)
+ return;
+
+ /* Since many fragments can share the same page, make sure to
+ * only unmap and free the page once.
+ */
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ put_page(page);
+
+ sw_buf->page = NULL;
+ sge->addr_hi = 0;
+ sge->addr_lo = 0;
+}
+
+static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i) {
+ __netif_napi_del(&bnx2x_fp(bp, i, napi));
+ }
+ synchronize_net();
+}
+
+static inline void bnx2x_del_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i) {
+ __netif_napi_del(&bnx2x_fp(bp, i, napi));
+ }
+ synchronize_net();
+}
+
+int bnx2x_set_int_mode(struct bnx2x *bp);
+
+static inline void bnx2x_disable_msi(struct bnx2x *bp)
+{
+ if (bp->flags & USING_MSIX_FLAG) {
+ pci_disable_msix(bp->pdev);
+ bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
+ } else if (bp->flags & USING_MSI_FLAG) {
+ pci_disable_msi(bp->pdev);
+ bp->flags &= ~USING_MSI_FLAG;
+ }
+}
+
+static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+{
+ int i, j;
+
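+	/* The last two elements of every SGE page are "next page"
+	 * pointers rather than real SGEs, so clear their mask bits.
+	 */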
+ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+ int idx = RX_SGE_CNT * i - 1;
+
+ for (j = 0; j < 2; j++) {
+ BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
+ idx--;
+ }
+ }
+}
+
+static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
+{
+ /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
+ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
+
+	/* Clear the two last indices in each page: these correspond to
+	 * the "next page" elements, which will never be indicated by the
+	 * FW and hence must be removed from the calculations.
+	 */
+ bnx2x_clear_sge_mask_next_elems(fp);
+}
+
+/* note that we are not allocating a new buffer,
+ * we are just moving one from cons to prod
+ * we are not creating a new mapping,
+ * so there is no need to check for dma_mapping_error().
+ */
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
+ u16 cons, u16 prod)
+{
+ struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+ struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+ struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+ struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
+ prod_rx_buf->data = cons_rx_buf->data;
+ *prod_bd = *cons_bd;
+}
+
+/************************* Init ******************************************/
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
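+	/* Function IDs interleave across the two ports: VN n on this
+	 * port is function 2*n + port.
+	 */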
+ return 2 * vn + BP_PORT(bp);
+}
+
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
+{
+ return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
+}
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp: driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_start_params *start_params =
+ &func_params.params.start;
+ u16 port;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_START;
+
+ /* Function parameters */
+ start_params->mf_mode = bp->mf_mode;
+ start_params->sd_vlan_tag = bp->mf_ov;
+
+ /* Configure Ethertype for BD mode */
+ if (IS_MF_BD(bp)) {
+ DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+ start_params->sd_vlan_eth_type = ETH_P_8021AD;
+ REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+ bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+ &start_params->c2s_pri_default);
+ start_params->c2s_pri_valid = 1;
+
+ DP(NETIF_MSG_IFUP,
+ "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+ start_params->c2s_pri[0], start_params->c2s_pri[1],
+ start_params->c2s_pri[2], start_params->c2s_pri[3],
+ start_params->c2s_pri[4], start_params->c2s_pri[5],
+ start_params->c2s_pri[6], start_params->c2s_pri[7],
+ start_params->c2s_pri_default);
+ }
+
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
+ start_params->network_cos_mode = STATIC_COS;
+ else /* CHIP_IS_E1X */
+ start_params->network_cos_mode = FW_WRR;
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
+ port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
+ start_params->vxlan_dst_port = port;
+ }
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
+ port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
+ start_params->geneve_dst_port = port;
+ }
+
+ start_params->inner_rss = 1;
+
+ if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+ start_params->class_fail_ethtype = ETH_P_FIP;
+ start_params->class_fail = 1;
+ start_params->no_added_tags = 1;
+ }
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi: pointer to upper part
+ * @fw_mid: pointer to middle part
+ * @fw_lo: pointer to lower part
+ * @mac: pointer to MAC address
+ */
+static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
+{
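+	/* Each 16-bit FW word holds a consecutive MAC byte pair with
+	 * the two bytes swapped.
+	 */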
+ ((u8 *)fw_hi)[0] = mac[1];
+ ((u8 *)fw_hi)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lo)[0] = mac[5];
+ ((u8 *)fw_lo)[1] = mac[4];
+}
+
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ struct bnx2x_alloc_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ put_page(pool->page);
+
+ pool->page = NULL;
+}
+
+static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, int last)
+{
+ int i;
+
+ if (fp->mode == TPA_MODE_DISABLED)
+ return;
+
+ for (i = 0; i < last; i++)
+ bnx2x_free_rx_sge(bp, fp, i);
+
+ bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
+}
+
+static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
+{
+ int i;
+
+ for (i = 1; i <= NUM_RX_RINGS; i++) {
+ struct eth_rx_bd *rx_bd;
+
+ rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+ rx_bd->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+ rx_bd->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+ }
+}
+
+/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ if (!CHIP_IS_E1x(bp)) {
+ /* there are special statistics counters for FCoE 136..140 */
+ if (IS_FCOE_FP(fp))
+ return bp->cnic_base_cl_id + (bp->pf_num >> 1);
+ return fp->cl_id;
+ }
+ return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
+}
+
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+ bnx2x_obj_type obj_type)
+{
+ struct bnx2x *bp = fp->bp;
+
+ /* Configure classification DBs */
+ bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+ fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, obj_type,
+ &bp->macs_pool);
+
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+ fp->cl_id, fp->cid, BP_FUNC(bp),
+ bnx2x_sp(bp, vlan_rdata),
+ bnx2x_sp_mapping(bp, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &bp->sp_state, obj_type,
+ &bp->vlans_pool);
+}
+
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp: driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+ u8 func_num = 0, i;
+
+ /* 57710 has only one function per-port */
+ if (CHIP_IS_E1(bp))
+ return 1;
+
+	/* Calculate the number of functions enabled on the current
+ * PATH/PORT.
+ */
+ if (CHIP_REV_IS_SLOW(bp)) {
+ if (IS_MF(bp))
+ func_num = 4;
+ else
+ func_num = 2;
+ } else {
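+		/* Functions on a path share the path's parity - walk
+		 * BP_PATH, BP_PATH + 2, ... and count those not hidden.
+		 */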
+ for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+ u32 func_config =
+ MF_CFG_RD(bp,
+ func_mf_config[BP_PATH(bp) + 2 * i].
+ config);
+ func_num +=
+ ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+ }
+ }
+
+ WARN_ON(!func_num);
+
+ return func_num;
+}
+
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+ /* RX_MODE controlling object */
+ bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+ /* multicast configuration controlling object */
+ bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+ BP_FUNC(bp), BP_FUNC(bp),
+ bnx2x_sp(bp, mcast_rdata),
+ bnx2x_sp_mapping(bp, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+ BNX2X_OBJ_TYPE_RX);
+
+ /* Setup CAM credit pools */
+ bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+ bnx2x_get_path_func_num(bp));
+
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
+ bnx2x_get_path_func_num(bp));
+
+ /* RSS configuration object */
+ bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+ bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+ bnx2x_sp(bp, rss_rdata),
+ bnx2x_sp_mapping(bp, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+ BNX2X_OBJ_TYPE_RX);
+
+ bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+ else
+ return fp->cl_id;
+}
+
+static inline void bnx2x_init_txdata(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata, u32 cid,
+ int txq_index, __le16 *tx_cons_sb,
+ struct bnx2x_fastpath *fp)
+{
+ txdata->cid = cid;
+ txdata->txq_index = txq_index;
+ txdata->tx_cons_sb = tx_cons_sb;
+ txdata->parent_fp = fp;
+ txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;
+
+ DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
+ txdata->cid, txdata->txq_index);
+}
+
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+ return bp->cnic_base_cl_id + cl_idx +
+ (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+ /* the 'first' id is allocated for the cnic */
+ return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+ return bp->igu_base_sb;
+}
+
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata)
+{
+ int cnt = 1000;
+
+ while (bnx2x_has_tx_work_unload(txdata)) {
+ if (!cnt) {
+ BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+ txdata->txq_index, txdata->tx_pkt_prod,
+ txdata->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+ return -EBUSY;
+#else
+ break;
+#endif
+ }
+ cnt--;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
+static inline void __storm_memset_struct(struct bnx2x *bp,
+ u32 addr, size_t size, u32 *data)
+{
+ int i;
+ for (i = 0; i < size/4; i++)
+ REG_WR(bp, addr + (i * 4), data[i]);
+}
+
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
+ *
+ * @bp: driver handle
+ * @mask: bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+ int tout = 5000; /* Wait for 5 secs tops */
+
+ while (tout--) {
+ smp_mb();
+ netif_addr_lock_bh(bp->dev);
+ if (!(bp->sp_state & mask)) {
+ netif_addr_unlock_bh(bp->dev);
+ return true;
+ }
+ netif_addr_unlock_bh(bp->dev);
+
+ usleep_range(1000, 2000);
+ }
+
+ smp_mb();
+
+ netif_addr_lock_bh(bp->dev);
+ if (bp->sp_state & mask) {
+ BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
+ bp->sp_state, mask);
+ netif_addr_unlock_bh(bp->dev);
+ return false;
+ }
+ netif_addr_unlock_bh(bp->dev);
+
+ return true;
+}
+
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp: driver handle
+ * @cxt: context of the connection on the host memory
+ * @cid: SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+ u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec);
+void bnx2x_acquire_phy_lock(struct bnx2x *bp);
+void bnx2x_release_phy_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
+ *
+ * @bp: driver handle
+ * @mf_cfg: MF configuration
+ *
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+ u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (!max_cfg) {
+ DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
+ "Max BW configured to 0 - using 100 instead\n");
+ max_cfg = 100;
+ }
+ return max_cfg;
+}
+
+/* checks if HW supports GRO for given MTU */
+static inline bool bnx2x_mtu_allows_gro(int mtu)
+{
+ /* gro frags per page */
+ int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
+
+ /*
+ * 1. Number of frags should not grow above MAX_SKB_FRAGS
+ * 2. Frag must fit the page
+ */
+ return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
+}
+
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp: driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp: driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
+ continue;
+
+ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp: driver handle
+ * @flags: flags to update
+ * @set: set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
+ }
+}
+
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+ u32 verbose);
+
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp: driver handle
+ * @state: OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp: driver handle
+ * @offset: byte offset in nvram
+ * @ret_buf: pointer to buffer where data is to be stored
+ * @buf_size: Length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size);
+
+#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
new file mode 100644
index 0000000000..17ae6df907
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2584 @@
+/* bnx2x_dcb.c: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+
+/* forward declarations of dcbx related functions */
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl);
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data);
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread);
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params*);
+
+/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
+static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ *buff = REG_RD(bp, addr + i);
+}
+
+static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ REG_WR(bp, addr + i, *buff);
+}
+
+static void bnx2x_pfc_set(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
+ u32 pri_bit, val = 0;
+ int i;
+
+ pfc_params.num_of_rx_cos_priority_mask =
+ bp->dcbx_port_params.ets.num_of_cos;
+
+ /* Tx COS configuration */
+ for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+		/*
+		 * We configure only the pauseable bits (non-pauseable aren't
+		 * configured at all); this is done to avoid false pauses from
+		 * the network.
+		 */
+ pfc_params.rx_cos_priority_mask[i] =
+ bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+ & DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+	/*
+	 * Rx COS configuration - change the PFC RX configuration.
+	 * RX COS0 is always configured as lossless and COS1 as lossy.
+	 */
+ for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
+ pri_bit = 1 << i;
+
+ if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
+ val |= 1 << (i * 4);
+ }
+
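+	/* Each priority owns a 4-bit field in pkt_priority_to_cos;
+	 * non-pauseable priorities were mapped to COS1 above while
+	 * pauseable ones stay on COS0.
+	 */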
+ pfc_params.pkt_priority_to_cos = val;
+
+ /* RX COS0 */
+ pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
+ /* RX COS1 */
+ pfc_params.llfc_high_priority_classes = 0;
+
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_pfc_clear(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
+ nig_params.pause_enable = 1;
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ u8 i = 0;
+ DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
+
+ /* PG */
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.enabled %x\n", features->ets.enabled);
+ for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
+ DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
+ DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
+
+ /* pfc */
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n",
+ features->pfc.pri_en_bitmap);
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n",
+ features->pfc.pfc_caps);
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n",
+ features->pfc.enabled);
+
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n",
+ features->app.default_pri);
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n",
+ features->app.tc_supported);
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n",
+ features->app.enabled);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ DP(BNX2X_MSG_DCB,
+ "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
+ i, features->app.app_pri_tbl[i].app_id);
+ DP(BNX2X_MSG_DCB,
+ "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
+ i, features->app.app_pri_tbl[i].pri_bitmap);
+ DP(BNX2X_MSG_DCB,
+ "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
+ i, features->app.app_pri_tbl[i].appBitfield);
+ }
+}
+
+static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
+ u8 pri_bitmap,
+ u8 llfc_traf_type)
+{
+ u32 pri = MAX_PFC_PRIORITIES;
+ u32 index = MAX_PFC_PRIORITIES - 1;
+ u32 pri_mask;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ /* Choose the highest priority */
+ while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
+ pri_mask = 1 << index;
+ if (GET_FLAGS(pri_bitmap, pri_mask))
+			pri = index;
+ index--;
+ }
+
+ if (pri < MAX_PFC_PRIORITIES)
+ ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
+}
+
+static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
+ struct dcbx_app_priority_feature *app,
+ u32 error) {
+ u8 index;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ u8 iscsi_pri_found = 0, fcoe_pri_found = 0;
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n");
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n");
+
+ if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n");
+ if (app->enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_APP_TLV_NOT_FOUND)) {
+
+ bp->dcbx_port_params.app.enabled = true;
+
+ /* Use 0 as the default application priority for all. */
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = 0;
+
+ for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
+ struct dcbx_app_priority_entry *entry =
+ app->app_pri_tbl;
+ enum traffic_type type = MAX_TRAFFIC_TYPE;
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_DEFAULT) &&
+ GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE)) {
+ type = LLFC_TRAFFIC_TYPE_NW;
+ } else if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_PORT) &&
+ TCP_PORT_ISCSI == entry[index].app_id) {
+ type = LLFC_TRAFFIC_TYPE_ISCSI;
+ iscsi_pri_found = 1;
+ } else if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE) &&
+ ETH_TYPE_FCOE == entry[index].app_id) {
+ type = LLFC_TRAFFIC_TYPE_FCOE;
+ fcoe_pri_found = 1;
+ }
+
+ if (type == MAX_TRAFFIC_TYPE)
+ continue;
+
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ type);
+ }
+
+ /* If we have received a non-zero default application
+ * priority, then use that for applications which are
+ * not configured with any priority.
+ */
+ if (ttp[LLFC_TRAFFIC_TYPE_NW] != 0) {
+ if (!iscsi_pri_found) {
+ ttp[LLFC_TRAFFIC_TYPE_ISCSI] =
+ ttp[LLFC_TRAFFIC_TYPE_NW];
+ DP(BNX2X_MSG_DCB,
+ "ISCSI is using default priority.\n");
+ }
+ if (!fcoe_pri_found) {
+ ttp[LLFC_TRAFFIC_TYPE_FCOE] =
+ ttp[LLFC_TRAFFIC_TYPE_NW];
+ DP(BNX2X_MSG_DCB,
+ "FCoE is using default priority.\n");
+ }
+ }
+ } else {
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n");
+ bp->dcbx_port_params.app.enabled = false;
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
+ }
+}
+
+static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ u32 error) {
+ int i = 0;
+ u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
+ struct pg_help_data pg_help_data;
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ memset(&pg_help_data, 0, sizeof(struct pg_help_data));
+
+ if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");
+
+ if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n");
+
+ /* Clean up old settings of ets on COS */
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
+ cos_params[i].pauseable = false;
+ cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
+ cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
+ cos_params[i].pri_bitmask = 0;
+ }
+
+ if (bp->dcbx_port_params.app.enabled && ets->enabled &&
+ !GET_FLAGS(error,
+ DCBX_LOCAL_ETS_ERROR | DCBX_REMOTE_ETS_TLV_NOT_FOUND)) {
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n");
+ bp->dcbx_port_params.ets.enabled = true;
+
+ bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
+ pg_pri_orginal_spread,
+ ets->pri_pg_tbl);
+
+ bnx2x_dcbx_get_num_pg_traf_type(bp,
+ pg_pri_orginal_spread,
+ &pg_help_data);
+
+ bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
+ ets, pg_pri_orginal_spread);
+
+ } else {
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n");
+ bp->dcbx_port_params.ets.enabled = false;
+ ets->pri_pg_tbl[0] = 0;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
+ DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
+ }
+}
+
+static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
+ struct dcbx_pfc_feature *pfc, u32 error)
+{
+ if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");
+
+ if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_PFC_TLV_NOT_FOUND\n");
+ if (bp->dcbx_port_params.app.enabled && pfc->enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_PFC_TLV_NOT_FOUND)) {
+ bp->dcbx_port_params.pfc.enabled = true;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
+ ~(pfc->pri_en_bitmap);
+ } else {
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n");
+ bp->dcbx_port_params.pfc.enabled = false;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
+ }
+}
+
+/* maps unmapped priorities to the same COS as L2 */
+static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
+{
+ int i;
+ u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ /* get unmapped priorities by clearing mapped bits */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ unmapped &= ~(1 << ttp[i]);
+
+ /* find cos for nw prio and extend it with unmapped */
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
+ if (cos_params[i].pri_bitmask & nw_prio) {
+ /* extend the bitmask with unmapped */
+ DP(BNX2X_MSG_DCB,
+ "cos %d extended with 0x%08x\n", i, unmapped);
+ cos_params[i].pri_bitmask |= unmapped;
+ break;
+ }
+ }
+}
+
+static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
+
+ bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
+
+ bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+
+ bnx2x_dcbx_map_nw(bp);
+}
+
+#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
+static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
+ u32 *base_mib_addr,
+ u32 offset,
+ int read_mib_type)
+{
+ int max_try_read = 0;
+ u32 mib_size, prefix_seq_num, suffix_seq_num;
+	struct lldp_remote_mib *remote_mib;
+ struct lldp_local_mib *local_mib;
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ mib_size = sizeof(struct lldp_local_mib);
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ mib_size = sizeof(struct lldp_remote_mib);
+ break;
+ default:
+ return 1; /*error*/
+ }
+
+ offset += BP_PORT(bp) * mib_size;
+
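+	/* The MIB is wrapped with prefix/suffix sequence numbers;
+	 * re-read until they match, indicating a consistent snapshot.
+	 */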
+ do {
+ bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
+
+ max_try_read++;
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ local_mib = (struct lldp_local_mib *) base_mib_addr;
+ prefix_seq_num = local_mib->prefix_seq_num;
+ suffix_seq_num = local_mib->suffix_seq_num;
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ remote_mib = (struct lldp_remote_mib *) base_mib_addr;
+ prefix_seq_num = remote_mib->prefix_seq_num;
+ suffix_seq_num = remote_mib->suffix_seq_num;
+ break;
+ default:
+ return 1; /*error*/
+ }
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
+
+ if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
+ BNX2X_ERR("MIB could not be read\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
+{
+ int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
+ if (bp->dcbx_port_params.pfc.enabled &&
+ (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
+		/*
+		 * 1. Fill up common PFC structures if required
+		 * 2. Configure NIG, MAC and BRB via the elink
+		 */
+ bnx2x_pfc_set(bp);
+ else
+ bnx2x_pfc_clear(bp);
+}
+
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
+}
+
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_tx_start_params *tx_params =
+ &func_params.params.tx_start;
+ int rc;
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ bnx2x_dcbx_fw_struct(bp, tx_params);
+
+ DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
+}
+
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ int rc = 0;
+
+ if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+ BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
+ return;
+ }
+
+ /* valid COS entries */
+ if (ets->num_of_cos == 1) /* no ETS */
+ return;
+
+ /* sanity */
+ if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
+ ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
+		BNX2X_ERR("all COS should have at least bw_limit or strict; "
+			  "ets->cos_params[0].strict=%x "
+			  "ets->cos_params[0].bw_tbl=%x "
+			  "ets->cos_params[1].strict=%x "
+			  "ets->cos_params[1].bw_tbl=%x\n",
+ ets->cos_params[0].strict,
+ ets->cos_params[0].bw_tbl,
+ ets->cos_params[1].strict,
+ ets->cos_params[1].bw_tbl);
+ return;
+ }
+ /* If we join a group and there is bw_tbl and strict then bw rules */
+ if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
+ (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
+ u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
+ u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
+ /* Do not allow 0-100 configuration
+ * since PBF does not support it
+ * force 1-99 instead
+ */
+ if (bw_tbl_0 == 0) {
+ bw_tbl_0 = 1;
+ bw_tbl_1 = 99;
+ } else if (bw_tbl_1 == 0) {
+ bw_tbl_1 = 1;
+ bw_tbl_0 = 99;
+ }
+
+ bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
+ } else {
+ if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 0);
+ else if (ets->cos_params[1].strict
+ == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 1);
+ if (rc)
+ BNX2X_ERR("update_ets_params failed\n");
+ }
+}
+
+/*
+ * In E3B0 the configuration may have more than 2 COS.
+ */
+static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ struct bnx2x_ets_params ets_params = { 0 };
+ u8 i;
+
+ ets_params.num_of_cos = ets->num_of_cos;
+
+ for (i = 0; i < ets->num_of_cos; i++) {
+ /* COS is SP */
+ if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS must be either BW or SP\n");
+ return;
+ }
+
+ ets_params.cos[i].state = bnx2x_cos_state_strict;
+ ets_params.cos[i].params.sp_params.pri =
+ ets->cos_params[i].strict;
+ } else { /* COS is BW */
+ if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS must be either BW or SP\n");
+ return;
+ }
+ ets_params.cos[i].state = bnx2x_cos_state_bw;
+ ets_params.cos[i].params.bw_params.bw =
+ (u8)ets->cos_params[i].bw_tbl;
+ }
+ }
+
+ /* Configure the ETS in HW */
+ if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+ &ets_params)) {
+ BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+ }
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+ int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+ if (!bp->dcbx_port_params.ets.enabled ||
+ ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured))
+ return;
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_update_ets_config(bp);
+ else
+ bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
+#ifdef BCM_DCBNL
+static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
+{
+ struct lldp_remote_mib remote_mib = {0};
+ u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
+ int rc;
+
+ DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n",
+ dcbx_remote_mib_offset);
+
+ if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
+ return -EINVAL;
+ }
+
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
+ DCBX_READ_REMOTE_MIB);
+
+ if (rc) {
+ BNX2X_ERR("Failed to read remote mib from FW\n");
+ return rc;
+ }
+
+ /* save features and flags */
+ bp->dcbx_remote_feat = remote_mib.features;
+ bp->dcbx_remote_flags = remote_mib.flags;
+ return 0;
+}
+#endif
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+ struct lldp_local_mib local_mib = {0};
+ u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+ int rc;
+
+ DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+ if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+ return -EINVAL;
+ }
+
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+ DCBX_READ_LOCAL_MIB);
+
+ if (rc) {
+ BNX2X_ERR("Failed to read local mib from FW\n");
+ return rc;
+ }
+
+ /* save features and error */
+ bp->dcbx_local_feat = local_mib.features;
+ bp->dcbx_error = local_mib.error;
+ return 0;
+}
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+ u8 pri;
+
+ /* Choose the highest priority */
+ for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+ if (ent->pri_bitmap & (1 << pri))
+ break;
+ return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+ return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+ DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+ DCB_APP_IDTYPE_ETHTYPE;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+ int i, err = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+ /* avoid invalid user-priority */
+ if (up) {
+ struct dcb_app app;
+ app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ app.protocol = ent->app_id;
+ app.priority = delall ? 0 : up;
+ err = dcb_setapp(bp->dev, &app);
+ }
+ }
+ }
+ return err;
+}
+#endif
+
+static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
+{
+ u8 prio, cos;
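+
+	/* Derive the prio-to-cos tx mapping from the negotiated
+	 * per-COS priority bitmasks.
+	 */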
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
+ & (1 << prio)) {
+ bp->prio_to_cos[prio] = cos;
+ DP(BNX2X_MSG_DCB,
+ "tx_mapping %d --> %d\n", prio, cos);
+ }
+ }
+ }
+
+ /* setup tc must be called under rtnl lock, but we can't take it here
+ * as we are handling an attention on a work queue which must be
+ * flushed at some rtnl-locked contexts (e.g. if down)
+ */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
+}
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
+ switch (state) {
+ case BNX2X_DCBX_STATE_NEG_RECEIVED:
+ {
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+#ifdef BCM_DCBNL
+		/*
+ * Delete app tlvs from dcbnl before reading new
+ * negotiation results
+ */
+ bnx2x_dcbnl_update_applist(bp, true);
+
+ /* Read remote mib if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_remote_mib(bp))
+ return;
+#endif
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ /* mark DCBX result for PMF migration */
+ bnx2x_update_drv_flags(bp,
+ 1 << DRV_FLAGS_DCB_CONFIGURED,
+ 1);
+#ifdef BCM_DCBNL
+ /*
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+#endif
+ /*
+ * reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
+ /*
+ * allow other functions to update their netdevices
+ * accordingly
+ */
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
+ return;
+ }
+ case BNX2X_DCBX_STATE_TX_PAUSED:
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+ bnx2x_pfc_set_pfc(bp);
+
+ bnx2x_dcbx_update_ets_params(bp);
+
+ /* ets may affect cmng configuration: reinit it in hw */
+ bnx2x_set_local_cmng(bp);
+ return;
+ case BNX2X_DCBX_STATE_TX_RELEASED:
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+#ifdef BCM_DCBNL
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ return;
+ default:
+ BNX2X_ERR("Unknown DCBX_STATE\n");
+ }
+}
+
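+/* Admin MIBs reside in shmem right after the per-port lldp_params
+ * array, one lldp_admin_mib per port.
+ */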
+#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
+ BP_PORT(bp)*sizeof(struct lldp_admin_mib))
+
+static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
+ u32 dcbx_lldp_params_offset)
+{
+ struct lldp_admin_mib admin_mib;
+ u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
+ u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
+	/* shortcuts */
+ /*shortcuts*/
+ struct dcbx_features *af = &admin_mib.features;
+ struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
+
+ memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
+
+ /* Read the data first */
+ bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+
+ if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
+
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
+ admin_mib.ver_cfg_flags |=
+ (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
+ DCBX_CEE_VERSION_MASK;
+
+ af->ets.enabled = (u8)dp->admin_ets_enable;
+
+ af->pfc.enabled = (u8)dp->admin_pfc_enable;
+
+ /* FOR IEEE dp->admin_tc_supported_tx_enable */
+ if (dp->admin_ets_configuration_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ /* For IEEE admin_ets_recommendation_tx_enable */
+ if (dp->admin_pfc_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+
+ if (dp->admin_application_priority_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+
+ if (dp->admin_ets_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ /* For IEEE admin_ets_reco_valid */
+ if (dp->admin_pfc_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+
+ if (dp->admin_app_priority_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+
+ for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
+ DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
+ (u8)dp->admin_configuration_bw_precentage[i]);
+
+ DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n",
+ i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
+ }
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
+ (u8)dp->admin_configuration_ets_pg[i]);
+
+ DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n",
+ i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
+ }
+
+		/* For IEEE admin_recommendation_bw_percentage
+		 * For IEEE admin_recommendation_ets_pg
+		 */
+ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
+ for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
+ if (dp->admin_priority_app_table[i].valid) {
+ struct bnx2x_admin_priority_app_table *table =
+ dp->admin_priority_app_table;
+ if ((ETH_TYPE_FCOE == table[i].app_id) &&
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type))
+ traf_type = FCOE_APP_IDX;
+ else if ((TCP_PORT_ISCSI == table[i].app_id) &&
+ (TRAFFIC_TYPE_PORT == table[i].traffic_type))
+ traf_type = ISCSI_APP_IDX;
+ else
+ traf_type = other_traf_type++;
+
+ af->app.app_pri_tbl[traf_type].app_id =
+ table[i].app_id;
+
+ af->app.app_pri_tbl[traf_type].pri_bitmap =
+ (u8)(1 << table[i].priority);
+
+ af->app.app_pri_tbl[traf_type].appBitfield =
+ (DCBX_APP_ENTRY_VALID);
+
+ af->app.app_pri_tbl[traf_type].appBitfield |=
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
+ DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
+ }
+ }
+
+ af->app.default_pri = (u8)dp->admin_default_priority;
+ }
+
+ /* Write the data. */
+ bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
+}
+
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
+{
+ if (!CHIP_IS_E1x(bp)) {
+ bp->dcb_state = dcb_on;
+ bp->dcbx_enabled = dcbx_enabled;
+ } else {
+ bp->dcb_state = false;
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
+ }
+ DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n",
+ dcb_on ? "ON" : "OFF",
+ dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
+ "on-chip with negotiation" : "invalid");
+}
+
+void bnx2x_dcbx_init_params(struct bnx2x *bp)
+{
+ bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
+ bp->dcbx_config_params.admin_ets_willing = 1;
+ bp->dcbx_config_params.admin_pfc_willing = 1;
+ bp->dcbx_config_params.overwrite_settings = 1;
+ bp->dcbx_config_params.admin_ets_enable = 1;
+ bp->dcbx_config_params.admin_pfc_enable = 1;
+ bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_reco_valid = 1;
+ bp->dcbx_config_params.admin_app_priority_willing = 1;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
+ bp->dcbx_config_params.admin_pfc_bitmap = 0x0;
+ bp->dcbx_config_params.admin_priority_app_table[0].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[1].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
+ bp->dcbx_config_params.admin_default_priority = 0;
+}
+
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
+{
+ u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+
+ /* only PMF can send ADMIN msg to MFW in old MFW versions */
+ if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+ return;
+
+ if (bp->dcbx_enabled <= 0)
+ return;
+
+	/* validate:
+	 * the chip is capable of this DCBX version,
+	 * DCB is wanted,
+	 * shmem2 contains the DCBX support fields
+	 */
+ DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
+ bp->dcb_state, bp->port.pmf);
+
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
+ SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
+ dcbx_lldp_params_offset =
+ SHMEM2_RD(bp, dcbx_lldp_params_offset);
+
+ DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n",
+ dcbx_lldp_params_offset);
+
+ bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
+
+ if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
+ /* need HW lock to avoid scenario of two drivers
+ * writing in parallel to shmem
+ */
+ bnx2x_acquire_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+ if (update_shmem)
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
+
+ /* Let HW start negotiation */
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ /* release HW lock only after MFW acks that it finished
+ * reading values from shmem
+ */
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+ }
+ }
+}
+
+static void
+bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+ u8 pri = 0;
+ u8 cos = 0;
+
+ DP(BNX2X_MSG_DCB,
+ "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n",
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
+
+ for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].strict);
+
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+ }
+
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ DP(BNX2X_MSG_DCB,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+
+ DP(BNX2X_MSG_DCB,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
+ }
+}
+
+/* fills help_data according to pg_info */
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data)
+{
+ bool pg_found = false;
+ u32 i, traf_type, add_traf_type, add_pg;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	struct pg_entry_help_data *data = help_data->data; /* shortcut */
+
+ /* Set to invalid */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ data[i].pg = DCBX_ILLEGAL_PG;
+
+ for (add_traf_type = 0;
+ add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
+ pg_found = false;
+ if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
+ add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
+ for (traf_type = 0;
+ traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+ traf_type++) {
+ if (data[traf_type].pg == add_pg) {
+ if (!(data[traf_type].pg_priority &
+ (1 << ttp[add_traf_type])))
+ data[traf_type].
+ num_of_dif_pri++;
+ data[traf_type].pg_priority |=
+ (1 << ttp[add_traf_type]);
+ pg_found = true;
+ break;
+ }
+ }
+ if (!pg_found) {
+ data[help_data->num_of_pg].pg = add_pg;
+ data[help_data->num_of_pg].pg_priority =
+ (1 << ttp[add_traf_type]);
+ data[help_data->num_of_pg].num_of_dif_pri = 1;
+ help_data->num_of_pg++;
+ }
+ }
+ DP(BNX2X_MSG_DCB,
+ "add_traf_type %d pg_found %s num_of_pg %d\n",
+ add_traf_type, !pg_found ? "NO" : "YES",
+ help_data->num_of_pg);
+ }
+}
+
+static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+{
+	/* Only one priority, hence only one COS */
+ cos_data->data[0].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask = pri_join_mask;
+ cos_data->data[0].cos_bw = 100;
+ cos_data->num_of_cos = 1;
+}
+
+static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
+ struct cos_entry_help_data *data,
+ u8 pg_bw)
+{
+ if (data->cos_bw == DCBX_INVALID_COS_BW)
+ data->cos_bw = pg_bw;
+ else
+ data->cos_bw += pg_bw;
+}
+
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets)
+{
+ u32 pri_tested = 0;
+ u8 i = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pausable = true;
+ cos_data->data[1].pausable = false;
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+
+ for (i = 0 ; i < num_of_pri ; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+
+ if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ entry = 1;
+ } else {
+ cos_data->data[0].pri_join_mask |= pri_tested;
+ entry = 0;
+ }
+ pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
+ app.traffic_type_priority[i]];
+ /* There can be only one strict pg */
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
+ else
+			/* If we join a group and one member is strict,
+			 * strict priority overrides the BW rules
+			 */
+ cos_data->data[entry].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ if ((0 == cos_data->data[0].pri_join_mask) &&
+ (0 == cos_data->data[1].pri_join_mask))
+ BNX2X_ERR("dcbx error: Both groups must have priorities\n");
+}
+
+#ifndef POWER_OF_2
+#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
+#endif
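+
+/* Example: POWER_OF_2(4) -> (4 & 3) == 0 -> true; POWER_OF_2(6) ->
+ * (6 & 5) == 4 -> false; POWER_OF_2(0) -> false by the first clause.
+ */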
+
+static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u32 pri_mask_without_pri = 0;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	/* debug */
+ if (num_of_dif_pri == 1) {
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
+ return;
+ }
+ /* single priority group */
+ if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable.*/
+ cos_data->data[1].pausable = false;
+
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ }
+
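+			/* Illustrative: POWER_OF_2() on the pauseable subset
+			 * is true exactly when one bit is set, i.e. only one
+			 * of the three priorities is pauseable; that
+			 * single-priority queue then gets the smaller 33%
+			 * share below.
+			 */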
+ if (3 == num_of_dif_pri) {
+ if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
+ pri_join_mask))) {
+ cos_data->data[0].cos_bw = 33;
+ cos_data->data[1].cos_bw = 67;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ }
+
+ } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
+ /* If there are only pauseable priorities,
+ * then one/two priorities go to the first queue
+ * and one priority goes to the second queue.
+ */
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ cos_data->data[1].pausable = true;
+ cos_data->data[0].pausable = true;
+ /* All priorities except FCOE */
+ cos_data->data[0].pri_join_mask = (pri_join_mask &
+ ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
+ /* Only FCOE priority.*/
+ cos_data->data[1].pri_join_mask =
+ (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
+ } else
+ /* If there are only non-pauseable priorities,
+ * they will all go to the same queue.
+ */
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ } else {
+ /* priority group which is not BW limited (PG#15):*/
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* If there are both pauseable and non-pauseable
+ * priorities, the pauseable priorities go to the first
+ * queue and the non-pauseable priorities
+ * go to the second queue.
+ */
+ if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ } else {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+			/* Non pauseable */
+ cos_data->data[1].pausable = false;
+ } else {
+ /* If there are only pauseable priorities or
+			 * only non-pauseable, the lower priorities go
+ * to the first queue and the higher priorities go
+ * to the second queue.
+ */
+ cos_data->data[0].pausable =
+ cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ /* Remove priority tested */
+ pri_mask_without_pri =
+ (pri_join_mask & ((u8)(~pri_tested)));
+ if (pri_mask_without_pri < pri_tested)
+ break;
+ }
+
+ if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
+ BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n");
+
+ cos_data->data[0].pri_join_mask = pri_mask_without_pri;
+ cos_data->data[1].pri_join_mask = pri_tested;
+ /* Both queues are strict priority,
+ * and that with the highest priority
+ * gets the highest strict priority in the arbiter.
+ */
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ }
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
+
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[0].pg_priority) ||
+ IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[1].pg_priority)) {
+ /* If one PG contains both pauseable and
+ * non-pauseable priorities then ETS is disabled.
+ */
+ bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
+ pg_pri_orginal_spread, ets);
+ bp->dcbx_port_params.ets.enabled = false;
+ return;
+ }
+
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable. */
+ cos_data->data[1].pausable = false;
+ if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
+ pg_help_data->data[0].pg_priority)) {
+ /* 0 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ } else {/* 1 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[0] = pg_help_data->data[1].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[1] = pg_help_data->data[0].pg;
+ }
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable, each PG goes to a queue.
+ */
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ }
+
+ /* There can be only one strict pg */
+ for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
+ if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ cos_data->data[i].cos_bw =
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
+ else
+ cos_data->data[i].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+}
+
+static int bnx2x_dcbx_join_pgs(
+ struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ struct pg_help_data *pg_help_data,
+ u8 required_num_of_pg)
+{
+ u8 entry_joined = pg_help_data->num_of_pg - 1;
+ u8 entry_removed = entry_joined + 1;
+ u8 pg_joined = 0;
+
+ if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+ <= pg_help_data->num_of_pg) {
+
+		BNX2X_ERR("invalid required_num_of_pg or num_of_pg exceeds the pg table size\n");
+ return -EINVAL;
+ }
+
+ while (required_num_of_pg < pg_help_data->num_of_pg) {
+ entry_joined = pg_help_data->num_of_pg - 2;
+ entry_removed = entry_joined + 1;
+ /* protect index */
+ entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+ pg_help_data->data[entry_joined].pg_priority |=
+ pg_help_data->data[entry_removed].pg_priority;
+
+ pg_help_data->data[entry_joined].num_of_dif_pri +=
+ pg_help_data->data[entry_removed].num_of_dif_pri;
+
+ if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+ pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
+ /* Entries joined strict priority rules */
+ pg_help_data->data[entry_joined].pg =
+ DCBX_STRICT_PRI_PG;
+ else {
+			/* Entries can be joined - sum their BW */
+ pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg) +
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_removed].pg);
+
+ DCBX_PG_BW_SET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg, pg_joined);
+ }
+ /* Joined the entries */
+ pg_help_data->num_of_pg--;
+ }
+
+ return 0;
+}
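+
+/* Worked example (illustrative): with num_of_pg == 3 and
+ * required_num_of_pg == 2, entries [1] and [2] merge into [1]: their
+ * pg_priority masks are OR-ed and num_of_dif_pri summed. If either was
+ * the strict-priority PG (15) the merged entry stays strict; otherwise
+ * the two pg_bw_tbl values are summed into the surviving PG.
+ */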
+
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ bool b_found_strict = false;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and the
+ * non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
+ bnx2x_dcbx_separate_pauseable_from_non(bp,
+ cos_data, pg_pri_orginal_spread, ets);
+ else {
+ /* If two BW-limited PG-s were combined to one queue,
+ * the BW is their sum.
+ *
+ * If there are only pauseable priorities or only non-pauseable,
+ * and there are both BW-limited and non-BW-limited PG-s,
+ * the BW-limited PG/s go to one queue and the non-BW-limited
+ * PG/s go to the second queue.
+ *
+ * If there are only pauseable priorities or only non-pauseable
+ * and all are BW limited, then two priorities go to the first
+ * queue and one priority goes to the second queue.
+ *
+		 * We will join these two cases:
+		 * if one is BW limited it will go to the second queue,
+		 * otherwise the last priority will get it
+ */
+
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < num_of_pri; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ pg_entry = (u8)pg_pri_orginal_spread[bp->
+ dcbx_port_params.app.traffic_type_priority[i]];
+
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ entry = 0;
+
+ if (i == (num_of_pri-1) && !b_found_strict)
+					/* last entry will be handled separately;
+					 * if no priority is strict then the last
+					 * entry goes to the last queue.
+ */
+ entry = 1;
+ cos_data->data[entry].pri_join_mask |=
+ pri_tested;
+ bnx2x_dcbx_add_to_cos_bw(bp,
+ &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_entry));
+ } else {
+ b_found_strict = true;
+ cos_data->data[1].pri_join_mask |= pri_tested;
+				/* If we join a group and one member is strict,
+				 * strict priority overrides the BW rules
+				 */
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ }
+ }
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ /* default E2 settings */
+ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
+
+ switch (help_data->num_of_pg) {
+ case 1:
+ bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
+ bp,
+ help_data,
+ cos_data,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ case 2:
+ bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+
+ case 3:
+ bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ default:
+ BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ }
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+ u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+ u8 app_pri_bit = 0;
+
+ while (num_spread_of_entries && num_of_app_pri > 0) {
+ app_pri_bit = 1 << (num_of_app_pri - 1);
+ if (app_pri_bit & strict_app_pris) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ num_spread_of_entries--;
+ if (num_spread_of_entries == 0) {
+				/* last entry needed: put all remaining priorities in it */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ } else {
+ strict_app_pris &= ~app_pri_bit;
+
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = app_pri_bit;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ }
+
+ strict_pri =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+ entry++;
+ }
+
+ num_of_app_pri--;
+ }
+
+ if (num_spread_of_entries) {
+ BNX2X_ERR("Didn't succeed to spread strict priorities\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
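+
+/* Illustrative walk-through: with num_spread_of_entries == 2 and
+ * strict_app_pris == 0x28 (priorities 5 and 3), priority 5 gets its own
+ * entry at BNX2X_DCBX_STRICT_COS_HIGHEST (0); the final entry then
+ * absorbs the remaining mask (0x08) at the next strict level (1), since
+ * the last entry always takes whatever priorities are left.
+ */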
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+ if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+ num_spread_of_entries,
+ strict_app_pris)) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill BW entry */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ return 1;
+ }
+
+ return num_spread_of_entries;
+}
+
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+
+{
+ u8 need_num_of_entries = 0;
+ u8 i = 0;
+ u8 entry = 0;
+
+	/*
+	 * If the number of requested PG-s in CEE is greater than 3,
+	 * the results are undefined since this violates the standard.
+	 */
+ if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+ if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+ DCBX_COS_MAX_NUM_E3B0)) {
+			BNX2X_ERR("Unable to reduce the number of PGs - we will disable ETS\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+ pri_join_mask);
+ return;
+ }
+ }
+
+ for (i = 0 ; i < help_data->num_of_pg; i++) {
+ struct pg_entry_help_data *pg = &help_data->data[i];
+ if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill BW entry */
+ data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+ data->strict = BNX2X_DCBX_STRICT_INVALID;
+ data->pri_join_mask = pg->pg_priority;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+
+ entry++;
+ } else {
+ need_num_of_entries = min_t(u8,
+ (u8)pg->num_of_dif_pri,
+ (u8)DCBX_COS_MAX_NUM_E3B0 -
+ help_data->num_of_pg + 1);
+ /*
+ * If there are still VOQ-s which have no associated PG,
+ * then associate these VOQ-s to PG15. These PG-s will
+ * be used for SP between priorities on PG15.
+ */
+ entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
+ entry, need_num_of_entries, pg->pg_priority);
+ }
+ }
+
+ /* the entry will represent the number of COSes used */
+ cos_data->num_of_cos = entry;
+}
+
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread)
+{
+ struct cos_help_data cos_data;
+ u8 i = 0;
+ u32 pri_join_mask = 0;
+ u8 num_of_dif_pri = 0;
+
+ memset(&cos_data, 0, sizeof(cos_data));
+
+ /* Validate the pg value */
+ for (i = 0; i < help_data->num_of_pg ; i++) {
+ if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+ DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+ BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+ help_data->data[i].pg);
+ pri_join_mask |= help_data->data[i].pg_priority;
+ num_of_dif_pri += help_data->data[i].num_of_dif_pri;
+ }
+
+ /* defaults */
+ cos_data.num_of_cos = 1;
+ for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
+ cos_data.data[i].pri_join_mask = 0;
+ cos_data.data[i].pausable = false;
+ cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
+ cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+ }
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
+ &cos_data, pri_join_mask);
+ else /* E2 + E3A0 */
+ bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
+ help_data, ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+
+	for (i = 0; i < cos_data.num_of_cos; i++) {
+ struct bnx2x_dcbx_cos_params *p =
+ &bp->dcbx_port_params.ets.cos_params[i];
+
+ p->strict = cos_data.data[i].strict;
+ p->bw_tbl = cos_data.data[i].cos_bw;
+ p->pri_bitmask = cos_data.data[i].pri_join_mask;
+ p->pauseable = cos_data.data[i].pausable;
+
+ /* sanity */
+ if (p->bw_tbl != DCBX_INVALID_COS_BW ||
+ p->strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (p->pri_bitmask == 0)
+ BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
+
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
+
+ if (p->pauseable &&
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for pausable COS %d\n",
+ i);
+
+ if (!p->pauseable &&
+ DCBX_PFC_PRI_GET_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for nonpausable COS %d\n",
+ i);
+ }
+ }
+
+ if (p->pauseable)
+ DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ else
+ DP(BNX2X_MSG_DCB,
+ "COS %d NONPAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ }
+
+	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
+}
+
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl)
+{
+ int i;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
+
+ DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n",
+ i, set_configuration_ets_pg[i]);
+ }
+}
+
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+ u16 pri_bit = 0;
+ u8 cos = 0, pri = 0;
+ struct priority_cos *tt2cos;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
+ memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
+
+ /* to disable DCB - the structure must be zeroed */
+ if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured)
+ return;
+
+	/* shortcut */
+ tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
+
+ /* Fw version should be incremented each update */
+ pfc_fw_cfg->dcb_version = ++bp->dcb_version;
+ pfc_fw_cfg->dcb_enabled = 1;
+
+ /* Fill priority parameters */
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ tt2cos[pri].priority = ttp[pri];
+ pri_bit = 1 << tt2cos[pri].priority;
+
+		/* Fill COS parameters based on the calculated COS to
+		 * make it more general for future use */
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
+ if (bp->dcbx_port_params.ets.cos_params[cos].
+ pri_bitmask & pri_bit)
+ tt2cos[pri].cos = cos;
+
+ pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri];
+ }
+
+ /* we never want the FW to add a 0 vlan tag */
+ pfc_fw_cfg->dont_add_pri_0_en = 1;
+
+ bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
+}
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+ /* if we need to synchronize DCBX result from prev PMF
+ * read it from shmem and update bp and netdev accordingly
+ */
+ if (SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) {
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+#ifdef BCM_DCBNL
+ /*
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ /*
+ * reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+ }
+}
+
+/* DCB netlink */
+#ifdef BCM_DCBNL
+
+#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
+ DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
+
+static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
+{
+ /* validate dcbnl call that may change HW state:
+ * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
+ */
+ return bp->dcb_state && bp->dcbx_mode_uset;
+}
+
+static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state);
+ return bp->dcb_state;
+}
+
+static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+
+ /* Fail to set state to "enabled" if dcbx is disabled in nvram */
+ if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
+ (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
+		DP(BNX2X_MSG_DCB, "Cannot set dcbx to enabled while it is disabled in nvram\n");
+ return 1;
+ }
+
+ bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
+ return 0;
+}
+
+static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n");
+
+ /* first the HW mac address */
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+
+ if (CNIC_LOADED(bp))
+ /* second SAN address */
+ memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
+ netdev->addr_len);
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid);
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+	/* bw_pct ignored -	bandwidth percentage division between user
+	 *			priorities within the same group is not
+	 *			standard and hence not supported
+	 *
+	 * prio_type ignored -	priority levels within the same group are not
+	 *			standard and hence are not supported. According
+	 *			to the standard pgid 15 is dedicated to strict
+	 *			priority traffic (on the port level).
+	 *
+	 * up_map ignored
+	 */
+
+ bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
+
+	/* bw_pct ignored -	bandwidth percentage division between user
+	 *			priorities within the same group is not
+	 *			standard and hence not supported
+	 *
+	 * prio_type ignored -	priority levels within the same group are not
+	 *			standard and hence are not supported. According
+	 *			to the standard pgid 15 is dedicated to strict
+	 *			priority traffic (on the port level).
+	 *
+	 * up_map ignored
+	 */
+ *up_map = *bw_pct = *prio_type = *pgid = 0;
+
+ if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+ *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid);
+
+ *bw_pct = 0;
+
+ if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
+
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
+
+ *bw_pct = 0;
+}
+
+static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ if (setting) {
+ bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+ } else {
+ bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio);
+ }
+}
+
+static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
+
+ *setting = 0;
+
+ if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
+}
+
+static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(BNX2X_MSG_DCB, "SET-ALL\n");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return 1;
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev,
+ "Handling parity error recovery. Try again later\n");
+ return 1;
+ }
+ if (netif_running(bp->dev)) {
+ bnx2x_update_drv_flags(bp,
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED,
+ 1);
+ bnx2x_dcbx_init(bp, true);
+ }
+ DP(BNX2X_MSG_DCB, "set_dcbx_params done\n");
+
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ if (bp->dcb_state) {
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PFC */
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = BNX2X_DCBX_CAPS;
+ break;
+ default:
+			BNX2X_ERR("Invalid capability ID\n");
+ rval = 1;
+ break;
+ }
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
+ rval = 1;
+ }
+
+ DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
+ return rval;
+}
+
+static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(BNX2X_MSG_DCB, "tcid %d\n", tcid);
+
+ if (bp->dcb_state) {
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
+ break;
+ default:
+			BNX2X_ERR("Invalid TC-ID\n");
+ rval = 1;
+ break;
+ }
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
+ rval = 1;
+ }
+
+ return rval;
+}
+
+static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num);
+ return -EINVAL;
+}
+
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+
+ if (!bp->dcb_state)
+ return 0;
+
+ return bp->dcbx_local_feat.pfc.enabled;
+}
+
+static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return;
+
+ bp->dcbx_config_params.admin_pfc_tx_enable =
+ bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
+}
+
+static void bnx2x_admin_app_set_ent(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval, u8 up)
+{
+ app_ent->valid = 1;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ app_ent->traffic_type = TRAFFIC_TYPE_ETH;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ app_ent->traffic_type = TRAFFIC_TYPE_PORT;
+ break;
+ default:
+ break; /* never gets here */
+ }
+ app_ent->app_id = idval;
+ app_ent->priority = up;
+}
+
+static bool bnx2x_admin_app_is_equal(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval)
+{
+ if (!app_ent->valid)
+ return false;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
+ return false;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ if (app_ent->app_id != idval)
+ return false;
+
+ return true;
+}
+
+static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
+{
+ int i, ff;
+
+ /* iterate over the app entries looking for idtype and idval */
+ for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
+ struct bnx2x_admin_priority_app_table *app_ent =
+ &bp->dcbx_config_params.admin_priority_app_table[i];
+ if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
+ break;
+
+ if (ff < 0 && !app_ent->valid)
+ ff = i;
+ }
+ if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
+ /* if found overwrite up */
+ bp->dcbx_config_params.
+ admin_priority_app_table[i].priority = up;
+ else if (ff >= 0)
+ /* not found use first-free */
+ bnx2x_admin_app_set_ent(
+ &bp->dcbx_config_params.admin_priority_app_table[ff],
+ idtype, idval, up);
+ else {
+ /* app table is full */
+		BNX2X_ERR("Application table is full\n");
+ return -EBUSY;
+ }
+
+ /* up configured, if not 0 make sure feature is enabled */
+ if (up)
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+
+ return 0;
+}
+
+static int bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+ u16 idval, u8 up)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n",
+ idtype, idval, up);
+
+ if (!bnx2x_dcbnl_set_valid(bp)) {
+ DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
+ return -EINVAL;
+ }
+
+ /* verify idtype */
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ case DCB_APP_IDTYPE_PORTNUM:
+ break;
+ default:
+ DP(BNX2X_MSG_DCB, "Wrong ID type\n");
+ return -EINVAL;
+ }
+ return bnx2x_set_admin_app_up(bp, idtype, idval, up);
+}
+
+static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 state;
+
+ state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
+ state |= DCB_CAP_DCBX_STATIC;
+
+ return state;
+}
+
+static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(BNX2X_MSG_DCB, "state = %02x\n", state);
+
+ /* set dcbx mode */
+
+ if ((state & BNX2X_DCBX_CAPS) != state) {
+ BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n",
+ state);
+ return 1;
+ }
+
+ if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
+ BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
+ return 1;
+ }
+
+ if (state & DCB_CAP_DCBX_STATIC)
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
+ else
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
+
+ bp->dcbx_mode_uset = true;
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(BNX2X_MSG_DCB, "featid %d\n", featid);
+
+ if (bp->dcb_state) {
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (bp->dcbx_local_feat.ets.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+ DCBX_REMOTE_MIB_ERROR))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (bp->dcbx_local_feat.pfc.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
+ DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (bp->dcbx_local_feat.app.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
+ DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ default:
+			BNX2X_ERR("Invalid feature-ID\n");
+ rval = 1;
+ break;
+ }
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
+ rval = 1;
+ }
+
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
+ u8 flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags);
+
+ /* ignore the 'advertise' flag */
+ if (bnx2x_dcbnl_set_valid(bp)) {
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ bp->dcbx_config_params.admin_ets_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_ets_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ bp->dcbx_config_params.admin_pfc_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_pfc_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ /* ignore enable, always enabled */
+ bp->dcbx_config_params.admin_app_priority_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ default:
+			BNX2X_ERR("Invalid feature-ID\n");
+ rval = 1;
+ break;
+ }
+ } else {
+ DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
+ rval = 1;
+ }
+
+ return rval;
+}
+
+static int bnx2x_peer_appinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info, u16* app_count)
+{
+ int i;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(BNX2X_MSG_DCB, "APP-INFO\n");
+
+ info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
+ info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
+ *app_count = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+ if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
+ DCBX_APP_ENTRY_VALID)
+ (*app_count)++;
+ return 0;
+}
+
+static int bnx2x_peer_apptable(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ int i, j;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(BNX2X_MSG_DCB, "APP-TABLE\n");
+
+ for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_remote_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
+ table[j++].protocol = ent->app_id;
+ }
+ }
+ return 0;
+}
+
+static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
+{
+ int i;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
+
+ for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+ pg->pg_bw[i] =
+ DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
+ pg->prio_pg[i] =
+ DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
+ }
+ return 0;
+}
+
+static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
+ pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
+ .getstate = bnx2x_dcbnl_get_state,
+ .setstate = bnx2x_dcbnl_set_state,
+ .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx,
+ .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx,
+ .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx,
+ .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx,
+ .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx,
+ .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx,
+ .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx,
+ .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx,
+ .setpfccfg = bnx2x_dcbnl_set_pfc_cfg,
+ .getpfccfg = bnx2x_dcbnl_get_pfc_cfg,
+ .setall = bnx2x_dcbnl_set_all,
+ .getcap = bnx2x_dcbnl_get_cap,
+ .getnumtcs = bnx2x_dcbnl_get_numtcs,
+ .setnumtcs = bnx2x_dcbnl_set_numtcs,
+ .getpfcstate = bnx2x_dcbnl_get_pfc_state,
+ .setpfcstate = bnx2x_dcbnl_set_pfc_state,
+ .setapp = bnx2x_dcbnl_set_app_up,
+ .getdcbx = bnx2x_dcbnl_get_dcbx,
+ .setdcbx = bnx2x_dcbnl_set_dcbx,
+ .getfeatcfg = bnx2x_dcbnl_get_featcfg,
+ .setfeatcfg = bnx2x_dcbnl_set_featcfg,
+ .peer_getappinfo = bnx2x_peer_appinfo,
+ .peer_getapptable = bnx2x_peer_apptable,
+ .cee_peer_getpg = bnx2x_cee_peer_getpg,
+ .cee_peer_getpfc = bnx2x_cee_peer_getpfc,
+};
+
+#endif /* BCM_DCBNL */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
new file mode 100644
index 0000000000..9a9517c0f7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,207 @@
+/* bnx2x_dcb.h: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#ifndef BNX2X_DCB_H
+#define BNX2X_DCB_H
+
+#include "bnx2x_hsi.h"
+
+#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
+struct bnx2x_dcbx_app_params {
+ u32 enabled;
+ u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+};
+
+#define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS
+/* bnx2x currently limits the number of supported COSes to 3
+ * (to be extended to 6)
+ */
+#define BNX2X_MAX_COS_SUPPORT 3
+#define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT
+
+struct bnx2x_dcbx_cos_params {
+ u32 bw_tbl;
+ u32 pri_bitmask;
+ /*
+ * strict priority: valid values are 0..5; 0 is highest priority.
+ * There can't be two COSes with the same priority.
+ */
+ u8 strict;
+#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM
+#define BNX2X_DCBX_STRICT_COS_HIGHEST 0
+#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1)
+ u8 pauseable;
+};
+
+struct bnx2x_dcbx_pg_params {
+ u32 enabled;
+ u8 num_of_cos; /* valid COS entries */
+ struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM];
+};
+
+struct bnx2x_dcbx_pfc_params {
+ u32 enabled;
+ u32 priority_non_pauseable_mask;
+};
+
+struct bnx2x_dcbx_port_params {
+ struct bnx2x_dcbx_pfc_params pfc;
+ struct bnx2x_dcbx_pg_params ets;
+ struct bnx2x_dcbx_app_params app;
+};
+
+#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
+#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
+ (bp)->dcbx_port_params.ets.enabled)
+
+struct bnx2x_config_lldp_params {
+ u32 overwrite_settings;
+ u32 msg_tx_hold;
+ u32 msg_fast_tx;
+ u32 tx_credit_max;
+ u32 msg_tx_interval;
+ u32 tx_fast;
+};
+
+struct bnx2x_admin_priority_app_table {
+ u32 valid;
+ u32 priority;
+#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
+ u32 traffic_type;
+#define TRAFFIC_TYPE_ETH 0
+#define TRAFFIC_TYPE_PORT 1
+ u32 app_id;
+};
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+struct bnx2x_config_dcbx_params {
+ u32 overwrite_settings;
+ u32 admin_dcbx_version;
+ u32 admin_ets_enable;
+ u32 admin_pfc_enable;
+ u32 admin_tc_supported_tx_enable;
+ u32 admin_ets_configuration_tx_enable;
+ u32 admin_ets_recommendation_tx_enable;
+ u32 admin_pfc_tx_enable;
+ u32 admin_application_priority_tx_enable;
+ u32 admin_ets_willing;
+ u32 admin_ets_reco_valid;
+ u32 admin_pfc_willing;
+ u32 admin_app_priority_willing;
+ u32 admin_configuration_bw_precentage[8];
+ u32 admin_configuration_ets_pg[8];
+ u32 admin_recommendation_bw_precentage[8];
+ u32 admin_recommendation_ets_pg[8];
+ u32 admin_pfc_bitmap;
+ struct bnx2x_admin_priority_app_table
+ admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
+ u32 admin_default_priority;
+};
+
+#define GET_FLAGS(flags, bits) ((flags) & (bits))
+#define SET_FLAGS(flags, bits) ((flags) |= (bits))
+#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
+
+enum {
+ DCBX_READ_LOCAL_MIB,
+ DCBX_READ_REMOTE_MIB
+};
+
+#define ETH_TYPE_FCOE (0x8906)
+#define TCP_PORT_ISCSI (0xCBC)
+
+#define PFC_VALUE_FRAME_SIZE (512)
+#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
+ ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
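+
+/* Example (illustrative): a PFC quantum is PFC_VALUE_FRAME_SIZE (512)
+ * bit-times, so at 10G (mega_speed == 10000) the macro yields
+ * (1000 * 512) / 10000 = 51 ns (integer division truncates 51.2).
+ */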
+
+#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
+#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
+
+struct cos_entry_help_data {
+ u32 pri_join_mask;
+ u32 cos_bw;
+ u8 strict;
+ bool pausable;
+};
+
+struct cos_help_data {
+ struct cos_entry_help_data data[DCBX_COS_MAX_NUM];
+ u8 num_of_cos;
+};
+
+#define DCBX_ILLEGAL_PG (0xFF)
+#define DCBX_PFC_PRI_MASK (0xFF)
+#define DCBX_STRICT_PRIORITY (15)
+#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
+#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
+ ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
+#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
+ ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
+#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
+ ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
+#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
+ (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \
+ (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
+#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
+ (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
+ ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
+ (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
+ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
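+
+/* Worked example (illustrative): with priority_non_pauseable_mask ==
+ * 0x0c and pg_pri == 0x0d, DCBX_PFC_PRI_GET_PAUSE() == 0x01 and
+ * DCBX_PFC_PRI_GET_NON_PAUSE() == 0x0c, so IS_DCBX_PFC_PRI_MIX_PAUSE()
+ * is true - the PG mixes pauseable and non-pauseable priorities.
+ */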
+
+struct pg_entry_help_data {
+ u8 num_of_dif_pri;
+ u8 pg;
+ u32 pg_priority;
+};
+
+struct pg_help_data {
+ struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+ u8 num_of_pg;
+};
+
+/* forward DCB/PFC related declarations */
+struct bnx2x;
+void bnx2x_dcbx_update(struct work_struct *work);
+void bnx2x_dcbx_init_params(struct bnx2x *bp);
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
+
+enum {
+ BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
+ BNX2X_DCBX_STATE_TX_PAUSED,
+ BNX2X_DCBX_STATE_TX_RELEASED
+};
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
+/* DCB netlink */
+#ifdef BCM_DCBNL
+extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
+
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
+#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
new file mode 100644
index 0000000000..eccfa13b0f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -0,0 +1,2218 @@
+/* bnx2x_dump.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ */
+
+#ifndef BNX2X_DUMP_H
+#define BNX2X_DUMP_H
+
+/* WaitP Definitions */
+#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80
+#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80
+#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80
+#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80
+
+/* Possible Chips */
+#define DUMP_CHIP_E1 1
+#define DUMP_CHIP_E1H 2
+#define DUMP_CHIP_E2 4
+#define DUMP_CHIP_E3A0 8
+#define DUMP_CHIP_E3B0 16
+#define DUMP_PATH_0 512
+#define DUMP_PATH_1 1024
+#define NUM_PRESETS 13
+#define NUM_CHIPS 5
+
+struct dump_header {
+ u32 header_size; /* Size in DWORDs excluding this field */
+ u32 version;
+ u32 preset;
+ u32 dump_meta_data; /* OR of CHIP and PATH. */
+};
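+
+/* Example (illustrative): a dump taken on an E2 chip over path 0 would
+ * carry dump_meta_data = DUMP_CHIP_E2 | DUMP_PATH_0 = 4 | 512 = 0x204.
+ */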
+
+#define BNX2X_DUMP_VERSION 0x61111111
+struct reg_addr {
+ u32 addr;
+ u32 size;
+ u32 chips;
+ u32 presets;
+};
+
+struct wreg_addr {
+ u32 addr;
+ u32 size;
+ u32 read_regs_count;
+ const u32 *read_regs;
+ u32 chips;
+ u32 presets;
+};
+
+#define PAGE_MODE_VALUES_E2 2
+#define PAGE_READ_REGS_E2 1
+#define PAGE_WRITE_REGS_E2 1
+static const u32 page_vals_e2[] = {0, 128};
+static const u32 page_write_regs_e2[] = {328476};
+static const struct reg_addr page_read_regs_e2[] = {
+ {0x58000, 4608, DUMP_CHIP_E2, 0x30}
+};
+
+#define PAGE_MODE_VALUES_E3 2
+#define PAGE_READ_REGS_E3 1
+#define PAGE_WRITE_REGS_E3 1
+static const u32 page_vals_e3[] = {0, 128};
+static const u32 page_write_regs_e3[] = {328476};
+static const struct reg_addr page_read_regs_e3[] = {
+ {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30}
+};
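+
+/* The PAGE_* groups above describe paged register windows: for each value
+ * in page_vals_e2/e3, the dump code writes that value to every register in
+ * page_write_regs_e2/e3 and then reads the ranges in page_read_regs_e2/e3,
+ * capturing the window at 0x58000 once per page. The traversal itself
+ * lives in the ethtool dump path (bnx2x_ethtool.c), not in this header.
+ */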
+
+static const struct reg_addr reg_addrs[] = {
+ { 0x2000, 1, 0x1f, 0xfff},
+ { 0x2004, 1, 0x1f, 0x1fff},
+ { 0x2008, 25, 0x1f, 0xfff},
+ { 0x206c, 1, 0x1f, 0x1fff},
+ { 0x2070, 313, 0x1f, 0xfff},
+ { 0x2800, 103, 0x1f, 0xfff},
+ { 0x3000, 287, 0x1f, 0xfff},
+ { 0x3800, 331, 0x1f, 0xfff},
+ { 0x8800, 6, 0x1f, 0x924},
+ { 0x8818, 1, 0x1e, 0x924},
+ { 0x9000, 4, 0x1c, 0x924},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x902c, 1, 0x1c, 0x924},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9034, 13, 0x1c, 0x924},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x90a8, 98, 0x1c, 0x924},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9238, 3, 0x1c, 0x924},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9248, 1, 0x1c, 0x924},
+ { 0x924c, 1, 0x4, 0x924},
+ { 0x9250, 16, 0x1c, 0x924},
+ { 0x92a8, 2, 0x1c, 0x1fff},
+ { 0x92b4, 1, 0x1c, 0x1fff},
+ { 0x9400, 33, 0x1c, 0x924},
+ { 0x9484, 5, 0x18, 0x924},
+ { 0xa000, 27, 0x1f, 0x924},
+ { 0xa06c, 1, 0x3, 0x924},
+ { 0xa070, 2, 0x1f, 0x924},
+ { 0xa078, 1, 0x1f, 0x1fff},
+ { 0xa07c, 31, 0x1f, 0x924},
+ { 0xa0f8, 1, 0x1f, 0x1fff},
+ { 0xa0fc, 3, 0x1f, 0x924},
+ { 0xa108, 1, 0x1f, 0x1fff},
+ { 0xa10c, 3, 0x1f, 0x924},
+ { 0xa118, 1, 0x1f, 0x1fff},
+ { 0xa11c, 28, 0x1f, 0x924},
+ { 0xa18c, 4, 0x3, 0x924},
+ { 0xa19c, 3, 0x1f, 0x924},
+ { 0xa1a8, 1, 0x1f, 0x1fff},
+ { 0xa1ac, 3, 0x1f, 0x924},
+ { 0xa1b8, 1, 0x1f, 0x1fff},
+ { 0xa1bc, 54, 0x1f, 0x924},
+ { 0xa294, 2, 0x3, 0x924},
+ { 0xa29c, 2, 0x1f, 0x924},
+ { 0xa2a4, 2, 0x7, 0x924},
+ { 0xa2ac, 2, 0x1f, 0x924},
+ { 0xa2b4, 1, 0x1f, 0x1fff},
+ { 0xa2b8, 49, 0x1f, 0x924},
+ { 0xa38c, 2, 0x1f, 0x1fff},
+ { 0xa398, 1, 0x1f, 0x1fff},
+ { 0xa39c, 7, 0x1e, 0x924},
+ { 0xa3b8, 2, 0x18, 0x924},
+ { 0xa3c0, 1, 0x1e, 0x924},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa3c8, 1, 0x1e, 0x924},
+ { 0xa3d0, 1, 0x1e, 0x924},
+ { 0xa3d8, 1, 0x1e, 0x924},
+ { 0xa3e0, 1, 0x1e, 0x924},
+ { 0xa3e8, 1, 0x1e, 0x924},
+ { 0xa3f0, 1, 0x1e, 0x924},
+ { 0xa3f8, 1, 0x1e, 0x924},
+ { 0xa400, 1, 0x1f, 0x924},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa410, 7, 0x1f, 0x924},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa45c, 1, 0x1f, 0x924},
+ { 0xa460, 1, 0x1f, 0x1924},
+ { 0xa464, 15, 0x1f, 0x924},
+ { 0xa4a0, 1, 0x7, 0x924},
+ { 0xa4a4, 2, 0x1f, 0x924},
+ { 0xa4ac, 2, 0x3, 0x924},
+ { 0xa4b4, 1, 0x7, 0x924},
+ { 0xa4b8, 2, 0x3, 0x924},
+ { 0xa4c0, 3, 0x1f, 0x924},
+ { 0xa4cc, 5, 0x3, 0x924},
+ { 0xa4e0, 3, 0x1f, 0x924},
+ { 0xa4fc, 2, 0x1f, 0x924},
+ { 0xa504, 1, 0x3, 0x924},
+ { 0xa508, 3, 0x1f, 0x924},
+ { 0xa518, 1, 0x1f, 0x924},
+ { 0xa520, 1, 0x1f, 0x924},
+ { 0xa528, 1, 0x1f, 0x924},
+ { 0xa530, 1, 0x1f, 0x924},
+ { 0xa538, 1, 0x1f, 0x924},
+ { 0xa540, 1, 0x1f, 0x924},
+ { 0xa548, 1, 0x3, 0x924},
+ { 0xa550, 1, 0x3, 0x924},
+ { 0xa558, 1, 0x3, 0x924},
+ { 0xa560, 1, 0x3, 0x924},
+ { 0xa568, 1, 0x3, 0x924},
+ { 0xa570, 1, 0x1f, 0x924},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa5a0, 1, 0x7, 0x924},
+ { 0xa5c0, 1, 0x1f, 0x924},
+ { 0xa5e0, 1, 0x1e, 0x924},
+ { 0xa5e8, 1, 0x1e, 0x924},
+ { 0xa5f0, 1, 0x1e, 0x924},
+ { 0xa5f8, 1, 0x6, 0x924},
+ { 0xa5fc, 1, 0x1e, 0x924},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa614, 1, 0x1e, 0x924},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa61c, 1, 0x1e, 0x924},
+ { 0xa620, 6, 0x1c, 0x924},
+ { 0xa638, 20, 0x4, 0x924},
+ { 0xa688, 35, 0x1c, 0x924},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa718, 2, 0x1c, 0x924},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa724, 3, 0x1c, 0x924},
+ { 0xa730, 1, 0x4, 0x924},
+ { 0xa734, 2, 0x1c, 0x924},
+ { 0xa73c, 4, 0x4, 0x924},
+ { 0xa74c, 1, 0x1c, 0x924},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xa754, 3, 0x1c, 0x924},
+ { 0xa760, 5, 0x4, 0x924},
+ { 0xa774, 7, 0x1c, 0x924},
+ { 0xa790, 15, 0x4, 0x924},
+ { 0xa7cc, 4, 0x1c, 0x924},
+ { 0xa7e0, 6, 0x18, 0x924},
+ { 0xa800, 18, 0x4, 0x924},
+ { 0xa848, 33, 0x1c, 0x924},
+ { 0xa8cc, 2, 0x18, 0x924},
+ { 0xa8d4, 4, 0x1c, 0x924},
+ { 0xa8e4, 1, 0x18, 0x924},
+ { 0xa8e8, 1, 0x1c, 0x924},
+ { 0xa8f0, 1, 0x1c, 0x924},
+ { 0xa8f8, 30, 0x18, 0x924},
+ { 0xa974, 73, 0x18, 0x924},
+ { 0xac30, 1, 0x18, 0x924},
+ { 0xac40, 1, 0x18, 0x924},
+ { 0xac50, 1, 0x18, 0x924},
+ { 0xac60, 1, 0x10, 0x924},
+ { 0x10000, 9, 0x1f, 0x924},
+ { 0x10024, 1, 0x7, 0x924},
+ { 0x10028, 5, 0x1f, 0x924},
+ { 0x1003c, 6, 0x7, 0x924},
+ { 0x10054, 20, 0x1f, 0x924},
+ { 0x100a4, 4, 0x7, 0x924},
+ { 0x100b4, 11, 0x1f, 0x924},
+ { 0x100e0, 4, 0x7, 0x924},
+ { 0x100f0, 8, 0x1f, 0x924},
+ { 0x10110, 6, 0x7, 0x924},
+ { 0x10128, 110, 0x1f, 0x924},
+ { 0x102e0, 4, 0x7, 0x924},
+ { 0x102f0, 18, 0x1f, 0x924},
+ { 0x10338, 20, 0x7, 0x924},
+ { 0x10388, 10, 0x1f, 0x924},
+ { 0x103d0, 2, 0x3, 0x1fff},
+ { 0x103dc, 1, 0x3, 0x1fff},
+ { 0x10400, 6, 0x7, 0x924},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x1041c, 1, 0x1f, 0x924},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10424, 1, 0x1f, 0x924},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x1042c, 1, 0x1f, 0x924},
+ { 0x10430, 10, 0x7, 0x924},
+ { 0x10458, 2, 0x1f, 0x924},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10464, 4, 0x1f, 0x924},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x10478, 14, 0x1f, 0x924},
+ { 0x104b0, 12, 0x7, 0x924},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104e8, 1, 0x1f, 0x924},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f4, 1, 0x1f, 0x924},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10500, 2, 0x1f, 0x924},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x1050c, 9, 0x1f, 0x924},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10534, 1, 0x1f, 0x924},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x1053c, 3, 0x1f, 0x924},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x1054c, 3, 0x1f, 0x924},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x1055c, 123, 0x1f, 0x924},
+ { 0x10750, 2, 0x7, 0x924},
+ { 0x10760, 2, 0x7, 0x924},
+ { 0x10770, 2, 0x7, 0x924},
+ { 0x10780, 2, 0x7, 0x924},
+ { 0x10790, 2, 0x1f, 0x924},
+ { 0x107a0, 2, 0x7, 0x924},
+ { 0x107b0, 2, 0x7, 0x924},
+ { 0x107c0, 2, 0x7, 0x924},
+ { 0x107d0, 2, 0x7, 0x924},
+ { 0x107e0, 2, 0x1f, 0x924},
+ { 0x10880, 2, 0x1f, 0x924},
+ { 0x10900, 2, 0x1f, 0x924},
+ { 0x16000, 1, 0x6, 0x924},
+ { 0x16004, 25, 0x1e, 0x924},
+ { 0x16070, 8, 0x1e, 0x924},
+ { 0x16090, 4, 0xe, 0x924},
+ { 0x160a0, 6, 0x1e, 0x924},
+ { 0x160c0, 7, 0x1e, 0x924},
+ { 0x160dc, 2, 0x6, 0x924},
+ { 0x160e4, 6, 0x1e, 0x924},
+ { 0x160fc, 4, 0x1e, 0x1fff},
+ { 0x1610c, 2, 0x6, 0x924},
+ { 0x16114, 6, 0x1e, 0x924},
+ { 0x16140, 48, 0x1e, 0x1fff},
+ { 0x16204, 5, 0x1e, 0x924},
+ { 0x18000, 1, 0x1e, 0x924},
+ { 0x18008, 1, 0x1e, 0x924},
+ { 0x18010, 35, 0x1c, 0x924},
+ { 0x180a4, 2, 0x1c, 0x924},
+ { 0x180c0, 9, 0x1c, 0x924},
+ { 0x180e4, 1, 0xc, 0x924},
+ { 0x180e8, 2, 0x1c, 0x924},
+ { 0x180f0, 1, 0xc, 0x924},
+ { 0x180f4, 79, 0x1c, 0x924},
+ { 0x18230, 1, 0xc, 0x924},
+ { 0x18234, 2, 0x1c, 0x924},
+ { 0x1823c, 1, 0xc, 0x924},
+ { 0x18240, 13, 0x1c, 0x924},
+ { 0x18274, 1, 0x4, 0x924},
+ { 0x18278, 12, 0x1c, 0x924},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182ac, 3, 0x1c, 0x924},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x182bc, 19, 0x1c, 0x924},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x1830c, 3, 0x1c, 0x924},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x1831c, 7, 0x1c, 0x924},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x1833c, 3, 0x1c, 0x924},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x1834c, 28, 0x1c, 0x924},
+ { 0x183bc, 2, 0x1c, 0x1fff},
+ { 0x183c8, 3, 0x1c, 0x1fff},
+ { 0x183d8, 1, 0x1c, 0x1fff},
+ { 0x18440, 48, 0x1c, 0x1fff},
+ { 0x18500, 15, 0x1c, 0x924},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18574, 1, 0x18, 0x924},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1857c, 4, 0x18, 0x924},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18590, 1, 0x18, 0x924},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x18598, 32, 0x18, 0x924},
+ { 0x18618, 5, 0x10, 0x924},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x1863c, 16, 0x10, 0x924},
+ { 0x18680, 44, 0x10, 0x924},
+ { 0x18748, 12, 0x10, 0x924},
+ { 0x18788, 1, 0x10, 0x924},
+ { 0x1879c, 6, 0x10, 0x924},
+ { 0x187c4, 51, 0x10, 0x924},
+ { 0x18a00, 48, 0x10, 0x924},
+ { 0x20000, 24, 0x1f, 0x924},
+ { 0x20060, 8, 0x1f, 0x9e4},
+ { 0x20080, 94, 0x1f, 0x924},
+ { 0x201f8, 1, 0x3, 0x924},
+ { 0x201fc, 1, 0x1f, 0x924},
+ { 0x20200, 1, 0x3, 0x924},
+ { 0x20204, 1, 0x1f, 0x924},
+ { 0x20208, 1, 0x3, 0x924},
+ { 0x2020c, 4, 0x1f, 0x924},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x20248, 24, 0x1f, 0x924},
+ { 0x202b8, 2, 0x1f, 0x1fff},
+ { 0x202c4, 1, 0x1f, 0x1fff},
+ { 0x202c8, 1, 0x1c, 0x924},
+ { 0x202d8, 4, 0x1c, 0x924},
+ { 0x202f0, 1, 0x10, 0x924},
+ { 0x20400, 1, 0x1f, 0x924},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x20414, 2, 0x1f, 0x924},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x20424, 2, 0x1f, 0x924},
+ { 0x2042c, 18, 0x1e, 0x924},
+ { 0x20480, 1, 0x1f, 0x924},
+ { 0x20500, 1, 0x1f, 0x924},
+ { 0x20600, 1, 0x1f, 0x924},
+ { 0x28000, 1, 0x1f, 0x9e4},
+ { 0x28004, 255, 0x1f, 0x180},
+ { 0x28400, 1, 0x1f, 0x1c0},
+ { 0x28404, 255, 0x1f, 0x180},
+ { 0x28800, 1, 0x1f, 0x1c0},
+ { 0x28804, 255, 0x1f, 0x180},
+ { 0x28c00, 1, 0x1f, 0x1c0},
+ { 0x28c04, 255, 0x1f, 0x180},
+ { 0x29000, 1, 0x1f, 0x1c0},
+ { 0x29004, 255, 0x1f, 0x180},
+ { 0x29400, 1, 0x1f, 0x1c0},
+ { 0x29404, 255, 0x1f, 0x180},
+ { 0x29800, 1, 0x1f, 0x1c0},
+ { 0x29804, 255, 0x1f, 0x180},
+ { 0x29c00, 1, 0x1f, 0x1c0},
+ { 0x29c04, 255, 0x1f, 0x180},
+ { 0x2a000, 1, 0x1f, 0x1c0},
+ { 0x2a004, 255, 0x1f, 0x180},
+ { 0x2a400, 1, 0x1f, 0x1c0},
+ { 0x2a404, 255, 0x1f, 0x180},
+ { 0x2a800, 1, 0x1f, 0x1c0},
+ { 0x2a804, 255, 0x1f, 0x180},
+ { 0x2ac00, 1, 0x1f, 0x1c0},
+ { 0x2ac04, 255, 0x1f, 0x180},
+ { 0x2b000, 1, 0x1f, 0x1c0},
+ { 0x2b004, 255, 0x1f, 0x180},
+ { 0x2b400, 1, 0x1f, 0x1c0},
+ { 0x2b404, 255, 0x1f, 0x180},
+ { 0x2b800, 1, 0x1f, 0x1c0},
+ { 0x2b804, 255, 0x1f, 0x180},
+ { 0x2bc00, 1, 0x1f, 0x1c0},
+ { 0x2bc04, 255, 0x1f, 0x180},
+ { 0x2c000, 1, 0x1f, 0x1c0},
+ { 0x2c004, 255, 0x1f, 0x180},
+ { 0x2c400, 1, 0x1f, 0x1c0},
+ { 0x2c404, 255, 0x1f, 0x180},
+ { 0x2c800, 1, 0x1f, 0x1c0},
+ { 0x2c804, 255, 0x1f, 0x180},
+ { 0x2cc00, 1, 0x1f, 0x1c0},
+ { 0x2cc04, 255, 0x1f, 0x180},
+ { 0x2d000, 1, 0x1f, 0x1c0},
+ { 0x2d004, 255, 0x1f, 0x180},
+ { 0x2d400, 1, 0x1f, 0x1c0},
+ { 0x2d404, 255, 0x1f, 0x180},
+ { 0x2d800, 1, 0x1f, 0x1c0},
+ { 0x2d804, 255, 0x1f, 0x180},
+ { 0x2dc00, 1, 0x1f, 0x1c0},
+ { 0x2dc04, 255, 0x1f, 0x180},
+ { 0x2e000, 1, 0x1f, 0x1c0},
+ { 0x2e004, 255, 0x1f, 0x180},
+ { 0x2e400, 1, 0x1f, 0x1c0},
+ { 0x2e404, 255, 0x1f, 0x180},
+ { 0x2e800, 1, 0x1f, 0x1c0},
+ { 0x2e804, 255, 0x1f, 0x180},
+ { 0x2ec00, 1, 0x1f, 0x1c0},
+ { 0x2ec04, 255, 0x1f, 0x180},
+ { 0x2f000, 1, 0x1f, 0x1c0},
+ { 0x2f004, 255, 0x1f, 0x180},
+ { 0x2f400, 1, 0x1f, 0x1c0},
+ { 0x2f404, 255, 0x1f, 0x180},
+ { 0x2f800, 1, 0x1f, 0x1c0},
+ { 0x2f804, 255, 0x1f, 0x180},
+ { 0x2fc00, 1, 0x1f, 0x1c0},
+ { 0x2fc04, 255, 0x1f, 0x180},
+ { 0x30000, 1, 0x1f, 0x9e4},
+ { 0x30004, 255, 0x1f, 0x180},
+ { 0x30400, 1, 0x1f, 0x1c0},
+ { 0x30404, 255, 0x1f, 0x180},
+ { 0x30800, 1, 0x1f, 0x1c0},
+ { 0x30804, 255, 0x1f, 0x180},
+ { 0x30c00, 1, 0x1f, 0x1c0},
+ { 0x30c04, 255, 0x1f, 0x180},
+ { 0x31000, 1, 0x1f, 0x1c0},
+ { 0x31004, 255, 0x1f, 0x180},
+ { 0x31400, 1, 0x1f, 0x1c0},
+ { 0x31404, 255, 0x1f, 0x180},
+ { 0x31800, 1, 0x1f, 0x1c0},
+ { 0x31804, 255, 0x1f, 0x180},
+ { 0x31c00, 1, 0x1f, 0x1c0},
+ { 0x31c04, 255, 0x1f, 0x180},
+ { 0x32000, 1, 0x1f, 0x1c0},
+ { 0x32004, 255, 0x1f, 0x180},
+ { 0x32400, 1, 0x1f, 0x1c0},
+ { 0x32404, 255, 0x1f, 0x180},
+ { 0x32800, 1, 0x1f, 0x1c0},
+ { 0x32804, 255, 0x1f, 0x180},
+ { 0x32c00, 1, 0x1f, 0x1c0},
+ { 0x32c04, 255, 0x1f, 0x180},
+ { 0x33000, 1, 0x1f, 0x1c0},
+ { 0x33004, 255, 0x1f, 0x180},
+ { 0x33400, 1, 0x1f, 0x1c0},
+ { 0x33404, 255, 0x1f, 0x180},
+ { 0x33800, 1, 0x1f, 0x1c0},
+ { 0x33804, 255, 0x1f, 0x180},
+ { 0x33c00, 1, 0x1f, 0x1c0},
+ { 0x33c04, 255, 0x1f, 0x180},
+ { 0x34000, 1, 0x1f, 0x1c0},
+ { 0x34004, 255, 0x1f, 0x180},
+ { 0x34400, 1, 0x1f, 0x1c0},
+ { 0x34404, 255, 0x1f, 0x180},
+ { 0x34800, 1, 0x1f, 0x1c0},
+ { 0x34804, 255, 0x1f, 0x180},
+ { 0x34c00, 1, 0x1f, 0x1c0},
+ { 0x34c04, 255, 0x1f, 0x180},
+ { 0x35000, 1, 0x1f, 0x1c0},
+ { 0x35004, 255, 0x1f, 0x180},
+ { 0x35400, 1, 0x1f, 0x1c0},
+ { 0x35404, 255, 0x1f, 0x180},
+ { 0x35800, 1, 0x1f, 0x1c0},
+ { 0x35804, 255, 0x1f, 0x180},
+ { 0x35c00, 1, 0x1f, 0x1c0},
+ { 0x35c04, 255, 0x1f, 0x180},
+ { 0x36000, 1, 0x1f, 0x1c0},
+ { 0x36004, 255, 0x1f, 0x180},
+ { 0x36400, 1, 0x1f, 0x1c0},
+ { 0x36404, 255, 0x1f, 0x180},
+ { 0x36800, 1, 0x1f, 0x1c0},
+ { 0x36804, 255, 0x1f, 0x180},
+ { 0x36c00, 1, 0x1f, 0x1c0},
+ { 0x36c04, 255, 0x1f, 0x180},
+ { 0x37000, 1, 0x1f, 0x1c0},
+ { 0x37004, 255, 0x1f, 0x180},
+ { 0x37400, 1, 0x1f, 0x1c0},
+ { 0x37404, 255, 0x1f, 0x180},
+ { 0x37800, 1, 0x1f, 0x1c0},
+ { 0x37804, 255, 0x1f, 0x180},
+ { 0x37c00, 1, 0x1f, 0x1c0},
+ { 0x37c04, 255, 0x1f, 0x180},
+ { 0x38000, 1, 0x1f, 0x1c0},
+ { 0x38004, 255, 0x1f, 0x180},
+ { 0x38400, 1, 0x1f, 0x1c0},
+ { 0x38404, 255, 0x1f, 0x180},
+ { 0x38800, 1, 0x1f, 0x1c0},
+ { 0x38804, 255, 0x1f, 0x180},
+ { 0x38c00, 1, 0x1f, 0x1c0},
+ { 0x38c04, 255, 0x1f, 0x180},
+ { 0x39000, 1, 0x1f, 0x1c0},
+ { 0x39004, 255, 0x1f, 0x180},
+ { 0x39400, 1, 0x1f, 0x1c0},
+ { 0x39404, 255, 0x1f, 0x180},
+ { 0x39800, 1, 0x1f, 0x1c0},
+ { 0x39804, 255, 0x1f, 0x180},
+ { 0x39c00, 1, 0x1f, 0x1c0},
+ { 0x39c04, 255, 0x1f, 0x180},
+ { 0x3a000, 1, 0x1f, 0x1c0},
+ { 0x3a004, 255, 0x1f, 0x180},
+ { 0x3a400, 1, 0x1f, 0x1c0},
+ { 0x3a404, 255, 0x1f, 0x180},
+ { 0x3a800, 1, 0x1f, 0x1c0},
+ { 0x3a804, 255, 0x1f, 0x180},
+ { 0x3ac00, 1, 0x1f, 0x1c0},
+ { 0x3ac04, 255, 0x1f, 0x180},
+ { 0x3b000, 1, 0x1f, 0x1c0},
+ { 0x3b004, 255, 0x1f, 0x180},
+ { 0x3b400, 1, 0x1f, 0x1c0},
+ { 0x3b404, 255, 0x1f, 0x180},
+ { 0x3b800, 1, 0x1f, 0x1c0},
+ { 0x3b804, 255, 0x1f, 0x180},
+ { 0x3bc00, 1, 0x1f, 0x1c0},
+ { 0x3bc04, 255, 0x1f, 0x180},
+ { 0x3c000, 1, 0x1f, 0x1c0},
+ { 0x3c004, 255, 0x1f, 0x180},
+ { 0x3c400, 1, 0x1f, 0x1c0},
+ { 0x3c404, 255, 0x1f, 0x180},
+ { 0x3c800, 1, 0x1f, 0x1c0},
+ { 0x3c804, 255, 0x1f, 0x180},
+ { 0x3cc00, 1, 0x1f, 0x1c0},
+ { 0x3cc04, 255, 0x1f, 0x180},
+ { 0x3d000, 1, 0x1f, 0x1c0},
+ { 0x3d004, 255, 0x1f, 0x180},
+ { 0x3d400, 1, 0x1f, 0x1c0},
+ { 0x3d404, 255, 0x1f, 0x180},
+ { 0x3d800, 1, 0x1f, 0x1c0},
+ { 0x3d804, 255, 0x1f, 0x180},
+ { 0x3dc00, 1, 0x1f, 0x1c0},
+ { 0x3dc04, 255, 0x1f, 0x180},
+ { 0x3e000, 1, 0x1f, 0x1c0},
+ { 0x3e004, 255, 0x1f, 0x180},
+ { 0x3e400, 1, 0x1f, 0x1c0},
+ { 0x3e404, 255, 0x1f, 0x180},
+ { 0x3e800, 1, 0x1f, 0x1c0},
+ { 0x3e804, 255, 0x1f, 0x180},
+ { 0x3ec00, 1, 0x1f, 0x1c0},
+ { 0x3ec04, 255, 0x1f, 0x180},
+ { 0x3f000, 1, 0x1f, 0x1c0},
+ { 0x3f004, 255, 0x1f, 0x180},
+ { 0x3f400, 1, 0x1f, 0x1c0},
+ { 0x3f404, 255, 0x1f, 0x180},
+ { 0x3f800, 1, 0x1f, 0x1c0},
+ { 0x3f804, 255, 0x1f, 0x180},
+ { 0x3fc00, 1, 0x1f, 0x1c0},
+ { 0x3fc04, 255, 0x1f, 0x180},
+ { 0x40000, 85, 0x1f, 0x924},
+ { 0x40154, 13, 0x1f, 0xfff},
+ { 0x40198, 2, 0x1f, 0x1fff},
+ { 0x401a4, 1, 0x1f, 0x1fff},
+ { 0x401a8, 8, 0x1e, 0x924},
+ { 0x401c8, 1, 0x2, 0x924},
+ { 0x401cc, 2, 0x1e, 0x924},
+ { 0x401d4, 2, 0x1c, 0x924},
+ { 0x40200, 4, 0x1f, 0x924},
+ { 0x40220, 6, 0x1c, 0x924},
+ { 0x40238, 8, 0xc, 0x924},
+ { 0x40258, 4, 0x1c, 0x924},
+ { 0x40268, 2, 0x18, 0x924},
+ { 0x40270, 17, 0x10, 0x924},
+ { 0x40400, 43, 0x1f, 0x924},
+ { 0x404bc, 2, 0x1f, 0x1fff},
+ { 0x404c8, 1, 0x1f, 0x1fff},
+ { 0x404cc, 3, 0x1e, 0x924},
+ { 0x404e0, 1, 0x1c, 0x924},
+ { 0x40500, 2, 0x1f, 0x924},
+ { 0x40510, 2, 0x1f, 0x924},
+ { 0x40520, 2, 0x1f, 0x924},
+ { 0x40530, 2, 0x1f, 0x924},
+ { 0x40540, 2, 0x1f, 0x924},
+ { 0x40550, 10, 0x1c, 0x924},
+ { 0x40610, 2, 0x1c, 0x924},
+ { 0x42000, 164, 0x1f, 0x924},
+ { 0x422b0, 2, 0x1f, 0x1fff},
+ { 0x422bc, 1, 0x1f, 0x1fff},
+ { 0x422c0, 4, 0x1c, 0x924},
+ { 0x422d4, 5, 0x1e, 0x924},
+ { 0x422e8, 1, 0x1c, 0x924},
+ { 0x42400, 49, 0x1f, 0x924},
+ { 0x424c8, 32, 0x1f, 0x924},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x4254c, 1, 0x1f, 0x924},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42554, 1, 0x1f, 0x924},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x4255c, 1, 0x1f, 0x924},
+ { 0x42568, 2, 0x1f, 0x924},
+ { 0x42640, 5, 0x1c, 0x924},
+ { 0x42800, 1, 0x1f, 0x924},
+ { 0x50000, 1, 0x1f, 0x1fff},
+ { 0x50004, 19, 0x1f, 0x924},
+ { 0x50050, 8, 0x1f, 0x93c},
+ { 0x50070, 60, 0x1f, 0x924},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x50180, 20, 0x1f, 0x924},
+ { 0x501e0, 2, 0x1f, 0x1fff},
+ { 0x501ec, 1, 0x1f, 0x1fff},
+ { 0x501f0, 4, 0x1e, 0x924},
+ { 0x50200, 1, 0x1f, 0x924},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x50214, 2, 0x1f, 0x924},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x50220, 2, 0x1f, 0x924},
+ { 0x50228, 6, 0x1e, 0x924},
+ { 0x50240, 1, 0x1f, 0x924},
+ { 0x50280, 1, 0x1f, 0x924},
+ { 0x50300, 1, 0x1c, 0x924},
+ { 0x5030c, 1, 0x1c, 0x924},
+ { 0x50318, 1, 0x1c, 0x934},
+ { 0x5031c, 1, 0x1c, 0x924},
+ { 0x50320, 2, 0x1c, 0x934},
+ { 0x50330, 1, 0x10, 0x924},
+ { 0x52000, 1, 0x1f, 0x924},
+ { 0x54000, 1, 0x1f, 0x93c},
+ { 0x54004, 255, 0x1f, 0x30},
+ { 0x54400, 1, 0x1f, 0x38},
+ { 0x54404, 255, 0x1f, 0x30},
+ { 0x54800, 1, 0x1f, 0x38},
+ { 0x54804, 255, 0x1f, 0x30},
+ { 0x54c00, 1, 0x1f, 0x38},
+ { 0x54c04, 255, 0x1f, 0x30},
+ { 0x55000, 1, 0x1f, 0x38},
+ { 0x55004, 255, 0x1f, 0x30},
+ { 0x55400, 1, 0x1f, 0x38},
+ { 0x55404, 255, 0x1f, 0x30},
+ { 0x55800, 1, 0x1f, 0x38},
+ { 0x55804, 255, 0x1f, 0x30},
+ { 0x55c00, 1, 0x1f, 0x38},
+ { 0x55c04, 255, 0x1f, 0x30},
+ { 0x56000, 1, 0x1f, 0x38},
+ { 0x56004, 255, 0x1f, 0x30},
+ { 0x56400, 1, 0x1f, 0x38},
+ { 0x56404, 255, 0x1f, 0x30},
+ { 0x56800, 1, 0x1f, 0x38},
+ { 0x56804, 255, 0x1f, 0x30},
+ { 0x56c00, 1, 0x1f, 0x38},
+ { 0x56c04, 255, 0x1f, 0x30},
+ { 0x57000, 1, 0x1f, 0x38},
+ { 0x57004, 255, 0x1f, 0x30},
+ { 0x58000, 1, 0x1f, 0x934},
+ { 0x58004, 8191, 0x3, 0x30},
+ { 0x60000, 26, 0x1f, 0x924},
+ { 0x60068, 8, 0x3, 0x924},
+ { 0x60088, 2, 0x1f, 0x924},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x60094, 9, 0x1f, 0x924},
+ { 0x600b8, 9, 0x3, 0x924},
+ { 0x600dc, 1, 0x1f, 0x924},
+ { 0x600e0, 5, 0x3, 0x924},
+ { 0x600f4, 1, 0x7, 0x924},
+ { 0x600f8, 1, 0x3, 0x924},
+ { 0x600fc, 8, 0x1f, 0x924},
+ { 0x6012c, 2, 0x1f, 0x1fff},
+ { 0x60138, 1, 0x1f, 0x1fff},
+ { 0x6013c, 24, 0x2, 0x924},
+ { 0x6019c, 2, 0x1c, 0x924},
+ { 0x601ac, 18, 0x1c, 0x924},
+ { 0x60200, 1, 0x1f, 0xb6d},
+ { 0x60204, 2, 0x1f, 0x249},
+ { 0x60210, 13, 0x1c, 0x924},
+ { 0x60244, 16, 0x10, 0x924},
+ { 0x61000, 1, 0x1f, 0xb6d},
+ { 0x61004, 511, 0x1f, 0x249},
+ { 0x61800, 512, 0x18, 0x249},
+ { 0x70000, 8, 0x1f, 0xb6d},
+ { 0x70020, 8184, 0x1f, 0x249},
+ { 0x78000, 8192, 0x18, 0x249},
+ { 0x85000, 3, 0x1f, 0x1000},
+ { 0x8501c, 7, 0x1f, 0x1000},
+ { 0x85048, 1, 0x1f, 0x1000},
+ { 0x85200, 32, 0x1f, 0x1000},
+ { 0xa0000, 16384, 0x3, 0x1000},
+ { 0xb0000, 16384, 0x2, 0x1000},
+ { 0xc1000, 7, 0x1f, 0x924},
+ { 0xc102c, 2, 0x1f, 0x1fff},
+ { 0xc1038, 1, 0x1f, 0x1fff},
+ { 0xc103c, 2, 0x1c, 0x924},
+ { 0xc1800, 2, 0x1f, 0x924},
+ { 0xc2000, 164, 0x1f, 0x924},
+ { 0xc22b0, 2, 0x1f, 0x1fff},
+ { 0xc22bc, 1, 0x1f, 0x1fff},
+ { 0xc22c0, 5, 0x1c, 0x924},
+ { 0xc22d8, 4, 0x1c, 0x924},
+ { 0xc2400, 49, 0x1f, 0x924},
+ { 0xc24c8, 32, 0x1f, 0x924},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc254c, 1, 0x1f, 0x924},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2554, 1, 0x1f, 0x924},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc255c, 1, 0x1f, 0x924},
+ { 0xc2568, 2, 0x1f, 0x924},
+ { 0xc2600, 1, 0x1f, 0x924},
+ { 0xc4000, 165, 0x1f, 0x924},
+ { 0xc42b4, 2, 0x1f, 0x1fff},
+ { 0xc42c0, 1, 0x1f, 0x1fff},
+ { 0xc42d8, 2, 0x1c, 0x924},
+ { 0xc42e0, 7, 0x1e, 0x924},
+ { 0xc42fc, 1, 0x1c, 0x924},
+ { 0xc4400, 51, 0x1f, 0x924},
+ { 0xc44d0, 32, 0x1f, 0x924},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4554, 1, 0x1f, 0x924},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc455c, 1, 0x1f, 0x924},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xc4564, 1, 0x1f, 0x924},
+ { 0xc4570, 2, 0x1f, 0x924},
+ { 0xc4578, 5, 0x1c, 0x924},
+ { 0xc4600, 1, 0x1f, 0x924},
+ { 0xd0000, 19, 0x1f, 0x924},
+ { 0xd004c, 8, 0x1f, 0x1927},
+ { 0xd006c, 64, 0x1f, 0x924},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd018c, 19, 0x1f, 0x924},
+ { 0xd01e8, 2, 0x1f, 0x1fff},
+ { 0xd01f4, 1, 0x1f, 0x1fff},
+ { 0xd01fc, 1, 0x1c, 0x924},
+ { 0xd0200, 1, 0x1f, 0x924},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xd0218, 4, 0x1f, 0x924},
+ { 0xd0228, 18, 0x1e, 0x924},
+ { 0xd0280, 1, 0x1f, 0x924},
+ { 0xd0300, 1, 0x1f, 0x924},
+ { 0xd0400, 1, 0x1f, 0x924},
+ { 0xd0818, 1, 0x10, 0x924},
+ { 0xd4000, 1, 0x1f, 0x1927},
+ { 0xd4004, 255, 0x1f, 0x6},
+ { 0xd4400, 1, 0x1f, 0x1007},
+ { 0xd4404, 255, 0x1f, 0x6},
+ { 0xd4800, 1, 0x1f, 0x1007},
+ { 0xd4804, 255, 0x1f, 0x6},
+ { 0xd4c00, 1, 0x1f, 0x1007},
+ { 0xd4c04, 255, 0x1f, 0x6},
+ { 0xd5000, 1, 0x1f, 0x1007},
+ { 0xd5004, 255, 0x1f, 0x6},
+ { 0xd5400, 1, 0x1f, 0x1007},
+ { 0xd5404, 255, 0x1f, 0x6},
+ { 0xd5800, 1, 0x1f, 0x1007},
+ { 0xd5804, 255, 0x1f, 0x6},
+ { 0xd5c00, 1, 0x1f, 0x1007},
+ { 0xd5c04, 255, 0x1f, 0x6},
+ { 0xd6000, 1, 0x1f, 0x1007},
+ { 0xd6004, 255, 0x1f, 0x6},
+ { 0xd6400, 1, 0x1f, 0x1007},
+ { 0xd6404, 255, 0x1f, 0x6},
+ { 0xd8000, 1, 0x1f, 0x1927},
+ { 0xd8004, 255, 0x1f, 0x6},
+ { 0xd8400, 1, 0x1f, 0x1007},
+ { 0xd8404, 255, 0x1f, 0x6},
+ { 0xd8800, 1, 0x1f, 0x1007},
+ { 0xd8804, 255, 0x1f, 0x6},
+ { 0xd8c00, 1, 0x1f, 0x1007},
+ { 0xd8c04, 255, 0x1f, 0x6},
+ { 0xd9000, 1, 0x1f, 0x1007},
+ { 0xd9004, 255, 0x1f, 0x6},
+ { 0xd9400, 1, 0x1f, 0x1007},
+ { 0xd9404, 255, 0x1f, 0x6},
+ { 0xd9800, 1, 0x1f, 0x1007},
+ { 0xd9804, 255, 0x1f, 0x6},
+ { 0xd9c00, 1, 0x1f, 0x1007},
+ { 0xd9c04, 255, 0x1f, 0x6},
+ { 0xda000, 1, 0x1f, 0x1007},
+ { 0xda004, 255, 0x1f, 0x6},
+ { 0xda400, 1, 0x1f, 0x1007},
+ { 0xda404, 255, 0x1f, 0x6},
+ { 0xda800, 1, 0x1f, 0x1007},
+ { 0xda804, 255, 0x1f, 0x6},
+ { 0xdac00, 1, 0x1f, 0x1007},
+ { 0xdac04, 255, 0x1f, 0x6},
+ { 0xdb000, 1, 0x1f, 0x1007},
+ { 0xdb004, 255, 0x1f, 0x6},
+ { 0xdb400, 1, 0x1f, 0x1007},
+ { 0xdb404, 255, 0x1f, 0x6},
+ { 0xdb800, 1, 0x1f, 0x1007},
+ { 0xdb804, 255, 0x1f, 0x6},
+ { 0xdbc00, 1, 0x1f, 0x1007},
+ { 0xdbc04, 255, 0x1f, 0x6},
+ { 0xdc000, 1, 0x1f, 0x1007},
+ { 0xdc004, 255, 0x1f, 0x6},
+ { 0xdc400, 1, 0x1f, 0x1007},
+ { 0xdc404, 255, 0x1f, 0x6},
+ { 0xdc800, 1, 0x1f, 0x1007},
+ { 0xdc804, 255, 0x1f, 0x6},
+ { 0xdcc00, 1, 0x1f, 0x1007},
+ { 0xdcc04, 255, 0x1f, 0x6},
+ { 0xdd000, 1, 0x1f, 0x1007},
+ { 0xdd004, 255, 0x1f, 0x6},
+ { 0xdd400, 1, 0x1f, 0x1007},
+ { 0xdd404, 255, 0x1f, 0x6},
+ { 0xdd800, 1, 0x1f, 0x1007},
+ { 0xdd804, 255, 0x1f, 0x6},
+ { 0xddc00, 1, 0x1f, 0x1007},
+ { 0xddc04, 255, 0x1f, 0x6},
+ { 0xde000, 1, 0x1f, 0x1007},
+ { 0xde004, 255, 0x1f, 0x6},
+ { 0xde400, 1, 0x1f, 0x1007},
+ { 0xde404, 255, 0x1f, 0x6},
+ { 0xde800, 1, 0x1f, 0x1007},
+ { 0xde804, 255, 0x1f, 0x6},
+ { 0xdec00, 1, 0x1f, 0x1007},
+ { 0xdec04, 255, 0x1f, 0x6},
+ { 0xdf000, 1, 0x1f, 0x1007},
+ { 0xdf004, 255, 0x1f, 0x6},
+ { 0xdf400, 1, 0x1f, 0x1007},
+ { 0xdf404, 255, 0x1f, 0x6},
+ { 0xdf800, 1, 0x1f, 0x1007},
+ { 0xdf804, 255, 0x1f, 0x6},
+ { 0xdfc00, 1, 0x1f, 0x1007},
+ { 0xdfc04, 255, 0x1f, 0x6},
+ { 0xe0000, 21, 0x1f, 0x924},
+ { 0xe0054, 8, 0x1f, 0xf24},
+ { 0xe0074, 49, 0x1f, 0x924},
+ { 0xe0138, 1, 0x3, 0x924},
+ { 0xe013c, 6, 0x1f, 0x924},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe0174, 21, 0x1f, 0x924},
+ { 0xe01d8, 2, 0x1f, 0x1fff},
+ { 0xe01e4, 1, 0x1f, 0x1fff},
+ { 0xe01f4, 1, 0x4, 0x924},
+ { 0xe01f8, 1, 0x1c, 0x924},
+ { 0xe0200, 1, 0x1f, 0x924},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe0214, 2, 0x1f, 0x924},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0xe0224, 2, 0x1f, 0x924},
+ { 0xe022c, 18, 0x1e, 0x924},
+ { 0xe0280, 1, 0x1f, 0x924},
+ { 0xe0300, 1, 0x1f, 0x924},
+ { 0xe0400, 1, 0x10, 0x924},
+ { 0xe1000, 1, 0x1f, 0x924},
+ { 0xe2000, 1, 0x1f, 0xf24},
+ { 0xe2004, 255, 0x1f, 0xc00},
+ { 0xe2400, 1, 0x1f, 0xe00},
+ { 0xe2404, 255, 0x1f, 0xc00},
+ { 0xe2800, 1, 0x1f, 0xe00},
+ { 0xe2804, 255, 0x1f, 0xc00},
+ { 0xe2c00, 1, 0x1f, 0xe00},
+ { 0xe2c04, 255, 0x1f, 0xc00},
+ { 0xe3000, 1, 0x1f, 0xe00},
+ { 0xe3004, 255, 0x1f, 0xc00},
+ { 0xe3400, 1, 0x1f, 0xe00},
+ { 0xe3404, 255, 0x1f, 0xc00},
+ { 0xe3800, 1, 0x1f, 0xe00},
+ { 0xe3804, 255, 0x1f, 0xc00},
+ { 0xe3c00, 1, 0x1f, 0xe00},
+ { 0xe3c04, 255, 0x1f, 0xc00},
+ { 0xf0000, 1, 0x1f, 0xf24},
+ { 0xf0004, 255, 0x1f, 0xc00},
+ { 0xf0400, 1, 0x1f, 0xe00},
+ { 0xf0404, 255, 0x1f, 0xc00},
+ { 0xf0800, 1, 0x1f, 0xe00},
+ { 0xf0804, 255, 0x1f, 0xc00},
+ { 0xf0c00, 1, 0x1f, 0xe00},
+ { 0xf0c04, 255, 0x1f, 0xc00},
+ { 0xf1000, 1, 0x1f, 0xe00},
+ { 0xf1004, 255, 0x1f, 0xc00},
+ { 0xf1400, 1, 0x1f, 0xe00},
+ { 0xf1404, 255, 0x1f, 0xc00},
+ { 0xf1800, 1, 0x1f, 0xe00},
+ { 0xf1804, 255, 0x1f, 0xc00},
+ { 0xf1c00, 1, 0x1f, 0xe00},
+ { 0xf1c04, 255, 0x1f, 0xc00},
+ { 0xf2000, 1, 0x1f, 0xe00},
+ { 0xf2004, 255, 0x1f, 0xc00},
+ { 0xf2400, 1, 0x1f, 0xe00},
+ { 0xf2404, 255, 0x1f, 0xc00},
+ { 0xf2800, 1, 0x1f, 0xe00},
+ { 0xf2804, 255, 0x1f, 0xc00},
+ { 0xf2c00, 1, 0x1f, 0xe00},
+ { 0xf2c04, 255, 0x1f, 0xc00},
+ { 0xf3000, 1, 0x1f, 0xe00},
+ { 0xf3004, 255, 0x1f, 0xc00},
+ { 0xf3400, 1, 0x1f, 0xe00},
+ { 0xf3404, 255, 0x1f, 0xc00},
+ { 0xf3800, 1, 0x1f, 0xe00},
+ { 0xf3804, 255, 0x1f, 0xc00},
+ { 0xf3c00, 1, 0x1f, 0xe00},
+ { 0xf3c04, 255, 0x1f, 0xc00},
+ { 0xf4000, 1, 0x1f, 0xe00},
+ { 0xf4004, 255, 0x1f, 0xc00},
+ { 0xf4400, 1, 0x1f, 0xe00},
+ { 0xf4404, 255, 0x1f, 0xc00},
+ { 0xf4800, 1, 0x1f, 0xe00},
+ { 0xf4804, 255, 0x1f, 0xc00},
+ { 0xf4c00, 1, 0x1f, 0xe00},
+ { 0xf4c04, 255, 0x1f, 0xc00},
+ { 0xf5000, 1, 0x1f, 0xe00},
+ { 0xf5004, 255, 0x1f, 0xc00},
+ { 0xf5400, 1, 0x1f, 0xe00},
+ { 0xf5404, 255, 0x1f, 0xc00},
+ { 0xf5800, 1, 0x1f, 0xe00},
+ { 0xf5804, 255, 0x1f, 0xc00},
+ { 0xf5c00, 1, 0x1f, 0xe00},
+ { 0xf5c04, 255, 0x1f, 0xc00},
+ { 0xf6000, 1, 0x1f, 0xe00},
+ { 0xf6004, 255, 0x1f, 0xc00},
+ { 0xf6400, 1, 0x1f, 0xe00},
+ { 0xf6404, 255, 0x1f, 0xc00},
+ { 0xf6800, 1, 0x1f, 0xe00},
+ { 0xf6804, 255, 0x1f, 0xc00},
+ { 0xf6c00, 1, 0x1f, 0xe00},
+ { 0xf6c04, 255, 0x1f, 0xc00},
+ { 0xf7000, 1, 0x1f, 0xe00},
+ { 0xf7004, 255, 0x1f, 0xc00},
+ { 0xf7400, 1, 0x1f, 0xe00},
+ { 0xf7404, 255, 0x1f, 0xc00},
+ { 0xf7800, 1, 0x1f, 0xe00},
+ { 0xf7804, 255, 0x1f, 0xc00},
+ { 0xf7c00, 1, 0x1f, 0xe00},
+ { 0xf7c04, 255, 0x1f, 0xc00},
+ { 0xf8000, 1, 0x1f, 0xe00},
+ { 0xf8004, 255, 0x1f, 0xc00},
+ { 0xf8400, 1, 0x1f, 0xe00},
+ { 0xf8404, 255, 0x1f, 0xc00},
+ { 0xf8800, 1, 0x1f, 0xe00},
+ { 0xf8804, 255, 0x1f, 0xc00},
+ { 0xf8c00, 1, 0x1f, 0xe00},
+ { 0xf8c04, 255, 0x1f, 0xc00},
+ { 0xf9000, 1, 0x1f, 0xe00},
+ { 0xf9004, 255, 0x1f, 0xc00},
+ { 0xf9400, 1, 0x1f, 0xe00},
+ { 0xf9404, 255, 0x1f, 0xc00},
+ { 0xf9800, 1, 0x1f, 0xe00},
+ { 0xf9804, 255, 0x1f, 0xc00},
+ { 0xf9c00, 1, 0x1f, 0xe00},
+ { 0xf9c04, 255, 0x1f, 0xc00},
+ { 0xfa000, 1, 0x1f, 0xe00},
+ { 0xfa004, 255, 0x1f, 0xc00},
+ { 0xfa400, 1, 0x1f, 0xe00},
+ { 0xfa404, 255, 0x1f, 0xc00},
+ { 0xfa800, 1, 0x1f, 0xe00},
+ { 0xfa804, 255, 0x1f, 0xc00},
+ { 0xfac00, 1, 0x1f, 0xe00},
+ { 0xfac04, 255, 0x1f, 0xc00},
+ { 0xfb000, 1, 0x1f, 0xe00},
+ { 0xfb004, 255, 0x1f, 0xc00},
+ { 0xfb400, 1, 0x1f, 0xe00},
+ { 0xfb404, 255, 0x1f, 0xc00},
+ { 0xfb800, 1, 0x1f, 0xe00},
+ { 0xfb804, 255, 0x1f, 0xc00},
+ { 0xfbc00, 1, 0x1f, 0xe00},
+ { 0xfbc04, 255, 0x1f, 0xc00},
+ { 0xfc000, 1, 0x1f, 0xe00},
+ { 0xfc004, 255, 0x1f, 0xc00},
+ { 0xfc400, 1, 0x1f, 0xe00},
+ { 0xfc404, 255, 0x1f, 0xc00},
+ { 0xfc800, 1, 0x1f, 0xe00},
+ { 0xfc804, 255, 0x1f, 0xc00},
+ { 0xfcc00, 1, 0x1f, 0xe00},
+ { 0xfcc04, 255, 0x1f, 0xc00},
+ { 0xfd000, 1, 0x1f, 0xe00},
+ { 0xfd004, 255, 0x1f, 0xc00},
+ { 0xfd400, 1, 0x1f, 0xe00},
+ { 0xfd404, 255, 0x1f, 0xc00},
+ { 0xfd800, 1, 0x1f, 0xe00},
+ { 0xfd804, 255, 0x1f, 0xc00},
+ { 0xfdc00, 1, 0x1f, 0xe00},
+ { 0xfdc04, 255, 0x1f, 0xc00},
+ { 0xfe000, 1, 0x1f, 0xe00},
+ { 0xfe004, 255, 0x1f, 0xc00},
+ { 0xfe400, 1, 0x1f, 0xe00},
+ { 0xfe404, 255, 0x1f, 0xc00},
+ { 0xfe800, 1, 0x1f, 0xe00},
+ { 0xfe804, 255, 0x1f, 0xc00},
+ { 0xfec00, 1, 0x1f, 0xe00},
+ { 0xfec04, 255, 0x1f, 0xc00},
+ { 0xff000, 1, 0x1f, 0xe00},
+ { 0xff004, 255, 0x1f, 0xc00},
+ { 0xff400, 1, 0x1f, 0xe00},
+ { 0xff404, 255, 0x1f, 0xc00},
+ { 0xff800, 1, 0x1f, 0xe00},
+ { 0xff804, 255, 0x1f, 0xc00},
+ { 0xffc00, 1, 0x1f, 0xe00},
+ { 0xffc04, 255, 0x1f, 0xc00},
+ { 0x101000, 5, 0x1f, 0x924},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101018, 6, 0x1f, 0x924},
+ { 0x101040, 2, 0x1f, 0x1fff},
+ { 0x10104c, 1, 0x1f, 0x1fff},
+ { 0x101050, 1, 0x1e, 0x924},
+ { 0x101054, 3, 0x1c, 0x924},
+ { 0x101100, 1, 0x1f, 0x924},
+ { 0x101800, 8, 0x1f, 0x924},
+ { 0x102000, 18, 0x1f, 0x924},
+ { 0x102058, 2, 0x1f, 0x1fff},
+ { 0x102064, 1, 0x1f, 0x1fff},
+ { 0x102068, 6, 0x1c, 0x924},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x1020c0, 1, 0x1f, 0x924},
+ { 0x1020c8, 8, 0x2, 0x924},
+ { 0x1020e8, 9, 0x1c, 0x924},
+ { 0x102400, 1, 0x1f, 0x924},
+ { 0x103000, 1, 0x1f, 0x924},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x10300c, 23, 0x1f, 0x924},
+ { 0x103088, 2, 0x1f, 0x1fff},
+ { 0x103094, 1, 0x1f, 0x1fff},
+ { 0x103098, 1, 0x1e, 0x924},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030a4, 2, 0x1e, 0x924},
+ { 0x1030ac, 2, 0x1c, 0x924},
+ { 0x1030b4, 1, 0x4, 0x924},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030c0, 3, 0x1c, 0x924},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030d0, 1, 0x1c, 0x924},
+ { 0x1030d8, 2, 0x1c, 0x924},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x1030e4, 5, 0x1c, 0x924},
+ { 0x103400, 136, 0x1c, 0x1fff},
+ { 0x103800, 8, 0x1f, 0x924},
+ { 0x104000, 1, 0x1f, 0x924},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104008, 4, 0x1f, 0x924},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x10401c, 1, 0x1f, 0x924},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x104024, 6, 0x1f, 0x924},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x104040, 47, 0x1f, 0x924},
+ { 0x10410c, 2, 0x1f, 0x1fff},
+ { 0x104118, 1, 0x1f, 0x1fff},
+ { 0x10411c, 16, 0x1c, 0x924},
+ { 0x104200, 17, 0x1f, 0x924},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104500, 192, 0x1f, 0xdb6},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x104900, 192, 0x1f, 0xdb6},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x105400, 768, 0x1f, 0xdb6},
+ { 0x107000, 7, 0x1c, 0x924},
+ { 0x10701c, 1, 0x18, 0x924},
+ { 0x108000, 33, 0x3, 0x924},
+ { 0x1080ac, 5, 0x2, 0x924},
+ { 0x108100, 5, 0x3, 0x924},
+ { 0x108120, 5, 0x3, 0x924},
+ { 0x108200, 74, 0x3, 0x924},
+ { 0x108400, 74, 0x3, 0x924},
+ { 0x108800, 152, 0x3, 0x924},
+ { 0x110000, 111, 0x1c, 0x924},
+ { 0x1101cc, 2, 0x1c, 0x1fff},
+ { 0x1101d8, 1, 0x1c, 0x1fff},
+ { 0x1101dc, 1, 0x18, 0x924},
+ { 0x110200, 4, 0x1c, 0x924},
+ { 0x120000, 92, 0x1f, 0x924},
+ { 0x120170, 2, 0x3, 0x924},
+ { 0x120178, 14, 0x1f, 0x924},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x1201b8, 93, 0x1f, 0x924},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x120330, 15, 0x1f, 0x924},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120378, 36, 0x1f, 0x924},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120410, 1, 0x1f, 0x924},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120450, 10, 0x1f, 0x924},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x120480, 43, 0x1f, 0x924},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120530, 5, 0x1f, 0x924},
+ { 0x120544, 4, 0x3, 0x924},
+ { 0x120554, 4, 0x1f, 0x924},
+ { 0x120564, 2, 0x1f, 0xfff},
+ { 0x12057c, 2, 0x1f, 0x1fff},
+ { 0x120588, 3, 0x1f, 0x1fff},
+ { 0x120598, 1, 0x1f, 0x1fff},
+ { 0x12059c, 22, 0x1e, 0x924},
+ { 0x1205f4, 1, 0x6, 0x924},
+ { 0x1205f8, 4, 0x1c, 0x924},
+ { 0x120618, 1, 0x1c, 0x924},
+ { 0x12061c, 31, 0x1e, 0x924},
+ { 0x120698, 3, 0x1c, 0x924},
+ { 0x1206a4, 1, 0x4, 0x924},
+ { 0x1206a8, 1, 0x1c, 0x924},
+ { 0x1206b0, 38, 0x1c, 0x924},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x12074c, 11, 0x1c, 0x924},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120780, 23, 0x1c, 0x924},
+ { 0x1207dc, 1, 0x4, 0x924},
+ { 0x1207fc, 1, 0x1c, 0x924},
+ { 0x12080c, 2, 0x1f, 0xfff},
+ { 0x120814, 1, 0x1f, 0x924},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x12081c, 1, 0x1f, 0x924},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120824, 1, 0x1f, 0x924},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x12082c, 1, 0x1f, 0x924},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120834, 1, 0x1f, 0x924},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x12083c, 1, 0x1f, 0x924},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120844, 1, 0x1f, 0x924},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x12084c, 1, 0x1f, 0x924},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120854, 1, 0x1f, 0x924},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x12085c, 1, 0x1f, 0x924},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120864, 1, 0x1f, 0x924},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x12086c, 1, 0x1f, 0x924},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120874, 1, 0x1f, 0x924},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x12087c, 1, 0x1f, 0x924},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120884, 1, 0x1f, 0x924},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x12088c, 1, 0x1f, 0x924},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120894, 1, 0x1f, 0x924},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x12089c, 1, 0x1f, 0x924},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a4, 1, 0x1f, 0x924},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208ac, 1, 0x1f, 0x924},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b4, 1, 0x1f, 0x924},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208bc, 1, 0x1f, 0x924},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c4, 1, 0x1f, 0x924},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208cc, 1, 0x1f, 0x924},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d4, 1, 0x1f, 0x924},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208dc, 1, 0x1f, 0x924},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e4, 1, 0x1f, 0x924},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208ec, 1, 0x1f, 0x924},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f4, 1, 0x1f, 0x924},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x1208fc, 1, 0x1f, 0x924},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120904, 1, 0x1f, 0x924},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x12090c, 1, 0x1f, 0x924},
+ { 0x120910, 7, 0x1c, 0x924},
+ { 0x120930, 9, 0x1c, 0x924},
+ { 0x12095c, 37, 0x18, 0x924},
+ { 0x120a00, 2, 0x7, 0x924},
+ { 0x120b00, 1, 0x18, 0x924},
+ { 0x122000, 2, 0x1f, 0x924},
+ { 0x122008, 2046, 0x1, 0x924},
+ { 0x128000, 6144, 0x1e, 0x924},
+ { 0x130000, 1, 0x1c, 0x1fff},
+ { 0x130004, 11, 0x1c, 0x924},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x130034, 6, 0x1c, 0x924},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130058, 3, 0x1c, 0x924},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13006c, 8, 0x1c, 0x924},
+ { 0x13009c, 2, 0x1c, 0x1fff},
+ { 0x1300a8, 1, 0x1c, 0x1fff},
+ { 0x130100, 12, 0x1c, 0x924},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x130134, 14, 0x1c, 0x924},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130170, 1, 0x1c, 0x924},
+ { 0x130180, 1, 0x1c, 0x924},
+ { 0x130200, 1, 0x1c, 0x924},
+ { 0x130280, 1, 0x1c, 0x924},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130304, 4, 0x1c, 0x924},
+ { 0x130380, 1, 0x1c, 0x924},
+ { 0x130400, 1, 0x1c, 0x924},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x130484, 4, 0x1c, 0x924},
+ { 0x130800, 72, 0x1c, 0x924},
+ { 0x131000, 136, 0x1c, 0x924},
+ { 0x132000, 148, 0x1c, 0x924},
+ { 0x134000, 544, 0x1c, 0x924},
+ { 0x140000, 1, 0x1f, 0x924},
+ { 0x140004, 9, 0xf, 0x924},
+ { 0x140028, 8, 0x1f, 0x924},
+ { 0x140048, 5, 0xf, 0x924},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x140064, 3, 0xf, 0x924},
+ { 0x140070, 1, 0x1f, 0x924},
+ { 0x140074, 10, 0xf, 0x924},
+ { 0x14009c, 1, 0x1f, 0x924},
+ { 0x1400a0, 5, 0xf, 0x924},
+ { 0x1400b4, 7, 0x1f, 0x924},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400d8, 2, 0xf, 0x924},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1400e4, 5, 0xf, 0x924},
+ { 0x1400f8, 2, 0x1f, 0x924},
+ { 0x140100, 5, 0x3, 0x924},
+ { 0x140114, 5, 0xf, 0x924},
+ { 0x140128, 7, 0x1f, 0x924},
+ { 0x140144, 9, 0xf, 0x924},
+ { 0x140168, 8, 0x1f, 0x924},
+ { 0x140188, 3, 0xf, 0x924},
+ { 0x140194, 13, 0x1f, 0x924},
+ { 0x1401d8, 2, 0x1f, 0x1fff},
+ { 0x1401e4, 1, 0x1f, 0x1fff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x1402e0, 2, 0xc, 0x924},
+ { 0x1402e8, 2, 0x1c, 0x924},
+ { 0x1402f0, 9, 0xc, 0x924},
+ { 0x140314, 9, 0x10, 0x924},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140354, 7, 0x10, 0x924},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x14038c, 14, 0x10, 0x924},
+ { 0x1404b0, 14, 0x10, 0x924},
+ { 0x15c000, 2, 0x1e, 0x924},
+ { 0x15c008, 5, 0x2, 0x924},
+ { 0x15c020, 8, 0x1c, 0x924},
+ { 0x15c040, 1, 0xc, 0x924},
+ { 0x15c044, 2, 0x1c, 0x924},
+ { 0x15c04c, 8, 0xc, 0x924},
+ { 0x15c06c, 8, 0x1c, 0x924},
+ { 0x15c090, 13, 0x1c, 0x924},
+ { 0x15c0c8, 24, 0x1c, 0x924},
+ { 0x15c128, 2, 0xc, 0x924},
+ { 0x15c130, 1, 0x1c, 0x924},
+ { 0x15c138, 6, 0x1c, 0x924},
+ { 0x15c150, 2, 0x18, 0x924},
+ { 0x15c158, 2, 0x8, 0x924},
+ { 0x15c160, 23, 0x10, 0x924},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c1d4, 23, 0x10, 0x924},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x15c24c, 90, 0x10, 0x924},
+ { 0x160004, 6, 0x18, 0x924},
+ { 0x16003c, 1, 0x10, 0x924},
+ { 0x160040, 6, 0x18, 0x924},
+ { 0x16005c, 6, 0x18, 0x924},
+ { 0x160074, 1, 0x10, 0x924},
+ { 0x160078, 2, 0x18, 0x924},
+ { 0x160300, 8, 0x18, 0x924},
+ { 0x160330, 6, 0x18, 0x924},
+ { 0x160404, 6, 0x18, 0x924},
+ { 0x16043c, 1, 0x10, 0x924},
+ { 0x160440, 6, 0x18, 0x924},
+ { 0x16045c, 6, 0x18, 0x924},
+ { 0x160474, 1, 0x10, 0x924},
+ { 0x160478, 2, 0x18, 0x924},
+ { 0x160700, 8, 0x18, 0x924},
+ { 0x160730, 6, 0x18, 0x924},
+ { 0x161000, 7, 0x1f, 0x924},
+ { 0x16102c, 2, 0x1f, 0x1fff},
+ { 0x161038, 1, 0x1f, 0x1fff},
+ { 0x16103c, 2, 0x1c, 0x924},
+ { 0x161800, 2, 0x1f, 0x924},
+ { 0x162000, 54, 0x18, 0x924},
+ { 0x162200, 60, 0x18, 0x924},
+ { 0x162400, 54, 0x18, 0x924},
+ { 0x162600, 60, 0x18, 0x924},
+ { 0x162800, 54, 0x18, 0x924},
+ { 0x162a00, 60, 0x18, 0x924},
+ { 0x162c00, 54, 0x18, 0x924},
+ { 0x162e00, 60, 0x18, 0x924},
+ { 0x163000, 1, 0x18, 0x924},
+ { 0x163008, 1, 0x18, 0x924},
+ { 0x163010, 1, 0x18, 0x924},
+ { 0x163018, 1, 0x18, 0x924},
+ { 0x163020, 5, 0x18, 0x924},
+ { 0x163038, 3, 0x18, 0x924},
+ { 0x163048, 3, 0x18, 0x924},
+ { 0x163058, 1, 0x18, 0x924},
+ { 0x163060, 1, 0x18, 0x924},
+ { 0x163068, 1, 0x18, 0x924},
+ { 0x163070, 3, 0x18, 0x924},
+ { 0x163080, 1, 0x18, 0x924},
+ { 0x163088, 3, 0x18, 0x924},
+ { 0x163098, 1, 0x18, 0x924},
+ { 0x1630a0, 1, 0x18, 0x924},
+ { 0x1630a8, 1, 0x18, 0x924},
+ { 0x1630b0, 2, 0x10, 0x924},
+ { 0x1630c0, 1, 0x18, 0x924},
+ { 0x1630c8, 1, 0x18, 0x924},
+ { 0x1630d0, 1, 0x18, 0x924},
+ { 0x1630d8, 1, 0x18, 0x924},
+ { 0x1630e0, 2, 0x18, 0x924},
+ { 0x163110, 1, 0x18, 0x924},
+ { 0x163120, 2, 0x18, 0x924},
+ { 0x163420, 4, 0x18, 0x924},
+ { 0x163438, 2, 0x18, 0x924},
+ { 0x163488, 2, 0x18, 0x924},
+ { 0x163520, 2, 0x18, 0x924},
+ { 0x163800, 1, 0x18, 0x924},
+ { 0x163808, 1, 0x18, 0x924},
+ { 0x163810, 1, 0x18, 0x924},
+ { 0x163818, 1, 0x18, 0x924},
+ { 0x163820, 5, 0x18, 0x924},
+ { 0x163838, 3, 0x18, 0x924},
+ { 0x163848, 3, 0x18, 0x924},
+ { 0x163858, 1, 0x18, 0x924},
+ { 0x163860, 1, 0x18, 0x924},
+ { 0x163868, 1, 0x18, 0x924},
+ { 0x163870, 3, 0x18, 0x924},
+ { 0x163880, 1, 0x18, 0x924},
+ { 0x163888, 3, 0x18, 0x924},
+ { 0x163898, 1, 0x18, 0x924},
+ { 0x1638a0, 1, 0x18, 0x924},
+ { 0x1638a8, 1, 0x18, 0x924},
+ { 0x1638b0, 2, 0x10, 0x924},
+ { 0x1638c0, 1, 0x18, 0x924},
+ { 0x1638c8, 1, 0x18, 0x924},
+ { 0x1638d0, 1, 0x18, 0x924},
+ { 0x1638d8, 1, 0x18, 0x924},
+ { 0x1638e0, 2, 0x18, 0x924},
+ { 0x163910, 1, 0x18, 0x924},
+ { 0x163920, 2, 0x18, 0x924},
+ { 0x163c20, 4, 0x18, 0x924},
+ { 0x163c38, 2, 0x18, 0x924},
+ { 0x163c88, 2, 0x18, 0x924},
+ { 0x163d20, 2, 0x18, 0x924},
+ { 0x164000, 5, 0x1f, 0x924},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x16401c, 53, 0x1f, 0x924},
+ { 0x164100, 2, 0x1f, 0x1fff},
+ { 0x16410c, 1, 0x1f, 0x1fff},
+ { 0x164110, 2, 0x1e, 0x924},
+ { 0x164118, 15, 0x1c, 0x924},
+ { 0x164200, 1, 0x1f, 0x924},
+ { 0x164208, 1, 0x1f, 0x924},
+ { 0x164210, 1, 0x1f, 0x924},
+ { 0x164218, 1, 0x1f, 0x924},
+ { 0x164220, 1, 0x1f, 0x924},
+ { 0x164228, 1, 0x1f, 0x924},
+ { 0x164230, 1, 0x1f, 0x924},
+ { 0x164238, 1, 0x1f, 0x924},
+ { 0x164240, 1, 0x1f, 0x924},
+ { 0x164248, 1, 0x1f, 0x924},
+ { 0x164250, 1, 0x1f, 0x924},
+ { 0x164258, 1, 0x1f, 0x924},
+ { 0x164260, 1, 0x1f, 0x924},
+ { 0x164270, 2, 0x1f, 0x924},
+ { 0x164280, 2, 0x1f, 0x924},
+ { 0x164800, 2, 0x1f, 0x924},
+ { 0x165000, 2, 0x1f, 0x924},
+ { 0x166000, 164, 0x1f, 0x924},
+ { 0x1662b0, 2, 0x1f, 0x1fff},
+ { 0x1662bc, 1, 0x1f, 0x1fff},
+ { 0x1662cc, 7, 0x1c, 0x924},
+ { 0x166400, 49, 0x1f, 0x924},
+ { 0x1664c8, 32, 0x1f, 0x924},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x16654c, 1, 0x1f, 0x924},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166554, 1, 0x1f, 0x924},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x16655c, 1, 0x1f, 0x924},
+ { 0x166568, 2, 0x1f, 0x924},
+ { 0x166570, 5, 0x1c, 0x924},
+ { 0x166800, 1, 0x1f, 0x924},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168004, 1, 0x1f, 0x924},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x16800c, 1, 0x1f, 0x924},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168014, 1, 0x1f, 0x924},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x16801c, 3, 0x1f, 0x924},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168030, 10, 0x1f, 0x924},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x16807c, 106, 0x1f, 0x924},
+ { 0x168224, 2, 0x3, 0x924},
+ { 0x16822c, 3, 0x1f, 0x924},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x16823c, 25, 0x1f, 0x924},
+ { 0x1682a0, 12, 0x3, 0x924},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x1682ec, 5, 0x1f, 0x924},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x16840c, 1, 0x1f, 0x924},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168418, 2, 0x3, 0x924},
+ { 0x168420, 6, 0x1f, 0x924},
+ { 0x168448, 2, 0x1f, 0x1fff},
+ { 0x168454, 1, 0x1f, 0x1fff},
+ { 0x168800, 19, 0x1f, 0x924},
+ { 0x168900, 1, 0x1f, 0x924},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16a000, 1536, 0x1f, 0x924},
+ { 0x16c000, 1536, 0x1f, 0x924},
+ { 0x16e000, 16, 0x2, 0x924},
+ { 0x16e040, 8, 0x1c, 0x924},
+ { 0x16e100, 1, 0x2, 0x924},
+ { 0x16e200, 2, 0x2, 0xfff},
+ { 0x16e400, 1, 0x2, 0x924},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e40c, 94, 0x2, 0x924},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e69c, 8, 0x2, 0x924},
+ { 0x16e6bc, 4, 0x1e, 0x924},
+ { 0x16e6cc, 4, 0x2, 0x924},
+ { 0x16e6e0, 2, 0x1c, 0x924},
+ { 0x16e6e8, 5, 0xc, 0x924},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e70c, 1, 0x1c, 0x924},
+ { 0x16e768, 17, 0x1c, 0x924},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x170000, 24, 0x1f, 0x924},
+ { 0x170060, 4, 0x3, 0x924},
+ { 0x170070, 13, 0x1f, 0x924},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700a8, 1, 0x1f, 0x924},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700b4, 3, 0x1f, 0x924},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x1700c4, 44, 0x1f, 0x924},
+ { 0x170184, 2, 0x1f, 0x1fff},
+ { 0x170190, 1, 0x1f, 0x1fff},
+ { 0x170194, 11, 0x1c, 0x924},
+ { 0x1701c4, 1, 0x1c, 0x924},
+ { 0x1701cc, 7, 0x1c, 0x924},
+ { 0x1701e8, 1, 0x18, 0x924},
+ { 0x1701ec, 1, 0x1c, 0x924},
+ { 0x1701f4, 1, 0x1c, 0x924},
+ { 0x170200, 4, 0x1f, 0x924},
+ { 0x170214, 1, 0x1f, 0x924},
+ { 0x170218, 77, 0x1c, 0x924},
+ { 0x170400, 64, 0x1c, 0x924},
+ { 0x178000, 1, 0x1f, 0x924},
+ { 0x180000, 61, 0x1f, 0x924},
+ { 0x180114, 2, 0x1f, 0x1fff},
+ { 0x180120, 3, 0x1f, 0x1fff},
+ { 0x180130, 1, 0x1f, 0x1fff},
+ { 0x18013c, 2, 0x1e, 0x924},
+ { 0x180200, 27, 0x1f, 0x924},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x180270, 12, 0x1f, 0x924},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1802a4, 17, 0x1f, 0x924},
+ { 0x180340, 4, 0x1f, 0x924},
+ { 0x180380, 1, 0x1c, 0x924},
+ { 0x180388, 1, 0x1c, 0x924},
+ { 0x180390, 1, 0x1c, 0x924},
+ { 0x180398, 1, 0x1c, 0x924},
+ { 0x1803a0, 5, 0x1c, 0x924},
+ { 0x1803b4, 2, 0x18, 0x924},
+ { 0x181000, 4, 0x1f, 0x93c},
+ { 0x181010, 1020, 0x1f, 0x38},
+ { 0x182000, 4, 0x18, 0x924},
+ { 0x1a0000, 1, 0x1f, 0x92c},
+ { 0x1a0004, 5631, 0x1f, 0x8},
+ { 0x1a5800, 2560, 0x1e, 0x8},
+ { 0x1a8000, 1, 0x1f, 0x92c},
+ { 0x1a8004, 8191, 0x1e, 0x8},
+ { 0x1b0000, 1, 0x1f, 0x92c},
+ { 0x1b0004, 15, 0x2, 0x8},
+ { 0x1b0040, 1, 0x1e, 0x92c},
+ { 0x1b0044, 239, 0x2, 0x8},
+ { 0x1b0400, 1, 0x1f, 0x92c},
+ { 0x1b0404, 255, 0x2, 0x8},
+ { 0x1b0800, 1, 0x1f, 0x924},
+ { 0x1b0840, 1, 0x1e, 0x924},
+ { 0x1b0c00, 1, 0x1f, 0x1fff},
+ { 0x1b1000, 1, 0x1f, 0x1fff},
+ { 0x1b1040, 1, 0x1e, 0x1fff},
+ { 0x1b1400, 1, 0x1f, 0x924},
+ { 0x1b1440, 1, 0x1e, 0x924},
+ { 0x1b1480, 1, 0x1e, 0x924},
+ { 0x1b14c0, 1, 0x1e, 0x924},
+ { 0x1b1800, 128, 0x1f, 0x10},
+ { 0x1b1c00, 128, 0x1f, 0x10},
+ { 0x1b2000, 1, 0x1f, 0xdb6},
+ { 0x1b2400, 1, 0x1e, 0x92c},
+ { 0x1b2404, 5631, 0x1c, 0x8},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x1b8100, 1, 0x1f, 0x924},
+ { 0x1b8140, 1, 0x1f, 0x924},
+ { 0x1b8180, 1, 0x1f, 0x924},
+ { 0x1b81c0, 1, 0x1f, 0x924},
+ { 0x1b8200, 1, 0x1f, 0x924},
+ { 0x1b8240, 1, 0x1f, 0x924},
+ { 0x1b8280, 1, 0x1f, 0x924},
+ { 0x1b82c0, 1, 0x1f, 0x924},
+ { 0x1b8300, 1, 0x1f, 0x924},
+ { 0x1b8340, 1, 0x1f, 0x924},
+ { 0x1b8380, 1, 0x1f, 0x924},
+ { 0x1b83c0, 1, 0x1f, 0x924},
+ { 0x1b8400, 1, 0x1f, 0x924},
+ { 0x1b8440, 1, 0x1f, 0x924},
+ { 0x1b8480, 1, 0x1f, 0x924},
+ { 0x1b84c0, 1, 0x1f, 0x924},
+ { 0x1b8500, 1, 0x1f, 0x924},
+ { 0x1b8540, 1, 0x1f, 0x924},
+ { 0x1b8580, 1, 0x1f, 0x924},
+ { 0x1b85c0, 19, 0x1c, 0x924},
+ { 0x1b8800, 1, 0x1f, 0x924},
+ { 0x1b8840, 1, 0x1f, 0x924},
+ { 0x1b8880, 1, 0x1f, 0x924},
+ { 0x1b88c0, 1, 0x1f, 0x924},
+ { 0x1b8900, 1, 0x1f, 0x924},
+ { 0x1b8940, 1, 0x1f, 0x924},
+ { 0x1b8980, 1, 0x1f, 0x924},
+ { 0x1b89c0, 1, 0x1f, 0x924},
+ { 0x1b8a00, 1, 0x1f, 0x934},
+ { 0x1b8a40, 1, 0x1f, 0x924},
+ { 0x1b8a80, 1, 0x1f, 0x492},
+ { 0x1b8ac0, 1, 0x1f, 0x924},
+ { 0x1b8b00, 1, 0x1f, 0x924},
+ { 0x1b8b40, 1, 0x1f, 0x924},
+ { 0x1b8b80, 1, 0x1f, 0x924},
+ { 0x1b8bc0, 1, 0x1f, 0x924},
+ { 0x1b8c00, 1, 0x1f, 0x924},
+ { 0x1b8c40, 1, 0x1f, 0x924},
+ { 0x1b8c80, 1, 0x1f, 0x924},
+ { 0x1b8cc0, 1, 0x1f, 0x924},
+ { 0x1b8cc4, 1, 0x1c, 0x924},
+ { 0x1b8d00, 1, 0x1f, 0x924},
+ { 0x1b8d40, 1, 0x1f, 0x924},
+ { 0x1b8d80, 1, 0x1f, 0x924},
+ { 0x1b8dc0, 1, 0x1f, 0x924},
+ { 0x1b8e00, 1, 0x1f, 0x924},
+ { 0x1b8e40, 1, 0x1f, 0x924},
+ { 0x1b8e80, 1, 0x1f, 0x924},
+ { 0x1b8e84, 1, 0x1c, 0x924},
+ { 0x1b8ec0, 1, 0x1e, 0x924},
+ { 0x1b8f00, 1, 0x1e, 0x924},
+ { 0x1b8f40, 1, 0x1e, 0x924},
+ { 0x1b8f80, 1, 0x1e, 0x924},
+ { 0x1b8fc0, 1, 0x1e, 0x924},
+ { 0x1b8fd4, 5, 0x1c, 0x924},
+ { 0x1b8fe8, 2, 0x18, 0x924},
+ { 0x1b9000, 1, 0x1c, 0x924},
+ { 0x1b9040, 3, 0x1c, 0x924},
+ { 0x1b905c, 1, 0x18, 0x924},
+ { 0x1b9064, 1, 0x10, 0x924},
+ { 0x1b9080, 10, 0x10, 0x924},
+ { 0x1c0000, 2, 0x1f, 0x924},
+ { 0x200000, 65, 0x1f, 0x924},
+ { 0x200124, 2, 0x1f, 0x1fff},
+ { 0x200130, 3, 0x1f, 0x1fff},
+ { 0x200140, 1, 0x1f, 0x1fff},
+ { 0x20014c, 2, 0x1e, 0x924},
+ { 0x200200, 27, 0x1f, 0x924},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x200270, 12, 0x1f, 0x924},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x2002a4, 17, 0x1f, 0x924},
+ { 0x200340, 4, 0x1f, 0x924},
+ { 0x200380, 1, 0x1c, 0x924},
+ { 0x200388, 1, 0x1c, 0x924},
+ { 0x200390, 1, 0x1c, 0x924},
+ { 0x200398, 1, 0x1c, 0x924},
+ { 0x2003a0, 1, 0x1c, 0x924},
+ { 0x2003a8, 2, 0x1c, 0x924},
+ { 0x202000, 4, 0x1f, 0x1927},
+ { 0x202010, 2044, 0x1f, 0x1007},
+ { 0x204000, 4, 0x18, 0x924},
+ { 0x220000, 1, 0x1f, 0x925},
+ { 0x220004, 5631, 0x1f, 0x1},
+ { 0x225800, 2560, 0x1e, 0x1},
+ { 0x228000, 1, 0x1f, 0x925},
+ { 0x228004, 8191, 0x1e, 0x1},
+ { 0x230000, 1, 0x1f, 0x925},
+ { 0x230004, 15, 0x2, 0x1},
+ { 0x230040, 1, 0x1e, 0x925},
+ { 0x230044, 239, 0x2, 0x1},
+ { 0x230400, 1, 0x1f, 0x925},
+ { 0x230404, 255, 0x2, 0x1},
+ { 0x230800, 1, 0x1f, 0x924},
+ { 0x230840, 1, 0x1e, 0x924},
+ { 0x230c00, 1, 0x1f, 0x924},
+ { 0x231000, 1, 0x1f, 0x924},
+ { 0x231040, 1, 0x1e, 0x924},
+ { 0x231400, 1, 0x1f, 0x924},
+ { 0x231440, 1, 0x1e, 0x924},
+ { 0x231480, 1, 0x1e, 0x924},
+ { 0x2314c0, 1, 0x1e, 0x924},
+ { 0x231800, 128, 0x1f, 0x2},
+ { 0x231c00, 128, 0x1f, 0x2},
+ { 0x232000, 1, 0x1f, 0xdb6},
+ { 0x232400, 1, 0x1e, 0x925},
+ { 0x232404, 5631, 0x1c, 0x1},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x238100, 1, 0x1f, 0x924},
+ { 0x238140, 1, 0x1f, 0x924},
+ { 0x238180, 1, 0x1f, 0x924},
+ { 0x2381c0, 1, 0x1f, 0x924},
+ { 0x238200, 1, 0x1f, 0x924},
+ { 0x238240, 1, 0x1f, 0x924},
+ { 0x238280, 1, 0x1f, 0x924},
+ { 0x2382c0, 1, 0x1f, 0x924},
+ { 0x238300, 1, 0x1f, 0x924},
+ { 0x238340, 1, 0x1f, 0x924},
+ { 0x238380, 1, 0x1f, 0x924},
+ { 0x2383c0, 1, 0x1f, 0x924},
+ { 0x238400, 1, 0x1f, 0x924},
+ { 0x238440, 1, 0x1f, 0x924},
+ { 0x238480, 1, 0x1f, 0x924},
+ { 0x2384c0, 1, 0x1f, 0x924},
+ { 0x238500, 1, 0x1f, 0x924},
+ { 0x238540, 1, 0x1f, 0x924},
+ { 0x238580, 1, 0x1f, 0x924},
+ { 0x2385c0, 19, 0x1c, 0x924},
+ { 0x238800, 1, 0x1f, 0x924},
+ { 0x238840, 1, 0x1f, 0x924},
+ { 0x238880, 1, 0x1f, 0x924},
+ { 0x2388c0, 1, 0x1f, 0x924},
+ { 0x238900, 1, 0x1f, 0x924},
+ { 0x238940, 1, 0x1f, 0x924},
+ { 0x238980, 1, 0x1f, 0x924},
+ { 0x2389c0, 1, 0x1f, 0x924},
+ { 0x238a00, 1, 0x1f, 0x926},
+ { 0x238a40, 1, 0x1f, 0x924},
+ { 0x238a80, 1, 0x1f, 0x492},
+ { 0x238ac0, 1, 0x1f, 0x924},
+ { 0x238b00, 1, 0x1f, 0x924},
+ { 0x238b40, 1, 0x1f, 0x924},
+ { 0x238b80, 1, 0x1f, 0x924},
+ { 0x238bc0, 1, 0x1f, 0x924},
+ { 0x238c00, 1, 0x1f, 0x924},
+ { 0x238c40, 1, 0x1f, 0x924},
+ { 0x238c80, 1, 0x1f, 0x924},
+ { 0x238cc0, 1, 0x1f, 0x924},
+ { 0x238cc4, 1, 0x1c, 0x924},
+ { 0x238d00, 1, 0x1f, 0x924},
+ { 0x238d40, 1, 0x1f, 0x924},
+ { 0x238d80, 1, 0x1f, 0x924},
+ { 0x238dc0, 1, 0x1f, 0x924},
+ { 0x238e00, 1, 0x1f, 0x924},
+ { 0x238e40, 1, 0x1f, 0x924},
+ { 0x238e80, 1, 0x1f, 0x924},
+ { 0x238e84, 1, 0x1c, 0x924},
+ { 0x238ec0, 1, 0x1e, 0x924},
+ { 0x238f00, 1, 0x1e, 0x924},
+ { 0x238f40, 1, 0x1e, 0x924},
+ { 0x238f80, 1, 0x1e, 0x924},
+ { 0x238fc0, 1, 0x1e, 0x924},
+ { 0x238fd4, 5, 0x1c, 0x924},
+ { 0x238fe8, 2, 0x18, 0x924},
+ { 0x239000, 1, 0x1c, 0x924},
+ { 0x239040, 3, 0x1c, 0x924},
+ { 0x23905c, 1, 0x18, 0x924},
+ { 0x239064, 1, 0x10, 0x924},
+ { 0x239080, 10, 0x10, 0x924},
+ { 0x240000, 2, 0x1f, 0x924},
+ { 0x280000, 65, 0x1f, 0x924},
+ { 0x280124, 2, 0x1f, 0x1fff},
+ { 0x280130, 3, 0x1f, 0x1fff},
+ { 0x280140, 1, 0x1f, 0x1fff},
+ { 0x28014c, 2, 0x1e, 0x924},
+ { 0x280200, 27, 0x1f, 0x924},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x280270, 12, 0x1f, 0x924},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2802a4, 17, 0x1f, 0x924},
+ { 0x280340, 4, 0x1f, 0x924},
+ { 0x280380, 1, 0x1c, 0x924},
+ { 0x280388, 1, 0x1c, 0x924},
+ { 0x280390, 1, 0x1c, 0x924},
+ { 0x280398, 1, 0x1c, 0x924},
+ { 0x2803a0, 1, 0x1c, 0x924},
+ { 0x2803a8, 2, 0x1c, 0x924},
+ { 0x282000, 4, 0x1f, 0x9e4},
+ { 0x282010, 2044, 0x1f, 0x1c0},
+ { 0x284000, 4, 0x18, 0x924},
+ { 0x2a0000, 1, 0x1f, 0x964},
+ { 0x2a0004, 5631, 0x1f, 0x40},
+ { 0x2a5800, 2560, 0x1e, 0x40},
+ { 0x2a8000, 1, 0x1f, 0x964},
+ { 0x2a8004, 8191, 0x1e, 0x40},
+ { 0x2b0000, 1, 0x1f, 0x964},
+ { 0x2b0004, 15, 0x2, 0x40},
+ { 0x2b0040, 1, 0x1e, 0x964},
+ { 0x2b0044, 239, 0x2, 0x40},
+ { 0x2b0400, 1, 0x1f, 0x964},
+ { 0x2b0404, 255, 0x2, 0x40},
+ { 0x2b0800, 1, 0x1f, 0x924},
+ { 0x2b0840, 1, 0x1e, 0x924},
+ { 0x2b0c00, 1, 0x1f, 0x924},
+ { 0x2b1000, 1, 0x1f, 0x924},
+ { 0x2b1040, 1, 0x1e, 0x924},
+ { 0x2b1400, 1, 0x1f, 0x924},
+ { 0x2b1440, 1, 0x1e, 0x924},
+ { 0x2b1480, 1, 0x1e, 0x924},
+ { 0x2b14c0, 1, 0x1e, 0x924},
+ { 0x2b1800, 128, 0x1f, 0x80},
+ { 0x2b1c00, 128, 0x1f, 0x80},
+ { 0x2b2000, 1, 0x1f, 0xdb6},
+ { 0x2b2400, 1, 0x1e, 0x964},
+ { 0x2b2404, 5631, 0x1c, 0x40},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x2b80c0, 1, 0x1f, 0x924},
+ { 0x2b8100, 1, 0x1f, 0x924},
+ { 0x2b8140, 1, 0x1f, 0x924},
+ { 0x2b8180, 1, 0x1f, 0x924},
+ { 0x2b81c0, 1, 0x1f, 0x924},
+ { 0x2b8200, 1, 0x1f, 0x924},
+ { 0x2b8240, 1, 0x1f, 0x924},
+ { 0x2b8280, 1, 0x1f, 0x924},
+ { 0x2b82c0, 1, 0x1f, 0x924},
+ { 0x2b8300, 1, 0x1f, 0x924},
+ { 0x2b8340, 1, 0x1f, 0x924},
+ { 0x2b8380, 1, 0x1f, 0x924},
+ { 0x2b83c0, 1, 0x1f, 0x924},
+ { 0x2b8400, 1, 0x1f, 0x924},
+ { 0x2b8440, 1, 0x1f, 0x924},
+ { 0x2b8480, 1, 0x1f, 0x924},
+ { 0x2b84c0, 1, 0x1f, 0x924},
+ { 0x2b8500, 1, 0x1f, 0x924},
+ { 0x2b8540, 1, 0x1f, 0x924},
+ { 0x2b8580, 1, 0x1f, 0x924},
+ { 0x2b85c0, 19, 0x1c, 0x924},
+ { 0x2b8800, 1, 0x1f, 0x924},
+ { 0x2b8840, 1, 0x1f, 0x924},
+ { 0x2b8880, 1, 0x1f, 0x924},
+ { 0x2b88c0, 1, 0x1f, 0x924},
+ { 0x2b8900, 1, 0x1f, 0x924},
+ { 0x2b8940, 1, 0x1f, 0x924},
+ { 0x2b8980, 1, 0x1f, 0x924},
+ { 0x2b89c0, 1, 0x1f, 0x924},
+ { 0x2b8a00, 1, 0x1f, 0x9a4},
+ { 0x2b8a40, 1, 0x1f, 0x924},
+ { 0x2b8a80, 1, 0x1f, 0x492},
+ { 0x2b8ac0, 1, 0x1f, 0x924},
+ { 0x2b8b00, 1, 0x1f, 0x924},
+ { 0x2b8b40, 1, 0x1f, 0x924},
+ { 0x2b8b80, 1, 0x1f, 0x924},
+ { 0x2b8bc0, 1, 0x1f, 0x924},
+ { 0x2b8c00, 1, 0x1f, 0x924},
+ { 0x2b8c40, 1, 0x1f, 0x924},
+ { 0x2b8c80, 1, 0x1f, 0x924},
+ { 0x2b8cc0, 1, 0x1f, 0x924},
+ { 0x2b8cc4, 1, 0x1c, 0x924},
+ { 0x2b8d00, 1, 0x1f, 0x924},
+ { 0x2b8d40, 1, 0x1f, 0x924},
+ { 0x2b8d80, 1, 0x1f, 0x924},
+ { 0x2b8dc0, 1, 0x1f, 0x924},
+ { 0x2b8e00, 1, 0x1f, 0x924},
+ { 0x2b8e40, 1, 0x1f, 0x924},
+ { 0x2b8e80, 1, 0x1f, 0x924},
+ { 0x2b8e84, 1, 0x1c, 0x924},
+ { 0x2b8ec0, 1, 0x1e, 0x924},
+ { 0x2b8f00, 1, 0x1e, 0x924},
+ { 0x2b8f40, 1, 0x1e, 0x924},
+ { 0x2b8f80, 1, 0x1e, 0x924},
+ { 0x2b8fc0, 1, 0x1e, 0x924},
+ { 0x2b8fd4, 5, 0x1c, 0x924},
+ { 0x2b8fe8, 2, 0x18, 0x924},
+ { 0x2b9000, 1, 0x1c, 0x924},
+ { 0x2b9040, 3, 0x1c, 0x924},
+ { 0x2b905c, 1, 0x18, 0x924},
+ { 0x2b9064, 1, 0x10, 0x924},
+ { 0x2b9080, 10, 0x10, 0x924},
+ { 0x2c0000, 2, 0x1f, 0x1fff},
+ { 0x300000, 65, 0x1f, 0x924},
+ { 0x300124, 2, 0x1f, 0x1fff},
+ { 0x300130, 3, 0x1f, 0x1fff},
+ { 0x300140, 1, 0x1f, 0x1fff},
+ { 0x30014c, 2, 0x1e, 0x924},
+ { 0x300200, 27, 0x1f, 0x924},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x300270, 12, 0x1f, 0x924},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x3002a4, 17, 0x1f, 0x924},
+ { 0x300340, 4, 0x1f, 0x924},
+ { 0x300380, 1, 0x1c, 0x924},
+ { 0x300388, 1, 0x1c, 0x924},
+ { 0x300390, 1, 0x1c, 0x924},
+ { 0x300398, 1, 0x1c, 0x924},
+ { 0x3003a0, 1, 0x1c, 0x924},
+ { 0x3003a8, 2, 0x1c, 0x924},
+ { 0x302000, 4, 0x1f, 0xf24},
+ { 0x302010, 2044, 0x1f, 0xe00},
+ { 0x304000, 4, 0x18, 0x924},
+ { 0x320000, 1, 0x1f, 0xb24},
+ { 0x320004, 5631, 0x1f, 0x200},
+ { 0x325800, 2560, 0x1e, 0x200},
+ { 0x328000, 1, 0x1f, 0xb24},
+ { 0x328004, 8191, 0x1e, 0x200},
+ { 0x330000, 1, 0x1f, 0xb24},
+ { 0x330004, 15, 0x2, 0x200},
+ { 0x330040, 1, 0x1e, 0xb24},
+ { 0x330044, 239, 0x2, 0x200},
+ { 0x330400, 1, 0x1f, 0xb24},
+ { 0x330404, 255, 0x2, 0x200},
+ { 0x330800, 1, 0x1f, 0x924},
+ { 0x330840, 1, 0x1e, 0x924},
+ { 0x330c00, 1, 0x1f, 0x924},
+ { 0x331000, 1, 0x1f, 0x924},
+ { 0x331040, 1, 0x1e, 0x924},
+ { 0x331400, 1, 0x1f, 0x924},
+ { 0x331440, 1, 0x1e, 0x924},
+ { 0x331480, 1, 0x1e, 0x924},
+ { 0x3314c0, 1, 0x1e, 0x924},
+ { 0x331800, 128, 0x1f, 0x400},
+ { 0x331c00, 128, 0x1f, 0x400},
+ { 0x332000, 1, 0x1f, 0xdb6},
+ { 0x332400, 1, 0x1e, 0xb24},
+ { 0x332404, 5631, 0x1c, 0x200},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff},
+ { 0x338100, 1, 0x1f, 0x924},
+ { 0x338140, 1, 0x1f, 0x924},
+ { 0x338180, 1, 0x1f, 0x924},
+ { 0x3381c0, 1, 0x1f, 0x924},
+ { 0x338200, 1, 0x1f, 0x924},
+ { 0x338240, 1, 0x1f, 0x924},
+ { 0x338280, 1, 0x1f, 0x924},
+ { 0x3382c0, 1, 0x1f, 0x924},
+ { 0x338300, 1, 0x1f, 0x924},
+ { 0x338340, 1, 0x1f, 0x924},
+ { 0x338380, 1, 0x1f, 0x924},
+ { 0x3383c0, 1, 0x1f, 0x924},
+ { 0x338400, 1, 0x1f, 0x924},
+ { 0x338440, 1, 0x1f, 0x924},
+ { 0x338480, 1, 0x1f, 0x924},
+ { 0x3384c0, 1, 0x1f, 0x924},
+ { 0x338500, 1, 0x1f, 0x924},
+ { 0x338540, 1, 0x1f, 0x924},
+ { 0x338580, 1, 0x1f, 0x924},
+ { 0x3385c0, 19, 0x1c, 0x924},
+ { 0x338800, 1, 0x1f, 0x924},
+ { 0x338840, 1, 0x1f, 0x924},
+ { 0x338880, 1, 0x1f, 0x924},
+ { 0x3388c0, 1, 0x1f, 0x924},
+ { 0x338900, 1, 0x1f, 0x924},
+ { 0x338940, 1, 0x1f, 0x924},
+ { 0x338980, 1, 0x1f, 0x924},
+ { 0x3389c0, 1, 0x1f, 0x924},
+ { 0x338a00, 1, 0x1f, 0xd24},
+ { 0x338a40, 1, 0x1f, 0x924},
+ { 0x338a80, 1, 0x1f, 0x492},
+ { 0x338ac0, 1, 0x1f, 0x924},
+ { 0x338b00, 1, 0x1f, 0x924},
+ { 0x338b40, 1, 0x1f, 0x924},
+ { 0x338b80, 1, 0x1f, 0x924},
+ { 0x338bc0, 1, 0x1f, 0x924},
+ { 0x338c00, 1, 0x1f, 0x924},
+ { 0x338c40, 1, 0x1f, 0x924},
+ { 0x338c80, 1, 0x1f, 0x924},
+ { 0x338cc0, 1, 0x1f, 0x924},
+ { 0x338cc4, 1, 0x1c, 0x924},
+ { 0x338d00, 1, 0x1f, 0x924},
+ { 0x338d40, 1, 0x1f, 0x924},
+ { 0x338d80, 1, 0x1f, 0x924},
+ { 0x338dc0, 1, 0x1f, 0x924},
+ { 0x338e00, 1, 0x1f, 0x924},
+ { 0x338e40, 1, 0x1f, 0x924},
+ { 0x338e80, 1, 0x1f, 0x924},
+ { 0x338e84, 1, 0x1c, 0x924},
+ { 0x338ec0, 1, 0x1e, 0x924},
+ { 0x338f00, 1, 0x1e, 0x924},
+ { 0x338f40, 1, 0x1e, 0x924},
+ { 0x338f80, 1, 0x1e, 0x924},
+ { 0x338fc0, 1, 0x1e, 0x924},
+ { 0x338fd4, 5, 0x1c, 0x924},
+ { 0x338fe8, 2, 0x18, 0x924},
+ { 0x339000, 1, 0x1c, 0x924},
+ { 0x339040, 3, 0x1c, 0x924},
+ { 0x33905c, 1, 0x18, 0x924},
+ { 0x339064, 1, 0x10, 0x924},
+ { 0x339080, 10, 0x10, 0x924},
+ { 0x340000, 2, 0x1f, 0x924},
+ { 0x3a0000, 40960, 0x1c, 0x1000}
+};
+
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
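+
+/* How a table entry is selected, sketched with a hypothetical
+ * entry_matches() helper, under the assumption that presets are numbered
+ * 1..NUM_PRESETS and that bit (preset - 1) of the presets field flags
+ * membership; the chips field is tested the same way against a
+ * DUMP_CHIP_* flag:
+ *
+ *	static bool entry_matches(const struct reg_addr *r,
+ *				  u32 chip_flag, u32 preset)
+ *	{
+ *		return (r->chips & chip_flag) &&
+ *		       (r->presets & BIT(preset - 1));
+ *	}
+ *
+ * With chip_flag == DUMP_CHIP_E1, every row whose chips column has bit 0
+ * set (0x1f, 0x3, 0x1, ...) would be included in an E1 dump.
+ */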
+
+static const struct reg_addr idle_reg_addrs[] = {
+ { 0x2104, 1, 0x1f, 0xfff},
+ { 0x2110, 2, 0x1f, 0xfff},
+ { 0x211c, 8, 0x1f, 0xfff},
+ { 0x2814, 1, 0x1f, 0xfff},
+ { 0x281c, 2, 0x1f, 0xfff},
+ { 0x2854, 1, 0x1f, 0xfff},
+ { 0x285c, 1, 0x1f, 0xfff},
+ { 0x3040, 1, 0x1f, 0xfff},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9298, 1, 0x1c, 0xfff},
+ { 0x92a8, 1, 0x1c, 0x1fff},
+ { 0xa38c, 1, 0x1f, 0x1fff},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xc09c, 1, 0x3, 0xfff},
+ { 0x103b0, 1, 0x1f, 0xfff},
+ { 0x103c0, 1, 0x1f, 0xfff},
+ { 0x103d0, 1, 0x3, 0x1fff},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x183bc, 1, 0x1c, 0x1fff},
+ { 0x183cc, 1, 0x1c, 0x1fff},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x202a8, 1, 0x1f, 0xfff},
+ { 0x202b8, 1, 0x1f, 0x1fff},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x40154, 14, 0x1f, 0xfff},
+ { 0x40198, 1, 0x1f, 0x1fff},
+ { 0x404ac, 1, 0x1f, 0xfff},
+ { 0x404bc, 1, 0x1f, 0x1fff},
+ { 0x42290, 1, 0x1f, 0xfff},
+ { 0x422a0, 1, 0x1f, 0xfff},
+ { 0x422b0, 1, 0x1f, 0x1fff},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x501d0, 1, 0x1f, 0xfff},
+ { 0x501e0, 1, 0x1f, 0x1fff},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x6011c, 1, 0x1f, 0xfff},
+ { 0x6012c, 1, 0x1f, 0x1fff},
+ { 0xc101c, 1, 0x1f, 0xfff},
+ { 0xc102c, 1, 0x1f, 0x1fff},
+ { 0xc2290, 1, 0x1f, 0xfff},
+ { 0xc22a0, 1, 0x1f, 0xfff},
+ { 0xc22b0, 1, 0x1f, 0x1fff},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc4294, 1, 0x1f, 0xfff},
+ { 0xc42a4, 1, 0x1f, 0xfff},
+ { 0xc42b4, 1, 0x1f, 0x1fff},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd01d8, 1, 0x1f, 0xfff},
+ { 0xd01e8, 1, 0x1f, 0x1fff},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe01c8, 1, 0x1f, 0xfff},
+ { 0xe01d8, 1, 0x1f, 0x1fff},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101030, 1, 0x1f, 0xfff},
+ { 0x101040, 1, 0x1f, 0x1fff},
+ { 0x102058, 1, 0x1f, 0x1fff},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x103068, 1, 0x1f, 0xfff},
+ { 0x103078, 1, 0x1f, 0xfff},
+ { 0x103088, 1, 0x1f, 0x1fff},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x1040fc, 1, 0x1f, 0xfff},
+ { 0x10410c, 1, 0x1f, 0x1fff},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x108094, 1, 0x3, 0xfff},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120564, 3, 0x1f, 0xfff},
+ { 0x12057c, 1, 0x1f, 0x1fff},
+ { 0x12058c, 1, 0x1f, 0x1fff},
+ { 0x120608, 1, 0x1e, 0xfff},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120808, 3, 0x1f, 0xfff},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13009c, 1, 0x1c, 0x1fff},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1401c8, 1, 0xf, 0xfff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x16101c, 1, 0x1f, 0xfff},
+ { 0x16102c, 1, 0x1f, 0x1fff},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x1640f0, 1, 0x1f, 0xfff},
+ { 0x166290, 1, 0x1f, 0xfff},
+ { 0x1662a0, 1, 0x1f, 0xfff},
+ { 0x1662b0, 1, 0x1f, 0x1fff},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168438, 1, 0x1f, 0xfff},
+ { 0x168448, 1, 0x1f, 0x1fff},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16e200, 128, 0x2, 0xfff},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x170174, 1, 0x1f, 0xfff},
+ { 0x170184, 1, 0x1f, 0x1fff},
+ { 0x1800f4, 1, 0x1f, 0xfff},
+ { 0x180104, 1, 0x1f, 0xfff},
+ { 0x180114, 1, 0x1f, 0x1fff},
+ { 0x180124, 1, 0x1f, 0x1fff},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x200104, 1, 0x1f, 0xfff},
+ { 0x200114, 1, 0x1f, 0xfff},
+ { 0x200124, 1, 0x1f, 0x1fff},
+ { 0x200134, 1, 0x1f, 0x1fff},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x280104, 1, 0x1f, 0xfff},
+ { 0x280114, 1, 0x1f, 0xfff},
+ { 0x280124, 1, 0x1f, 0x1fff},
+ { 0x280134, 1, 0x1f, 0x1fff},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x300104, 1, 0x1f, 0xfff},
+ { 0x300114, 1, 0x1f, 0xfff},
+ { 0x300124, 1, 0x1f, 0x1fff},
+ { 0x300134, 1, 0x1f, 0x1fff},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff}
+};
+
+#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs)
+
+static const u32 read_reg_e1[] = {
+ 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e1 = {
+ 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff};
+
+static const u32 read_reg_e1h[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e1h = {
+ 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff};
+
+static const u32 read_reg_e2[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e2 = {
+ 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3 = {
+ 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3b0[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3b0 = {
+ 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
+
+static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
+ {19758, 17543, 26951, 18705, 17287, 26695, 19812, 31367, 40775, 19788,
+ 25223, 34631, 19074},
+ {31750, 18273, 32253, 30697, 18017, 31997, 31804, 32097, 46077, 31780,
+ 25953, 39933, 35895},
+ {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
+ 25608, 41377, 43903},
+ {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
+ 25616, 42067, 43903},
+ {45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332,
+ 25679, 42482, 43903}
+};
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 0000000000..bda3ccc28e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,3738 @@
+/* bnx2x_ethtool.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
+
+/* Note: in the format strings below, %s is replaced by the queue name, which
+ * is either its index or 'fcoe' for the FCoE queue. Make sure the format
+ * string length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2.
+ */
+#define MAX_QUEUE_NAME_LEN 4
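+/* e.g. for queue 2, "[%s]: rx_bytes" expands to "[2]: rx_bytes"; for the
+ * FCoE queue, to "[fcoe]: rx_bytes".
+ */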
+static const struct {
+ long offset;
+ int size;
+ char string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[] = {
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+ { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, "[%s]: rx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, "[%s]: rx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, "[%s]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ { Q_STATS_OFFSET32(rx_err_discard_pkt),
+ 4, "[%s]: rx_phy_ip_err_discards"},
+ { Q_STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, "[%s]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
+ { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, "[%s]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%s]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%s]: tx_bcast_packets" },
+ { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, "[%s]: tpa_aggregations" },
+ { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, "[%s]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ { Q_STATS_OFFSET32(driver_filtered_tx_pkt),
+ 4, "[%s]: driver_filtered_tx_pkt" }
+};
+
+#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
+
+static const struct {
+ long offset;
+ int size;
+ bool is_port_stat;
+ char string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[] = {
+/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
+ 8, false, "rx_bytes" },
+ { STATS_OFFSET32(error_bytes_received_hi),
+ 8, false, "rx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, false, "rx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, false, "rx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, false, "rx_bcast_packets" },
+ { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+ 8, true, "rx_crc_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+ 8, true, "rx_align_errors" },
+ { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+ 8, true, "rx_undersize_packets" },
+ { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+ 8, true, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+ 8, true, "rx_fragments" },
+ { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+ 8, true, "rx_jabbers" },
+ { STATS_OFFSET32(no_buff_discard_hi),
+ 8, false, "rx_discards" },
+ { STATS_OFFSET32(mac_filter_discard),
+ 4, true, "rx_filtered_packets" },
+ { STATS_OFFSET32(mf_tag_discard),
+ 4, true, "rx_mf_tag_discard" },
+ { STATS_OFFSET32(pfc_frames_received_hi),
+ 8, true, "pfc_frames_received" },
+ { STATS_OFFSET32(pfc_frames_sent_hi),
+ 8, true, "pfc_frames_sent" },
+ { STATS_OFFSET32(brb_drop_hi),
+ 8, true, "rx_brb_discard" },
+ { STATS_OFFSET32(brb_truncate_hi),
+ 8, true, "rx_brb_truncate" },
+ { STATS_OFFSET32(pause_frames_received_hi),
+ 8, true, "rx_pause_frames" },
+ { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+ 8, true, "rx_mac_ctrl_frames" },
+ { STATS_OFFSET32(nig_timer_max),
+ 4, true, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+ 4, false, "rx_phy_ip_err_discards"},
+ { STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, false, "rx_skb_alloc_discard" },
+ { STATS_OFFSET32(hw_csum_err),
+ 4, false, "rx_csum_offload_errors" },
+ { STATS_OFFSET32(driver_xoff),
+ 4, false, "tx_exhaustion_events" },
+ { STATS_OFFSET32(total_bytes_transmitted_hi),
+ 8, false, "tx_bytes" },
+ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+ 8, true, "tx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, false, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, false, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, false, "tx_bcast_packets" },
+ { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+ 8, true, "tx_mac_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+ 8, true, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+ 8, true, "tx_single_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+ 8, true, "tx_multi_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ 8, true, "tx_deferred" },
+ { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+ 8, true, "tx_excess_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+ 8, true, "tx_late_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+ 8, true, "tx_total_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+ 8, true, "tx_64_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+ 8, true, "tx_65_to_127_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+ 8, true, "tx_128_to_255_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+ 8, true, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+ 8, true, "tx_512_to_1023_byte_packets" },
+ { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+ 8, true, "tx_1024_to_1522_byte_packets" },
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ 8, true, "tx_1523_to_9022_byte_packets" },
+ { STATS_OFFSET32(pause_frames_sent_hi),
+ 8, true, "tx_pause_frames" },
+ { STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, false, "tpa_aggregations" },
+ { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, false, "tpa_aggregated_frames"},
+ { STATS_OFFSET32(total_tpa_bytes_hi),
+ 8, false, "tpa_bytes"},
+ { STATS_OFFSET32(recoverable_error),
+ 4, false, "recoverable_errors" },
+ { STATS_OFFSET32(unrecoverable_error),
+ 4, false, "unrecoverable_errors" },
+ { STATS_OFFSET32(driver_filtered_tx_pkt),
+ 4, false, "driver_filtered_tx_pkt" },
+ { STATS_OFFSET32(eee_tx_lpi),
+ 4, true, "Tx LPI entry count"},
+ { STATS_OFFSET32(ptp_skip_tx_ts),
+ 4, false, "ptp_skipped_tx_tstamp" },
+};
+
+#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
+static int bnx2x_get_port_type(struct bnx2x *bp)
+{
+ int port_type;
+	u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
+
+	switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
+ case ETH_PHY_XFP_FIBER:
+ case ETH_PHY_KR:
+ case ETH_PHY_CX4:
+ port_type = PORT_FIBRE;
+ break;
+ case ETH_PHY_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case ETH_PHY_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case ETH_PHY_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case ETH_PHY_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static int bnx2x_get_vf_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (bp->state == BNX2X_STATE_OPEN) {
+ if (test_bit(BNX2X_LINK_REPORT_FD,
+ &bp->vf_link_vars.link_report_flags))
+ cmd->base.duplex = DUPLEX_FULL;
+ else
+ cmd->base.duplex = DUPLEX_HALF;
+
+ cmd->base.speed = bp->vf_link_vars.line_speed;
+ } else {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ }
+
+ cmd->base.port = PORT_OTHER;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
+
+ return 0;
+}
+
+static int bnx2x_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ u32 media_type;
+ u32 supported, advertising, lp_advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&lp_advertising,
+ cmd->link_modes.lp_advertising);
+
+ /* Dual Media boards present all available port types */
+ supported = bp->port.supported[cfg_idx] |
+ (bp->port.supported[cfg_idx ^ 1] &
+ (SUPPORTED_TP | SUPPORTED_FIBRE));
+ advertising = bp->port.advertising[cfg_idx];
+ media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
+ if (media_type == ETH_PHY_SFP_1G_FIBER) {
+ supported &= ~(SUPPORTED_10000baseT_Full);
+ advertising &= ~(ADVERTISED_10000baseT_Full);
+ }
+
+ if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
+ !(bp->flags & MF_FUNC_DIS)) {
+ cmd->base.duplex = bp->link_vars.duplex;
+
+ if (IS_MF(bp) && !BP_NOMCP(bp))
+ cmd->base.speed = bnx2x_get_mf_speed(bp);
+ else
+ cmd->base.speed = bp->link_vars.line_speed;
+ } else {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ }
+
+ cmd->base.port = bnx2x_get_port_type(bp);
+
+ cmd->base.phy_address = bp->mdio.prtad;
+
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ else
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ /* Publish LP advertised speeds and FC */
+ if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ u32 status = bp->link_vars.link_status;
+
+ lp_advertising |= ADVERTISED_Autoneg;
+ if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
+ lp_advertising |= ADVERTISED_Pause;
+ if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
+ lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
+ lp_advertising |= ADVERTISED_10baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
+ lp_advertising |= ADVERTISED_10baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
+ lp_advertising |= ADVERTISED_100baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
+ lp_advertising |= ADVERTISED_100baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
+ lp_advertising |= ADVERTISED_1000baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
+ if (media_type == ETH_PHY_KR) {
+ lp_advertising |=
+ ADVERTISED_1000baseKX_Full;
+ } else {
+ lp_advertising |=
+ ADVERTISED_1000baseT_Full;
+ }
+ }
+ if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
+ lp_advertising |= ADVERTISED_2500baseX_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
+ if (media_type == ETH_PHY_KR) {
+ lp_advertising |=
+ ADVERTISED_10000baseKR_Full;
+ } else {
+ lp_advertising |=
+ ADVERTISED_10000baseT_Full;
+ }
+ }
+ if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+ lp_advertising |= ADVERTISED_20000baseKR2_Full;
+ }
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
+
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
+
+ return 0;
+}
+
+static int bnx2x_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+ u32 speed, phy_idx;
+ u32 supported;
+ u8 duplex = cmd->base.duplex;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (IS_MF_SD(bp))
+ return 0;
+
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
+
+ speed = cmd->base.speed;
+
+	/* If we received a request for an unknown duplex, assume full */
+ if (duplex == DUPLEX_UNKNOWN)
+ duplex = DUPLEX_FULL;
+
+ if (IS_MF_SI(bp)) {
+ u32 part;
+ u32 line_speed = bp->link_vars.line_speed;
+
+ /* use 10G if no link detected */
+ if (!line_speed)
+ line_speed = 10000;
+
+ if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+ DP(BNX2X_MSG_ETHTOOL,
+			   "Setting speed requires BC %X or higher, please upgrade BC\n",
+ REQ_BC_VER_4_SET_MF_BW);
+ return -EINVAL;
+ }
+
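+		/* e.g. a 2500 Mbps request on a 10000 Mbps link gives
+		 * part == 25, i.e. 25% of the line speed (illustrative).
+		 */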
+ part = (speed * 100) / line_speed;
+
+ if (line_speed < speed || !part) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
+ return -EINVAL;
+ }
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ /* store value for following "load" */
+ bp->pending_max = part;
+ else
+ bnx2x_update_max_mf_config(bp, part);
+
+ return 0;
+ }
+
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ old_multi_phy_config = bp->link_params.multi_phy_config;
+ if (cmd->base.port != bnx2x_get_port_type(bp)) {
+ switch (cmd->base.port) {
+ case PORT_TP:
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ case PORT_DA:
+ case PORT_NONE:
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ }
+	/* Save new config in case command completes successfully */
+ new_multi_phy_config = bp->link_params.multi_phy_config;
+ /* Get the new cfg_idx */
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ /* Restore old config in case command failed */
+ bp->link_params.multi_phy_config = old_multi_phy_config;
+ DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+		u32 an_supported_speed = bp->port.supported[cfg_idx];
+
+		if (bp->link_params.phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ an_supported_speed |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+ DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
+ return -EINVAL;
+ }
+
+ /* advertise the requested speed and duplex if supported */
+ if (advertising & ~an_supported_speed) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Advertisement parameters are not supported\n");
+ return -EINVAL;
+ }
+
+ bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
+ bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
+ advertising);
+		if (advertising) {
+ bp->link_params.speed_cap_mask[cfg_idx] = 0;
+ if (advertising & ADVERTISED_10baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+ }
+ if (advertising & ADVERTISED_10baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+
+ if (advertising & ADVERTISED_100baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+ if (advertising & ADVERTISED_100baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+ }
+ if (advertising & ADVERTISED_1000baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+ }
+ if (advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseKX_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+ if (advertising & (ADVERTISED_10000baseT_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_10000baseKR_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+ if (advertising & ADVERTISED_20000baseKR2_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
+ }
+ } else { /* forced speed */
+ /* advertise the requested speed and duplex if supported */
+ switch (speed) {
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_10baseT_Full)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "10M full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_10baseT_Half)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "10M half not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ }
+ break;
+
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_100baseT_Full)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "100M full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_100baseT_Half)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "100M half not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ }
+ break;
+
+ case SPEED_1000:
+ if (duplex != DUPLEX_FULL) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "1G half not supported\n");
+ return -EINVAL;
+ }
+
+ if (bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseT_Full) {
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+
+ } else if (bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseKX_Full) {
+ advertising = ADVERTISED_1000baseKX_Full;
+ } else {
+ DP(BNX2X_MSG_ETHTOOL,
+ "1G full not supported\n");
+ return -EINVAL;
+ }
+
+ break;
+
+ case SPEED_2500:
+ if (duplex != DUPLEX_FULL) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "2.5G half not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bp->port.supported[cfg_idx]
+ & SUPPORTED_2500baseX_Full)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "2.5G full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_2500baseX_Full |
+ ADVERTISED_TP);
+ break;
+
+ case SPEED_10000:
+ if (duplex != DUPLEX_FULL) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "10G half not supported\n");
+ return -EINVAL;
+ }
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ if ((bp->port.supported[cfg_idx] &
+ SUPPORTED_10000baseT_Full) &&
+ (bp->link_params.phy[phy_idx].media_type !=
+ ETH_PHY_SFP_1G_FIBER)) {
+ advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else if (bp->port.supported[cfg_idx] &
+ SUPPORTED_10000baseKR_Full) {
+ advertising = (ADVERTISED_10000baseKR_Full |
+ ADVERTISED_FIBRE);
+ } else {
+ DP(BNX2X_MSG_ETHTOOL,
+ "10G full not supported\n");
+ return -EINVAL;
+ }
+
+ break;
+
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+
+ bp->link_params.req_line_speed[cfg_idx] = speed;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
+ bp->port.advertising[cfg_idx] = advertising;
+ }
+
+ DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
+ " req_duplex %d advertising 0x%x\n",
+ bp->link_params.req_line_speed[cfg_idx],
+ bp->link_params.req_duplex[cfg_idx],
+ bp->port.advertising[cfg_idx]);
+
+ /* Set new config */
+ bp->link_params.multi_phy_config = new_multi_phy_config;
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
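+
+/* From userspace this path is typically exercised via, e.g.,
+ * "ethtool -s <dev> speed 10000 duplex full autoneg off".
+ */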
+
+#define DUMP_ALL_PRESETS 0x1FFF
+#define DUMP_MAX_PRESETS 13
+
+static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
+{
+ if (CHIP_IS_E1(bp))
+ return dump_num_registers[0][preset-1];
+ else if (CHIP_IS_E1H(bp))
+ return dump_num_registers[1][preset-1];
+ else if (CHIP_IS_E2(bp))
+ return dump_num_registers[2][preset-1];
+ else if (CHIP_IS_E3A0(bp))
+ return dump_num_registers[3][preset-1];
+ else if (CHIP_IS_E3B0(bp))
+ return dump_num_registers[4][preset-1];
+ else
+ return 0;
+}
+
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ u32 preset_idx;
+ int regdump_len = 0;
+
+ /* Calculate the total preset regs length */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
+ regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
+
+ return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ if (IS_VF(bp))
+ return 0;
+
+ regdump_len = __bnx2x_get_regs_len(bp);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
+}
+
+#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
+#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
+#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
+#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
+#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
+
+#define IS_REG_IN_PRESET(presets, idx) \
+ ((presets & (1 << (idx-1))) == (1 << (idx-1)))
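+/* e.g. IS_REG_IN_PRESET(0x4, 3) is true, since preset 3 maps to bit 2. */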
+
+/******* Paged registers info selectors ********/
+static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_vals_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_vals_e3;
+ else
+ return NULL;
+}
+
+static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_MODE_VALUES_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_MODE_VALUES_E3;
+ else
+ return 0;
+}
+
+static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_write_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_write_regs_e3;
+ else
+ return NULL;
+}
+
+static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_WRITE_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_WRITE_REGS_E3;
+ else
+ return 0;
+}
+
+static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_read_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_read_regs_e3;
+ else
+ return NULL;
+}
+
+static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_READ_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_READ_REGS_E3;
+ else
+ return 0;
+}
+
+static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(reg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(reg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(reg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(reg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(reg_info->chips);
+ else
+ return false;
+}
+
+static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
+ const struct wreg_addr *wreg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(wreg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(wreg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(wreg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(wreg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(wreg_info->chips);
+ else
+ return false;
+}
+
+/**
+ * bnx2x_read_pages_regs - read "paged" registers
+ *
+ * @bp: device handle
+ * @p: output buffer
+ * @preset: the preset value
+ *
+ * Reads "paged" memories: memories that may only be read by first writing to a
+ * specific address ("write address") and then reading from a specific address
+ * ("read address"). There may be more than one write address per "page" and
+ * more than one read address per write address.
+ */
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
+{
+ u32 i, j, k, n;
+
+ /* addresses of the paged registers */
+ const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
+ /* number of paged registers */
+ int num_pages = __bnx2x_get_page_reg_num(bp);
+ /* write addresses */
+ const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
+ /* number of write addresses */
+ int write_num = __bnx2x_get_page_write_num(bp);
+ /* read addresses info */
+ const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
+ /* number of read addresses */
+ int read_num = __bnx2x_get_page_read_num(bp);
+ u32 addr, size;
+
+ for (i = 0; i < num_pages; i++) {
+ for (j = 0; j < write_num; j++) {
+ REG_WR(bp, write_addr[j], page_addr[i]);
+
+ for (k = 0; k < read_num; k++) {
+ if (IS_REG_IN_PRESET(read_addr[k].presets,
+ preset)) {
+ size = read_addr[k].size;
+ for (n = 0; n < size; n++) {
+ addr = read_addr[k].addr + n*4;
+ *p++ = REG_RD(bp, addr);
+ }
+ }
+ }
+ }
+ }
+}
+
+static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
+{
+ u32 i, j, addr;
+ const struct wreg_addr *wreg_addr_p = NULL;
+
+ if (CHIP_IS_E1(bp))
+ wreg_addr_p = &wreg_addr_e1;
+ else if (CHIP_IS_E1H(bp))
+ wreg_addr_p = &wreg_addr_e1h;
+ else if (CHIP_IS_E2(bp))
+ wreg_addr_p = &wreg_addr_e2;
+ else if (CHIP_IS_E3A0(bp))
+ wreg_addr_p = &wreg_addr_e3;
+ else if (CHIP_IS_E3B0(bp))
+ wreg_addr_p = &wreg_addr_e3b0;
+
+ /* Read the idle_chk registers */
+ for (i = 0; i < IDLE_REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
+ IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
+ for (j = 0; j < idle_reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
+ }
+ }
+
+ /* Read the regular registers */
+ for (i = 0; i < REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
+ IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
+ for (j = 0; j < reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+ }
+ }
+
+ /* Read the CAM registers */
+ if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
+ IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
+ for (i = 0; i < wreg_addr_p->size; i++) {
+ *p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
+
+			/* In case of wreg_addr register, read additional
+			 * registers from read_regs array
+			 */
+ for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
+ addr = *(wreg_addr_p->read_regs);
+ *p++ = REG_RD(bp, addr + j*4);
+ }
+ }
+ }
+
+ /* Paged registers are supported in E2 & E3 only */
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
+ /* Read "paged" registers */
+ bnx2x_read_pages_regs(bp, p, preset);
+ }
+
+ return 0;
+}
+
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 preset_idx;
+
+ /* Read all registers, by reading all preset registers */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
+ /* Skip presets with IOR */
+ if ((preset_idx == 2) ||
+ (preset_idx == 5) ||
+ (preset_idx == 8) ||
+ (preset_idx == 11))
+ continue;
+ __bnx2x_get_preset_regs(bp, p, preset_idx);
+ p += __bnx2x_get_preset_regs_len(bp, preset_idx);
+ }
+}
+
+static void bnx2x_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *_p)
+{
+ u32 *p = _p;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_header dump_hdr = {0};
+
+ regs->version = 2;
+ memset(p, 0, regs->len);
+
+ if (!netif_running(bp->dev))
+ return;
+
+	/* Disable parity attentions, since the following dump may cause
+	 * false alarms by reading never-written registers. We will
+	 * re-enable parity attentions right after the dump.
+	 */
+
+ bnx2x_disable_blocks_parity(bp);
+
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = DUMP_ALL_PRESETS;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+	/* dump_meta_data holds the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
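+	/* header_size is in dwords and excludes the first dword, so
+	 * advancing by header_size + 1 skips the entire header.
+	 */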
+ p += dump_hdr.header_size + 1;
+
+ /* This isn't really an error, but since attention handling is going
+ * to print the GRC timeouts using this macro, we use the same.
+ */
+ BNX2X_ERR("Generating register dump. Might trigger harmless GRC timeouts\n");
+
+ /* Actually read the registers */
+ __bnx2x_get_regs(bp, p);
+
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+}
+
+static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
+}
+
+static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Use the ethtool_dump "flag" field as the dump preset index */
+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
+ return -EINVAL;
+
+ bp->dump_preset_idx = val->flag;
+ return 0;
+}
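+
+/* Userspace selects a preset with "ethtool -W <dev> <N>" and retrieves the
+ * corresponding dump with "ethtool -w <dev> data <file>".
+ */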
+
+static int bnx2x_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ dump->version = BNX2X_DUMP_VERSION;
+ dump->flag = bp->dump_preset_idx;
+ /* Calculate the requested preset idx length */
+ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
+ DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
+ bp->dump_preset_idx, dump->len);
+ return 0;
+}
+
+static int bnx2x_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump,
+ void *buffer)
+{
+ u32 *p = buffer;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_header dump_hdr = {0};
+
+	/* Disable parity attentions, since the following dump may cause
+	 * false alarms by reading never-written registers. We will
+	 * re-enable parity attentions right after the dump.
+	 */
+
+ bnx2x_disable_blocks_parity(bp);
+
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = bp->dump_preset_idx;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+ DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
+
+	/* dump_meta_data holds the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
+
+ /* Actually read the registers */
+ __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
+
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ return 0;
+}
+
+static void bnx2x_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ char version[ETHTOOL_FWVERS_LEN];
+ int ext_dev_info_offset;
+ u32 mbi;
+
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+
+ if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
+ ext_dev_info_offset = SHMEM2_RD(bp,
+ extended_dev_info_shared_addr);
+ mbi = REG_RD(bp, ext_dev_info_offset +
+ offsetof(struct extended_dev_info_shared_cfg,
+ mbi_version));
+ if (mbi) {
+ memset(version, 0, sizeof(version));
+ snprintf(version, ETHTOOL_FWVERS_LEN, "mbi %d.%d.%d ",
+ (mbi & 0xff000000) >> 24,
+ (mbi & 0x00ff0000) >> 16,
+ (mbi & 0x0000ff00) >> 8);
+ strscpy(info->fw_version, version,
+ sizeof(info->fw_version));
+ }
+ }
+
+ memset(version, 0, sizeof(version));
+ bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ strlcat(info->fw_version, version, sizeof(info->fw_version));
+
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+}
+
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & NO_WOL_FLAG) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ if (bp->wol)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ }
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC) {
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
+ return -EINVAL;
+ }
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ if (bp->flags & NO_WOL_FLAG) {
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
+ return -EINVAL;
+ }
+ bp->wol = 1;
+ } else
+ bp->wol = 0;
+
+ if (SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
+ return 0;
+}
+
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->msg_enable;
+}
+
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (capable(CAP_NET_ADMIN)) {
+ /* dump MCP trace */
+ if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
+ bnx2x_fw_dump_lvl(bp, KERN_INFO);
+ bp->msg_enable = level;
+ }
+}
+
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bp->port.pmf)
+ return 0;
+
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
+ return 0;
+
+ if (IS_VF(bp))
+ return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->vf_link_vars.link_report_flags);
+
+ return bp->link_vars.link_up;
+}
+
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->common.flash_size;
+}
+
+/* The per-pf misc lock must be acquired before the per-port mcp lock.
+ * Otherwise, had we done things the other way around, if two pfs from the
+ * same port attempted to access nvram at the same time, we could run into a
+ * scenario such as:
+ * pf A takes the port lock.
+ * pf B succeeds in taking the same lock since they are from the same port.
+ * pf A takes the per pf misc lock. Performs eeprom access.
+ * pf A finishes. Unlocks the per pf misc lock.
+ * pf B takes the lock and proceeds to perform its own access.
+ * pf A unlocks the per port lock, while pf B is still working (!).
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
+ * access corrupted by pf B)
+ */
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int count, i;
+ u32 val;
+
+ /* acquire HW lock: protect against other PFs in PF Direct Assignment */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* request access to nvram interface */
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+ for (i = 0; i < count*10; i++) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+ break;
+
+ udelay(5);
+ }
+
+ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot get access to nvram interface\n");
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int count, i;
+ u32 val;
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* relinquish nvram interface */
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+ for (i = 0; i < count*10; i++) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot free access to nvram interface\n");
+ return -EBUSY;
+ }
+
+ /* release HW lock: protect against other PFs in PF Direct Assignment */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
+ return 0;
+}
+
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+ /* enable both bits, even on read */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+ (val | MCPR_NVM_ACCESS_ENABLE_EN |
+ MCPR_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+ /* disable both bits, even after read */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+ (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+ MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+}
+
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
+ u32 cmd_flags)
+{
+ int count, i, rc;
+ u32 val;
+
+ /* build the command word */
+ cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+ /* need to clear DONE bit separately */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+ /* address of the NVRAM to read from */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+ /* issue a read command */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* wait for completion */
+ *ret_val = 0;
+ rc = -EBUSY;
+ for (i = 0; i < count; i++) {
+ udelay(5);
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+
+ if (val & MCPR_NVM_COMMAND_DONE) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+			/* We read NVRAM data in CPU order, but ethtool
+			 * sees it as an array of bytes; converting to
+			 * big-endian does the work.
+			 */
+ *ret_val = cpu_to_be32(val);
+ rc = 0;
+ break;
+ }
+ }
+ if (rc == -EBUSY)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram read timeout expired\n");
+ return rc;
+}
+
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ __be32 val;
+
+ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+ offset, buf_size);
+ return -EINVAL;
+ }
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ /* read the first word(s) */
+ cmd_flags = MCPR_NVM_COMMAND_FIRST;
+ while ((buf_size > sizeof(u32)) && (rc == 0)) {
+ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+ memcpy(ret_buf, &val, 4);
+
+ /* advance to the next dword */
+ offset += sizeof(u32);
+ ret_buf += sizeof(u32);
+ buf_size -= sizeof(u32);
+ cmd_flags = 0;
+ }
+
+ if (rc == 0) {
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+ memcpy(ret_buf, &val, 4);
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
+
+static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
+ int buf_size)
+{
+ int rc;
+
+ rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
+
+ if (!rc) {
+ __be32 *be = (__be32 *)buf;
+
+ while ((buf_size -= 4) >= 0)
+ *buf++ = be32_to_cpu(*be++);
+ }
+
+ return rc;
+}
+
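+/* The NVM is considered accessible when the device is in the D0 power
+ * state; if the PM state cannot be read, fall back to requiring that the
+ * interface be up.
+ */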
+static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
+{
+ int rc = 1;
+ u16 pm = 0;
+ struct net_device *dev = pci_get_drvdata(bp->pdev);
+
+ if (bp->pdev->pm_cap)
+ rc = pci_read_config_word(bp->pdev,
+ bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
+
+ if ((rc && !netif_running(dev)) ||
+ (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
+ return false;
+
+ return true;
+}
+
+static int bnx2x_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+ eeprom->len, eeprom->len);
+
+ /* parameters already validated in ethtool_get_eeprom */
+
+ return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+}
+
+static int bnx2x_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = -EINVAL, phy_idx;
+ u8 *user_data = data;
+ unsigned int start_addr = ee->offset, xfer_size = 0;
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+
+ /* Read A0 section */
+ if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+ xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+ else
+ xfer_size = ee->len;
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ start_addr,
+ xfer_size,
+ user_data);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+ return -EINVAL;
+ }
+ user_data += xfer_size;
+ start_addr += xfer_size;
+ }
+
+ /* Read A2 section */
+ if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+ (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+ xfer_size = ee->len - xfer_size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+ xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A2,
+ start_addr,
+ xfer_size,
+ user_data);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+static int bnx2x_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int phy_idx, rc;
+ u8 sff8472_comp, diag_type;
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_SFF_8472_COMP_ADDR,
+ SFP_EEPROM_SFF_8472_COMP_SIZE,
+ &sff8472_comp);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_DIAG_TYPE_ADDR,
+ SFP_EEPROM_DIAG_TYPE_SIZE,
+ &diag_type);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+ return -EINVAL;
+ }
+
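+	/* A module is reported as SFF-8472 only if it advertises SFF-8472
+	 * compliance, implements DDM and does not require an address-change
+	 * sequence; otherwise fall back to SFF-8079 (A0 page only).
+	 */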
+ if (!sff8472_comp ||
+ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ) ||
+ !(diag_type & SFP_EEPROM_DDM_IMPLEMENTED)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ return 0;
+}
+
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+ u32 cmd_flags)
+{
+ int count, i, rc;
+
+ /* build the command word */
+ cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+ /* need to clear DONE bit separately */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+ /* write the data */
+ REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+
+ /* address of the NVRAM to write to */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+ /* issue the write command */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* wait for completion */
+ rc = -EBUSY;
+ for (i = 0; i < count; i++) {
+ udelay(5);
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+ if (val & MCPR_NVM_COMMAND_DONE) {
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc == -EBUSY)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram write timeout expired\n");
+ return rc;
+}
+
+#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
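+/* e.g. BYTE_OFFSET(0x5) == 8: byte 5 occupies bits 15:8 of the dword at
+ * aligned offset 0x4.
+ */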
+
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags, align_offset, val;
+ __be32 val_be;
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+ align_offset = (offset & ~0x03);
+ rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
+
+ if (rc == 0) {
+		/* nvram data is returned as an array of bytes;
+		 * convert it back to cpu order
+		 */
+ val = be32_to_cpu(val_be);
+
+ val &= ~le32_to_cpu((__force __le32)
+ (0xff << BYTE_OFFSET(offset)));
+ val |= le32_to_cpu((__force __le32)
+ (*data_buf << BYTE_OFFSET(offset)));
+
+ rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+ cmd_flags);
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
+
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ u32 val;
+ u32 written_so_far;
+
+ if (buf_size == 1) /* ethtool */
+ return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+
+ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+ offset, buf_size);
+ return -EINVAL;
+ }
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ written_so_far = 0;
+ cmd_flags = MCPR_NVM_COMMAND_FIRST;
+ while ((written_so_far < buf_size) && (rc == 0)) {
+ if (written_so_far == (buf_size - sizeof(u32)))
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
+ cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+ memcpy(&val, data_buf, 4);
+
+		/* Note: unlike bnx2x_nvram_read_dword(), this does not
+		 * convert val with be32_to_cpu(), so data read from the
+		 * eeprom and written back gets byte-swapped. This is kept
+		 * as-is because existing tools rely on the current
+		 * behaviour and would break if it were fixed.
+		 */
+ rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+ /* advance to the next dword */
+ offset += sizeof(u32);
+ data_buf += sizeof(u32);
+ written_so_far += sizeof(u32);
+
+ /* At end of each 4Kb page, release nvram lock to allow MFW
+ * chance to take it for its own use.
+ */
+ if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+ (written_so_far < buf_size)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Releasing NVM lock after offset 0x%x\n",
+ (u32)(offset - sizeof(u32)));
+ bnx2x_release_nvram_lock(bp);
+ usleep_range(1000, 2000);
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+ }
+
+ cmd_flags = 0;
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
+
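+/* Backs "ethtool -E <dev>": plain writes go to NVRAM, while the
+ * 'PHYP'/'PHYR'/'PHYC' magic values sequence an external PHY firmware
+ * upgrade (prepare link, re-init link, finish and reset the PHY).
+ */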
+static int bnx2x_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int port = BP_PORT(bp);
+ int rc = 0;
+ u32 ext_phy_config;
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+ eeprom->len, eeprom->len);
+
+ /* parameters already validated in ethtool_set_eeprom */
+
+ /* PHY eeprom can be accessed only by the PMF */
+ if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
+ !bp->port.pmf) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "wrong magic or interface is not pmf\n");
+ return -EINVAL;
+ }
+
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ if (eeprom->magic == 0x50485950) {
+ /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ bnx2x_acquire_phy_lock(bp);
+ rc |= bnx2x_link_reset(&bp->link_params,
+ &bp->link_vars, 0);
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH, port);
+ bnx2x_release_phy_lock(bp);
+ bnx2x_link_report(bp);
+
+ } else if (eeprom->magic == 0x50485952) {
+ /* 'PHYR' (0x50485952): re-init link after FW upgrade */
+ if (bp->state == BNX2X_STATE_OPEN) {
+ bnx2x_acquire_phy_lock(bp);
+ rc |= bnx2x_link_reset(&bp->link_params,
+ &bp->link_vars, 1);
+
+ rc |= bnx2x_phy_init(&bp->link_params,
+ &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ bnx2x_calc_fc_adv(bp);
+ }
+ } else if (eeprom->magic == 0x53985943) {
+ /* 'PHYC' (0x53985943): PHY FW upgrade completed */
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
+
+ /* DSP Remove Download Mode */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_LOW, port);
+
+ bnx2x_acquire_phy_lock(bp);
+
+ bnx2x_sfx7101_sp_sw_reset(bp,
+ &bp->link_params.phy[EXT_PHY1]);
+
+ /* wait 0.5 sec to allow it to run */
+ msleep(500);
+ bnx2x_ext_phy_hw_reset(bp, port);
+ msleep(500);
+ bnx2x_release_phy_lock(bp);
+ }
+ } else
+ rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+ return rc;
+}
+
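+/* Interrupt coalescing, backing "ethtool -c/-C <dev>"; only the rx/tx
+ * usec parameters are supported (see supported_coalesce_params below).
+ */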
+static int bnx2x_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ coal->rx_coalesce_usecs = bp->rx_ticks;
+ coal->tx_coalesce_usecs = bp->tx_ticks;
+
+ return 0;
+}
+
+static int bnx2x_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+ bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+ if (netif_running(dev))
+ bnx2x_update_coalesce(bp);
+
+ return 0;
+}
+
+static void bnx2x_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = MAX_RX_AVAIL;
+
+	/* If the ring size isn't set yet, estimate the number of buffers
+	 * per queue. The number of queues could still shrink before load,
+	 * so this is not exact, but it is correct for the likely scenario.
+	 */
+ if (bp->rx_ring_size)
+ ering->rx_pending = bp->rx_ring_size;
+ else if (BNX2X_NUM_RX_QUEUES(bp))
+ ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
+ else
+ ering->rx_pending = MAX_RX_AVAIL;
+
+ ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
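+/* Backs "ethtool -G <dev>": validates the requested ring sizes against
+ * chip limits and reloads the nic if it is running so they take effect.
+ */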
+static int bnx2x_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+ ering->rx_pending, ering->tx_pending);
+
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+		   "VFs are enabled, cannot change ring parameters\n");
+ return -EPERM;
+ }
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ if ((ering->rx_pending > MAX_RX_AVAIL) ||
+ (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA)) ||
+ (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ bp->rx_ring_size = ering->rx_pending;
+ bp->tx_ring_size = ering->tx_pending;
+
+ return bnx2x_reload_if_running(dev);
+}
+
+static void bnx2x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ int cfg_reg;
+
+ epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
+ BNX2X_FLOW_CTRL_AUTO);
+
+ if (!epause->autoneg)
+ cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
+ else
+ cfg_reg = bp->link_params.req_fc_auto_adv;
+
+ epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
+ BNX2X_FLOW_CTRL_RX);
+ epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
+ BNX2X_FLOW_CTRL_TX);
+
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
+ " autoneg %d rx_pause %d tx_pause %d\n",
+ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
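+/* Backs "ethtool -A <dev>": with autoneg, the requested pause modes are
+ * advertised (req_fc_auto_adv); otherwise they are forced directly.
+ */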
+static int bnx2x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (IS_MF(bp))
+ return 0;
+
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
+ " autoneg %d rx_pause %d tx_pause %d\n",
+ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
+
+ if (epause->rx_pause)
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
+
+ if (epause->tx_pause)
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
+
+ if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
+
+ if (epause->autoneg) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+ DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
+ return -EINVAL;
+ }
+
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
+ bp->link_params.req_flow_ctrl[cfg_idx] =
+ BNX2X_FLOW_CTRL_AUTO;
+ }
+ bp->link_params.req_fc_auto_adv = 0;
+ if (epause->rx_pause)
+ bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
+
+ if (epause->tx_pause)
+ bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
+
+ if (!bp->link_params.req_fc_auto_adv)
+ bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
+ }
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
+
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
+ "register_test (offline) ",
+ "memory_test (offline) ",
+ "int_loopback_test (offline)",
+ "ext_loopback_test (offline)",
+ "nvram_test (online) ",
+ "interrupt_test (online) ",
+ "link_test (online) "
+};
+
+enum {
+ BNX2X_PRI_FLAG_ISCSI,
+ BNX2X_PRI_FLAG_FCOE,
+ BNX2X_PRI_FLAG_STORAGE,
+ BNX2X_PRI_FLAG_LEN,
+};
+
+static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "iSCSI offload support",
+ "FCoE offload support",
+ "Storage only interface"
+};
+
+static u32 bnx2x_eee_to_adv(u32 eee_adv)
+{
+ u32 modes = 0;
+
+ if (eee_adv & SHMEM_EEE_100M_ADV)
+ modes |= ADVERTISED_100baseT_Full;
+ if (eee_adv & SHMEM_EEE_1G_ADV)
+ modes |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & SHMEM_EEE_10G_ADV)
+ modes |= ADVERTISED_10000baseT_Full;
+
+ return modes;
+}
+
+static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+{
+ u32 eee_adv = 0;
+ if (modes & ADVERTISED_100baseT_Full)
+ eee_adv |= SHMEM_EEE_100M_ADV;
+ if (modes & ADVERTISED_1000baseT_Full)
+ eee_adv |= SHMEM_EEE_1G_ADV;
+ if (modes & ADVERTISED_10000baseT_Full)
+ eee_adv |= SHMEM_EEE_10G_ADV;
+
+ return eee_adv << shift;
+}
+
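+/* Energy Efficient Ethernet, backing "ethtool --show-eee/--set-eee";
+ * state is read from the shared memory (SHMEM) eee_status field, so it
+ * requires bootcode support.
+ */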
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = bp->link_vars.eee_status;
+
+ edata->supported =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ edata->advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ edata->lp_advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+	/* SHMEM value is in units of 16 usec; convert to 1 usec units. */
+ edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
+
+ edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
+ edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
+ edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
+
+ return 0;
+}
+
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+ u32 advertised;
+
+ if (IS_MF(bp))
+ return 0;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = bp->link_vars.eee_status;
+
+ if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
+ DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
+ return -EOPNOTSUPP;
+ }
+
+ advertised = bnx2x_adv_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Direct manipulation of EEE advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Maximal Tx Lpi timer supported is %x(u)\n",
+ EEE_MODE_TIMER_MASK);
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled &&
+ (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Minimal Tx Lpi timer supported is %d(u)\n",
+ EEE_MODE_NVRAM_AGGRESSIVE_TIME);
+ return -EINVAL;
+ }
+
+	/* All is well; apply changes */
+ if (edata->eee_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
+
+ if (edata->tx_lpi_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
+
+ bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
+ bp->link_params.eee_mode |= (edata->tx_lpi_timer &
+ EEE_MODE_TIMER_MASK) |
+ EEE_MODE_OVERRIDE_NVRAM |
+ EEE_MODE_OUTPUT_TIME;
+
+ /* Restart link to propagate changes */
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+enum {
+ BNX2X_CHIP_E1_OFST = 0,
+ BNX2X_CHIP_E1H_OFST,
+ BNX2X_CHIP_E2_OFST,
+ BNX2X_CHIP_E3_OFST,
+ BNX2X_CHIP_E3B0_OFST,
+ BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
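+/* Register self-test: for each table entry, offset0 is the port-0
+ * register, offset1 the per-port stride, and mask the writable bits;
+ * the test writes a pattern through the mask, reads it back and then
+ * restores the original value.
+ */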
+static int bnx2x_test_registers(struct bnx2x *bp)
+{
+ int idx, i, rc = -ENODEV;
+ u32 wr_val = 0, hw;
+ int port = BP_PORT(bp);
+ static const struct {
+ u32 hw;
+ u32 offset0;
+ u32 offset1;
+ u32 mask;
+ } reg_tbl[] = {
+/* 0 */ { BNX2X_CHIP_MASK_ALL,
+ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
+ { BNX2X_CHIP_MASK_E1X,
+ HC_REG_AGG_INT_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_E3B0,
+ PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
+/* 10 */ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ QM_REG_CONNNUM_0, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
+/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
+/* 30 */ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
+
+ { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
+ };
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return rc;
+ }
+
+ if (CHIP_IS_E1(bp))
+ hw = BNX2X_CHIP_MASK_E1;
+ else if (CHIP_IS_E1H(bp))
+ hw = BNX2X_CHIP_MASK_E1H;
+ else if (CHIP_IS_E2(bp))
+ hw = BNX2X_CHIP_MASK_E2;
+ else if (CHIP_IS_E3B0(bp))
+ hw = BNX2X_CHIP_MASK_E3B0;
+ else /* e3 A0 */
+ hw = BNX2X_CHIP_MASK_E3;
+
+	/* Run the test twice:
+	 * first writing 0x00000000, then writing 0xffffffff
+	 */
+ for (idx = 0; idx < 2; idx++) {
+
+ switch (idx) {
+ case 0:
+ wr_val = 0;
+ break;
+ case 1:
+ wr_val = 0xffffffff;
+ break;
+ }
+
+ for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
+ u32 offset, mask, save_val, val;
+ if (!(hw & reg_tbl[i].hw))
+ continue;
+
+ offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
+ mask = reg_tbl[i].mask;
+
+ save_val = REG_RD(bp, offset);
+
+ REG_WR(bp, offset, wr_val & mask);
+
+ val = REG_RD(bp, offset);
+
+ /* Restore the original register's value */
+ REG_WR(bp, offset, save_val);
+
+ /* verify value is as expected */
+ if ((val & mask) != (wr_val & mask)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+ offset, val, wr_val, mask);
+ goto test_reg_exit;
+ }
+ }
+ }
+
+ rc = 0;
+
+test_reg_exit:
+ return rc;
+}
+
+static int bnx2x_test_memory(struct bnx2x *bp)
+{
+ int i, j, rc = -ENODEV;
+ u32 val, index;
+ static const struct {
+ u32 offset;
+ int size;
+ } mem_tbl[] = {
+ { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
+ { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
+ { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
+ { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
+ { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
+ { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
+ { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
+
+ { 0xffffffff, 0 }
+ };
+
+ static const struct {
+ char *name;
+ u32 offset;
+ u32 hw_mask[BNX2X_CHIP_MAX_OFST];
+ } prty_tbl[] = {
+ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
+ {0x2, 0x2, 0, 0} },
+ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+ {0, 0, 0, 0} },
+ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
+ {0x3ffc1, 0, 0, 0} },
+
+ { NULL, 0xffffffff, {0, 0, 0, 0} }
+ };
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return rc;
+ }
+
+ if (CHIP_IS_E1(bp))
+ index = BNX2X_CHIP_E1_OFST;
+ else if (CHIP_IS_E1H(bp))
+ index = BNX2X_CHIP_E1H_OFST;
+ else if (CHIP_IS_E2(bp))
+ index = BNX2X_CHIP_E2_OFST;
+ else /* e3 */
+ index = BNX2X_CHIP_E3_OFST;
+
+	/* Pre-check the parity status */
+ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+ val = REG_RD(bp, prty_tbl[i].offset);
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "%s is 0x%x\n", prty_tbl[i].name, val);
+ goto test_mem_exit;
+ }
+ }
+
+ /* Go through all the memories */
+ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
+ for (j = 0; j < mem_tbl[i].size; j++)
+ REG_RD(bp, mem_tbl[i].offset + j*4);
+
+ /* Check the parity status */
+ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+ val = REG_RD(bp, prty_tbl[i].offset);
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "%s is 0x%x\n", prty_tbl[i].name, val);
+ goto test_mem_exit;
+ }
+ }
+
+ rc = 0;
+
+test_mem_exit:
+ return rc;
+}
+
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
+{
+ int cnt = 1400;
+
+ if (link_up) {
+ while (bnx2x_link_test(bp, is_serdes) && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+ DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
+ cnt = 1400;
+ while (!bp->link_vars.link_up && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && !bp->link_vars.link_up)
+ DP(BNX2X_MSG_ETHTOOL,
+ "Timeout waiting for link init\n");
+ }
+}
+
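+/* Run a single-packet loopback on queue 0: build a byte-pattern frame,
+ * post it on the Tx ring with a doorbell, then poll the Tx/Rx status
+ * block indices and verify the received payload matches.
+ */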
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
+{
+ unsigned int pkt_size, num_pkts, i;
+ struct sk_buff *skb;
+ unsigned char *packet;
+ struct bnx2x_fastpath *fp_rx = &bp->fp[0];
+ struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+ struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
+ u16 tx_start_idx, tx_idx;
+ u16 rx_start_idx, rx_idx;
+ u16 pkt_prod, bd_prod;
+ struct sw_tx_bd *tx_buf;
+ struct eth_tx_start_bd *tx_start_bd;
+ dma_addr_t mapping;
+ union eth_rx_cqe *cqe;
+ u8 cqe_fp_flags, cqe_fp_type;
+ struct sw_rx_bd *rx_buf;
+ u16 len;
+ int rc = -ENODEV;
+ u8 *data;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+ txdata->txq_index);
+
+ /* check the loopback mode */
+ switch (loopback_mode) {
+ case BNX2X_PHY_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+ DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
+ return -EINVAL;
+ }
+ break;
+ case BNX2X_MAC_LOOPBACK:
+ if (CHIP_IS_E3(bp)) {
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (bp->port.supported[cfg_idx] &
+ (SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
+ bp->link_params.loopback_mode = LOOPBACK_XMAC;
+ else
+ bp->link_params.loopback_mode = LOOPBACK_UMAC;
+ } else
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ break;
+ case BNX2X_EXT_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't configure external loopback\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ /* prepare the loopback packet */
+ pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
+ bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
+ skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
+ if (!skb) {
+ DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+ eth_zero_addr(packet + ETH_ALEN);
+ memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char) (i & 0xff);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ rc = -ENOMEM;
+ dev_kfree_skb(skb);
+ DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
+ goto test_loopback_exit;
+ }
+
+ /* send the loopback packet */
+ num_pkts = 0;
+ tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
+ rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ pkt_prod = txdata->tx_pkt_prod++;
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_buf->first_bd = txdata->tx_bd_prod;
+ tx_buf->skb = skb;
+ tx_buf->flags = 0;
+
+ bd_prod = TX_BD(txdata->tx_bd_prod);
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
+ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_HDR_NBDS,
+ 1);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS,
+ 0);
+
+ /* turn on parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ if (CHIP_IS_E1x(bp)) {
+ u16 global_data = 0;
+ struct eth_tx_parse_bd_e1x *pbd_e1x =
+ &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+ SET_FLAG(global_data,
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+ pbd_e1x->global_data = cpu_to_le16(global_data);
+ } else {
+ u32 parsing_data = 0;
+ struct eth_tx_parse_bd_e2 *pbd_e2 =
+ &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ SET_FLAG(parsing_data,
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+ pbd_e2->parsing_data = cpu_to_le32(parsing_data);
+ }
+ wmb();
+
+ txdata->tx_db.data.prod += 2;
+ /* make sure descriptor update is observed by the HW */
+ wmb();
+ DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
+
+ barrier();
+
+ num_pkts++;
+ txdata->tx_bd_prod += 2; /* start + pbd */
+
+ udelay(100);
+
+ tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
+ if (tx_idx != tx_start_idx + num_pkts)
+ goto test_loopback_exit;
+
+	/* Unlike the HC, the IGU won't generate an interrupt for status
+	 * block updates that were performed while interrupts were
+	 * disabled.
+	 */
+ if (bp->common.int_block == INT_BLOCK_IGU) {
+		/* Disable local BHs to prevent a deadlock between
+		 * sch_direct_xmit() and bnx2x_run_loopback() (which calls
+		 * bnx2x_tx_int()), as both take netif_tx_lock().
+		 */
+ local_bh_disable();
+ bnx2x_tx_int(bp, txdata);
+ local_bh_enable();
+ }
+
+ rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+ if (rx_idx != rx_start_idx + num_pkts)
+ goto test_loopback_exit;
+
+ cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
+ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+ if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+ goto test_loopback_rx_exit;
+
+ len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
+ if (len != pkt_size)
+ goto test_loopback_rx_exit;
+
+ rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp_rx->rx_buf_size, DMA_FROM_DEVICE);
+ data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ if (*(data + i) != (unsigned char) (i & 0xff))
+ goto test_loopback_rx_exit;
+
+ rc = 0;
+
+test_loopback_rx_exit:
+
+ fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
+ fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
+ fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
+ fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
+ fp_rx->rx_sge_prod);
+
+test_loopback_exit:
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+
+ return rc;
+}
+
+static int bnx2x_test_loopback(struct bnx2x *bp)
+{
+ int rc = 0, res;
+
+ if (BP_NOMCP(bp))
+ return rc;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_LOOPBACK_FAILED;
+
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_acquire_phy_lock(bp);
+
+ res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
+ if (res) {
+ DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
+ rc |= BNX2X_PHY_LOOPBACK_FAILED;
+ }
+
+ res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
+ if (res) {
+ DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
+ rc |= BNX2X_MAC_LOOPBACK_FAILED;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+ int rc;
+ u8 is_serdes =
+ (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (BP_NOMCP(bp))
+ return -ENODEV;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_EXT_LOOPBACK_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
+ rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for external lb) failed\n");
+ return -ENODEV;
+ }
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ bnx2x_netif_stop(bp, 1);
+
+ rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
+
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
+struct code_entry {
+ u32 sram_start_addr;
+ u32 code_attribute;
+#define CODE_IMAGE_TYPE_MASK 0xf0800003
+#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003
+#define CODE_IMAGE_LENGTH_MASK 0x007ffffc
+#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000
+ u32 nvm_start_addr;
+};
+
+#define CODE_ENTRY_MAX 16
+#define CODE_ENTRY_EXTENDED_DIR_IDX 15
+#define MAX_IMAGES_IN_EXTENDED_DIR 64
+#define NVRAM_DIR_OFFSET 0x14
+
+#define EXTENDED_DIR_EXISTS(code) \
+ ((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
+ (code & CODE_IMAGE_LENGTH_MASK) != 0)
+
+#define CRC32_RESIDUAL 0xdebb20e3
+#define CRC_BUFF_SIZE 256
+
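+/* CRC an NVRAM region in CRC_BUFF_SIZE chunks. The stored CRC is part
+ * of the region, so running crc32_le() over the data plus its CRC must
+ * yield the constant CRC-32 residual 0xdebb20e3 for a valid image.
+ */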
+static int bnx2x_nvram_crc(struct bnx2x *bp,
+ int offset,
+ int size,
+ u8 *buff)
+{
+ u32 crc = ~0;
+ int rc = 0, done = 0;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
+
+ while (done < size) {
+ int count = min_t(int, size - done, CRC_BUFF_SIZE);
+
+ rc = bnx2x_nvram_read(bp, offset + done, buff, count);
+
+ if (rc)
+ return rc;
+
+ crc = crc32_le(crc, buff, count);
+ done += count;
+ }
+
+ if (crc != CRC32_RESIDUAL)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static int bnx2x_test_nvram_dir(struct bnx2x *bp,
+ struct code_entry *entry,
+ u8 *buff)
+{
+ size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
+ u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
+ int rc;
+
+ /* Zero-length images and AFEX profiles do not have CRC */
+ if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
+ return 0;
+
+ rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "image %x has failed crc test (rc %d)\n", type, rc);
+
+ return rc;
+}
+
+static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
+{
+ int rc;
+ struct code_entry entry;
+
+ rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ return bnx2x_test_nvram_dir(bp, &entry, buff);
+}
+
+static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
+ struct code_entry entry;
+ int i;
+
+ rc = bnx2x_nvram_read32(bp,
+ dir_offset +
+ sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
+ (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
+ return 0;
+
+ rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
+ &cnt, sizeof(u32));
+ if (rc)
+ return rc;
+
+ dir_offset = entry.nvm_start_addr + 8;
+
+ for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 rc, dir_offset = NVRAM_DIR_OFFSET;
+ int i;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
+
+ for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return bnx2x_test_nvram_ext_dirs(bp, buff);
+}
+
+struct crc_pair {
+ int offset;
+ int size;
+};
+
+static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
+ const struct crc_pair *nvram_tbl, u8 *buf)
+{
+ int i;
+
+ for (i = 0; nvram_tbl[i].size; i++) {
+ int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
+ nvram_tbl[i].size, buf);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram_tbl[%d] has failed crc test (rc %d)\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int bnx2x_test_nvram(struct bnx2x *bp)
+{
+ static const struct crc_pair nvram_tbl[] = {
+ { 0, 0x14 }, /* bootstrap */
+ { 0x14, 0xec }, /* dir */
+ { 0x100, 0x350 }, /* manuf_info */
+ { 0x450, 0xf0 }, /* feature_info */
+ { 0x640, 0x64 }, /* upgrade_key_info */
+ { 0x708, 0x70 }, /* manuf_key_info */
+ { 0, 0 }
+ };
+ static const struct crc_pair nvram_tbl2[] = {
+ { 0x7e8, 0x350 }, /* manuf_info2 */
+ { 0xb38, 0xf0 }, /* feature_info */
+ { 0, 0 }
+ };
+
+ u8 *buf;
+ int rc;
+ u32 magic;
+
+ if (BP_NOMCP(bp))
+ return 0;
+
+ buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
+ if (!buf) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
+ rc = -ENOMEM;
+ goto test_nvram_exit;
+ }
+
+ rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "magic value read (rc %d)\n", rc);
+ goto test_nvram_exit;
+ }
+
+ if (magic != 0x669955aa) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "wrong magic value (0x%08x)\n", magic);
+ rc = -ENODEV;
+ goto test_nvram_exit;
+ }
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
+ if (rc)
+ goto test_nvram_exit;
+
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
+ u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_HIDE_PORT1;
+
+ if (!hide) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Port 1 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
+ if (rc)
+ goto test_nvram_exit;
+ }
+ }
+
+ rc = bnx2x_test_nvram_dirs(bp, buf);
+
+test_nvram_exit:
+ kfree(buf);
+ return rc;
+}
+
+/* Send an EMPTY ramrod on the first queue */
+static int bnx2x_test_intr(struct bnx2x *bp)
+{
+ struct bnx2x_queue_state_params params = {NULL};
+
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -ENODEV;
+ }
+
+ params.q_obj = &bp->sp_objs->q_obj;
+ params.cmd = BNX2X_Q_CMD_EMPTY;
+
+ __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ return bnx2x_queue_state_change(bp, &params);
+}
+
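+/* Backs "ethtool -t <dev>". Note the result layout differs: in MF mode
+ * the offline tests are skipped and the online results occupy buf[0..2],
+ * while in SF mode registers/memory/loopbacks take buf[0..3] and
+ * nvram/interrupt/link report in buf[4..6].
+ */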
+static void bnx2x_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u8 is_serdes, link_up;
+ int rc, cnt = 0;
+
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+		   "VFs are enabled, cannot perform self-test\n");
+ return;
+ }
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev,
+ "Handling parity error recovery. Try again later\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
+
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
+
+ if (bnx2x_test_nvram(bp) != 0) {
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
+ return;
+ }
+
+ is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+ link_up = bp->link_vars.link_up;
+ /* offline tests are not supported in MF mode */
+ if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
+ int port = BP_PORT(bp);
+ u32 val;
+
+ /* save current value of input enable for TX port IF */
+ val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
+ /* disable input for TX port IF */
+ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
+ rc = bnx2x_nic_load(bp, LOAD_DIAG);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for offline) failed\n");
+ return;
+ }
+
+ /* wait until link state is restored */
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ if (bnx2x_test_registers(bp) != 0) {
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (bnx2x_test_memory(bp) != 0) {
+ buf[1] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ buf[2] = bnx2x_test_loopback(bp); /* internal LB */
+ if (buf[2] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+
+ if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+ if (buf[3] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
+
+ /* restore input for TX port IF */
+ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for online) failed\n");
+ return;
+ }
+ /* wait until link state is restored */
+ bnx2x_wait_for_link(bp, link_up, is_serdes);
+ }
+
+ if (bnx2x_test_intr(bp) != 0) {
+ if (!IS_MF(bp))
+ buf[5] = 1;
+ else
+ buf[1] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (link_up) {
+ cnt = 100;
+ while (bnx2x_link_test(bp, is_serdes) && --cnt)
+ msleep(20);
+ }
+
+ if (!cnt) {
+ if (!IS_MF(bp))
+ buf[6] = 1;
+ else
+ buf[2] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
+#define IS_PORT_STAT(i) (bnx2x_stats_arr[i].is_port_stat)
+#define HIDE_PORT_STAT(bp) IS_VF(bp)
+
+/* ethtool statistics are displayed for all regular ethernet queues */
+static int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+ return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i, num_strings = 0;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (is_multi(bp)) {
+ num_strings = bnx2x_num_stat_queues(bp) *
+ BNX2X_NUM_Q_STATS;
+ } else
+ num_strings = 0;
+ if (HIDE_PORT_STAT(bp)) {
+ for (i = 0; i < BNX2X_NUM_STATS; i++)
+ if (!IS_PORT_STAT(i))
+ num_strings++;
+ } else
+ num_strings += BNX2X_NUM_STATS;
+
+ return num_strings;
+
+ case ETH_SS_TEST:
+ return BNX2X_NUM_TESTS(bp);
+
+ case ETH_SS_PRIV_FLAGS:
+ return BNX2X_PRI_FLAG_LEN;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 bnx2x_get_private_flags(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 flags = 0;
+
+ flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
+ flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
+ flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
+
+ return flags;
+}
+
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i, j, k, start;
+ char queue_name[MAX_QUEUE_NAME_LEN+1];
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ k = 0;
+ if (is_multi(bp)) {
+ for_each_eth_queue(bp, i) {
+ memset(queue_name, 0, sizeof(queue_name));
+ snprintf(queue_name, sizeof(queue_name),
+ "%d", i);
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+ snprintf(buf + (k + j)*ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ bnx2x_q_stats_arr[j].string,
+ queue_name);
+ k += BNX2X_NUM_Q_STATS;
+ }
+ }
+
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+ bnx2x_stats_arr[i].string);
+ j++;
+ }
+
+ break;
+
+ case ETH_SS_TEST:
+ /* First 4 tests cannot be done in MF mode */
+ if (!IS_MF(bp))
+ start = 0;
+ else
+ start = 4;
+ memcpy(buf, bnx2x_tests_str_arr + start,
+ ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, bnx2x_private_arr,
+ ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+ break;
+ }
+}
+
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 *hw_stats, *offset;
+ int i, j, k = 0;
+
+ if (is_multi(bp)) {
+ for_each_eth_queue(bp, i) {
+ hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ if (bnx2x_q_stats_arr[j].size == 0) {
+ /* skip this counter */
+ buf[k + j] = 0;
+ continue;
+ }
+ offset = (hw_stats +
+ bnx2x_q_stats_arr[j].offset);
+ if (bnx2x_q_stats_arr[j].size == 4) {
+ /* 4-byte counter */
+ buf[k + j] = (u64) *offset;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ }
+ k += BNX2X_NUM_Q_STATS;
+ }
+ }
+
+ hw_stats = (u32 *)&bp->eth_stats;
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ if (bnx2x_stats_arr[i].size == 0) {
+ /* skip this counter */
+ buf[k + j] = 0;
+ j++;
+ continue;
+ }
+ offset = (hw_stats + bnx2x_stats_arr[i].offset);
+ if (bnx2x_stats_arr[i].size == 4) {
+ /* 4-byte counter */
+ buf[k + j] = (u64) *offset;
+ j++;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ j++;
+ }
+}
+
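+/* Backs "ethtool -p <dev>": blinks the port LED for identification,
+ * cycling between LED_MODE_ON and LED_MODE_FRONT_PANEL_OFF once per
+ * second, and restores the operational LED mode when done.
+ */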
+static int bnx2x_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bnx2x_is_nvm_accessible(bp)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_ON, SPEED_1000);
+ bnx2x_release_phy_lock(bp);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_FRONT_PANEL_OFF, 0);
+ bnx2x_release_phy_lock(bp);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER,
+ bp->link_vars.line_speed);
+ bnx2x_release_phy_lock(bp);
+ }
+
+ return 0;
+}
+
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v4)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v6)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = BNX2X_NUM_ETH_QUEUES(bp);
+ return 0;
+ case ETHTOOL_GRXFH:
+ return bnx2x_get_rss_flags(bp, info);
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
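+/* Backs "ethtool -N <dev> rx-flow-hash ...": TCP is fixed at 4-tuple,
+ * UDP may toggle between 2- and 4-tuple (not on E1x), and IP is fixed
+ * at 2-tuple; other flow types cannot be hashed.
+ */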
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ int udp_rss_requested;
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+		/* For TCP, only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+		/* For UDP, either 2-tuple or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ udp_rss_requested = 1;
+ else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+ udp_rss_requested = 0;
+ else
+ return -EINVAL;
+
+ if (CHIP_IS_E1x(bp) && udp_rss_requested) {
+ DP(BNX2X_MSG_ETHTOOL,
+			   "57710, 57711 boards don't support UDP 4-tuple RSS\n");
+ return -EINVAL;
+ }
+
+ if ((info->flow_type == UDP_V4_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
+ } else if ((info->flow_type == UDP_V6_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
+ }
+ return 0;
+
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+		/* For IP, only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return bnx2x_set_rss_flags(bp, info);
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+ return T_ETH_INDIRECTION_TABLE_SIZE;
+}
+
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ size_t i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+
+ /* Get the current configuration of the RSS indirection table */
+ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+	/*
+	 * We can't use a memcpy() because the internal storage of the
+	 * indirection table is a u8 array while indir points to an
+	 * array of u32.
+	 *
+	 * The indirection table contains the FW Client IDs, so we need
+	 * to align the returned table to the Client ID of the leading
+	 * RSS queue.
+	 */
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+ indir[i] = ind_table[i] - bp->fp->cl_id;
+
+ return 0;
+}
+
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t i;
+
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		/*
+		 * Same as in bnx2x_get_rxfh: we can't use a memcpy()
+		 * because the internal storage of the indirection table is
+		 * a u8 array while indir points to an array of u32.
+		 *
+		 * The indirection table contains the FW Client IDs, so we
+		 * need to align the received table to the Client ID of the
+		 * leading RSS queue.
+		 */
+ bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
+ }
+
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_config_rss_eth(bp, false);
+
+ return 0;
+}
+
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+ channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp: bnx2x private structure
+ * @num_rss: rss count
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+ bnx2x_disable_msi(bp);
+ bp->num_ethernet_queues = num_rss;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
+ bnx2x_set_int_mode(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+ channels->rx_count, channels->tx_count, channels->other_count,
+ channels->combined_count);
+
+ if (pci_num_vf(bp->pdev)) {
+		DP(BNX2X_MSG_IOV, "VFs are enabled, cannot set channels\n");
+ return -EPERM;
+ }
+
+ /* We don't support separate rx / tx channels.
+ * We don't allow setting 'other' channels.
+ */
+ if (channels->rx_count || channels->tx_count || channels->other_count
+ || (channels->combined_count == 0) ||
+ (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+ DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ /* Check if there was a change in the active parameters */
+ if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+ DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+ return 0;
+ }
+
+ /* Set the requested number of queues in bp context.
+ * Note that the actual number of queues created during load may be
+ * less than requested if memory is low.
+ */
+ if (unlikely(!netif_running(dev))) {
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return 0;
+ }
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
+static int bnx2x_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & PTP_SUPPORTED) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (bp->ptp_clock)
+ info->phc_index = ptp_clock_index(bp->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
+
+ return 0;
+ }
+
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_regs_len = bnx2x_get_regs_len,
+ .get_regs = bnx2x_get_regs,
+ .get_dump_flag = bnx2x_get_dump_flag,
+ .get_dump_data = bnx2x_get_dump_data,
+ .set_dump = bnx2x_set_dump,
+ .get_wol = bnx2x_get_wol,
+ .set_wol = bnx2x_set_wol,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .nway_reset = bnx2x_nway_reset,
+ .get_link = bnx2x_get_link,
+ .get_eeprom_len = bnx2x_get_eeprom_len,
+ .get_eeprom = bnx2x_get_eeprom,
+ .set_eeprom = bnx2x_set_eeprom,
+ .get_coalesce = bnx2x_get_coalesce,
+ .set_coalesce = bnx2x_set_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_pauseparam = bnx2x_get_pauseparam,
+ .set_pauseparam = bnx2x_set_pauseparam,
+ .self_test = bnx2x_self_test,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_priv_flags = bnx2x_get_private_flags,
+ .get_strings = bnx2x_get_strings,
+ .set_phys_id = bnx2x_set_phys_id,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+ .get_module_info = bnx2x_get_module_info,
+ .get_module_eeprom = bnx2x_get_module_eeprom,
+ .get_eee = bnx2x_get_eee,
+ .set_eee = bnx2x_set_eee,
+ .get_ts_info = bnx2x_get_ts_info,
+ .get_link_ksettings = bnx2x_get_link_ksettings,
+ .set_link_ksettings = bnx2x_set_link_ksettings,
+};
+
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .get_link = bnx2x_get_link,
+ .get_coalesce = bnx2x_get_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+ .get_link_ksettings = bnx2x_get_vf_link_ksettings,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
+{
+ netdev->ethtool_ops = (IS_PF(bp)) ?
+ &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
new file mode 100644
index 0000000000..a84d015da5
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -0,0 +1,400 @@
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_FW_DEFS_H
+#define BNX2X_FW_DEFS_H
+
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base)
+#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[151].base + ((assertListEntry) * IRO[151].m1))
+#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
+ (IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
+ IRO[157].m2))
+#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
+ (IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
+ IRO[158].m2))
+#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
+ (IRO[163].base + ((funcId) * IRO[163].m1))
+#define CSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[153].base + ((funcId) * IRO[153].m1))
+#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
+ (IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
+#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
+ (IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
+ * IRO[142].m2) + ((sbId) * IRO[142].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[324].base + ((pfId) * IRO[324].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[325].base + ((pfId) * IRO[325].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+ (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+ (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+ (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+ (IRO[322].base + ((pfId) * IRO[322].m1) + ((iscsiEqId) * IRO[322].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+ (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[323].base + ((pfId) * IRO[323].m1))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[315].base + ((pfId) * IRO[315].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[314].base + ((pfId) * IRO[314].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[313].base + ((pfId) * IRO[313].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[155].base + ((funcId) * IRO[155].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
+ (IRO[146].base + ((pfId) * IRO[146].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+ (IRO[147].base + ((pfId) * IRO[147].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+ (IRO[145].base + ((pfId) * IRO[145].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+ (IRO[148].base + ((pfId) * IRO[148].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+ (IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
+#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
+ (IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+ (IRO[138].base + ((sbId) * IRO[138].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+ (IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+ (IRO[136].base + ((sbId) * IRO[136].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+ (IRO[141].base + ((sbId) * IRO[141].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+ (IRO[159].base + ((vfId) * IRO[159].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+ (IRO[160].base + ((vfId) * IRO[160].m1))
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[154].base + ((funcId) * IRO[154].m1))
+#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
+ (IRO[207].base + ((pfId) * IRO[207].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
+#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
+ (IRO[205].base + ((pfId) * IRO[205].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[107].base + ((funcId) * IRO[107].m1))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[279].base + ((pfId) * IRO[279].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
+ (IRO[280].base + ((pfId) * IRO[280].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+ (IRO[281].base + ((pfId) * IRO[281].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[278].base + ((pfId) * IRO[278].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[277].base + ((pfId) * IRO[277].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[276].base + ((pfId) * IRO[276].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[275].base + ((pfId) * IRO[275].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+ (IRO[285].base + ((pfId) * IRO[285].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[271].base + ((pfId) * IRO[271].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[272].base + ((pfId) * IRO[272].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+ (IRO[206].base + ((pfId) * IRO[206].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[109].base + ((funcId) * IRO[109].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+ (IRO[224].base + ((pfId) * IRO[224].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[108].base + ((funcId) * IRO[108].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[213].base)
+#define USTORM_AGG_DATA_SIZE (IRO[213].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[180].base + ((assertListEntry) * IRO[180].m1))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+ (IRO[187].base + ((portId) * IRO[187].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+ (IRO[326].base + ((pfId) * IRO[326].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[182].base + ((funcId) * IRO[182].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[290].base + ((pfId) * IRO[290].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[295].base + ((pfId) * IRO[295].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+ (IRO[292].base + ((pfId) * IRO[292].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[288].base + ((pfId) * IRO[288].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[289].base + ((pfId) * IRO[289].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+ (IRO[293].base + ((pfId) * IRO[293].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+ (IRO[186].base + ((pfId) * IRO[186].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[184].base + ((funcId) * IRO[184].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+ (IRO[216].base + ((portId) * IRO[216].m1) + ((clientId) * \
+ IRO[216].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+ (IRO[217].base + ((qzoneId) * IRO[217].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[214].base)
+#define USTORM_TPA_BTR_SIZE (IRO[214].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[183].base + ((funcId) * IRO[183].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[50].base + ((assertListEntry) * IRO[50].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+ (IRO[43].base + ((portId) * IRO[43].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[47].base + ((funcId) * IRO[47].m1))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+ (IRO[306].base + ((pfId) * IRO[306].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+ (IRO[307].base + ((pfId) * IRO[307].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+ (IRO[308].base + ((pfId) * IRO[308].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+ (IRO[309].base + ((pfId) * IRO[309].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+ (IRO[310].base + ((pfId) * IRO[310].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+ (IRO[311].base + ((pfId) * IRO[311].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+ (IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+ (IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+ (IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+ (IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[49].base + ((funcId) * IRO[49].m1))
+#define XSTORM_SPQ_DATA_OFFSET(funcId) \
+ (IRO[32].base + ((funcId) * IRO[32].m1))
+#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
+#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
+ (IRO[30].base + ((funcId) * IRO[30].m1))
+#define XSTORM_SPQ_PROD_OFFSET(funcId) \
+ (IRO[31].base + ((funcId) * IRO[31].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
+ (IRO[218].base + ((portId) * IRO[218].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[219].base + ((portId) * IRO[219].m1))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
+ (IRO[221].base + (((pfId)>>1) * IRO[221].m1) + (((pfId)&1) * \
+ IRO[221].m2))
+#define XSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[48].base + ((funcId) * IRO[48].m1))
+#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \
+ (IRO[386].base + ((fid) * IRO[386].m1))
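+
+/* Illustrative sketch of the addressing pattern (assumed usage; the
+ * real consumers are in bnx2x_main.c): each IRO entry encodes a
+ * storm-internal offset as base + index * stride, so enabling a
+ * function in the CSTORM would look roughly like
+ *
+ *	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ *		    CSTORM_FUNC_EN_OFFSET(BP_FUNC(bp)), 1);
+ *
+ * where REG_WR8(), BAR_CSTRORM_INTMEM and BP_FUNC() come from bnx2x.h.
+ */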
+#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
+
+/* eth hsi version */
+#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
+
+/* Ethernet Ring parameters */
+#define X_ETH_LOCAL_RING_SIZE 13
+#define FIRST_BD_IN_PKT 0
+#define PARSE_BD_INDEX 1
+#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+#define U_ETH_NUM_OF_SGES_TO_FETCH 8
+#define U_ETH_MAX_SGES_FOR_PACKET 3
+
+/* Rx ring params */
+#define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_LOCAL_SGE_RING_SIZE 10
+#define U_ETH_SGL_SIZE 8
+	/* The fw will pad the buffer with this value, so the IP header \
+	   will be aligned to 4 bytes */
+#define IP_HEADER_ALIGNMENT_PADDING 2
+
+#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
+ (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
+
+#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
+#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
+#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
+
+#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
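+
+/* Illustrative note (an assumption about intended use; the real ring
+ * arithmetic is in bnx2x_cmn.h): the per-page element counts above
+ * are powers of two, so the _MASK values give a cheap end-of-page
+ * test, e.g.
+ *
+ *	if ((idx & U_ETH_BDS_PER_PAGE_MASK) == U_ETH_BDS_PER_PAGE_MASK)
+ *		idx += next_page_skip;
+ *
+ * with next_page_skip a hypothetical count of next-page-pointer BDs.
+ */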
+
+#define U_ETH_UNDEFINED_Q 0xFF
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
+
+/* The CRC32 seed used for the hash (reduction) of the multicast address */
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE (8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
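+
+/* Illustrative sketch (simplified; the driver's actual reduction is
+ * bnx2x_mcast_bin_from_mac() in bnx2x_sp.c): a multicast bin index is
+ * the CRC32C of the MAC address reduced to ETH_CRC32_HASH_BIT_SIZE
+ * bits, e.g.
+ *
+ *	u32 crc = crc32c_le(ETH_CRC32_HASH_SEED, mac, ETH_ALEN);
+ *	u8 bin = crc & ((1 << ETH_CRC32_HASH_BIT_SIZE) - 1);
+ *
+ * which selects one of ETH_NUM_OF_MCAST_BINS (256) bins.
+ */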
+
+/* Maximal L2 clients supported */
+#define ETH_MAX_RX_CLIENTS_E1 18
+#define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
+#define MAX_STAT_COUNTER_ID_E1H 56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
+
+/* Maximal aggregation queues supported */
+#define ETH_MAX_AGGREGATION_QUEUES_E1 32
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
+
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
+
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
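+
+/* Worked example: with MAX_RAMRODS_PER_PORT == 8 (defined below),
+ * ETH_MIN_RX_CQES_WITHOUT_TPA is 11, so the minimum with TPA is
+ * 32 + 11 = 43 CQEs on E1 and 64 + 11 = 75 CQEs on E1H/E2.
+ */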
+
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
+
+
+/* This file defines HSI constants common to all microcode flows */
+
+#define PROTOCOL_STATE_BIT_OFFSET 6
+
+#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+
+/* microcode fixed page size 4K (chains and ring segments) */
+#define MC_PAGE_SIZE 4096
+
+/* Number of indices per slow-path SB */
+#define HC_SP_SB_MAX_INDICES 16
+
+/* Number of indices per SB */
+#define HC_SB_MAX_INDICES_E1X 8
+#define HC_SB_MAX_INDICES_E2 8
+
+#define HC_SB_MAX_SB_E1X 32
+#define HC_SB_MAX_SB_E2 136
+
+#define HC_SP_SB_ID 0xde
+
+#define HC_SB_MAX_SM 2
+
+#define HC_SB_MAX_DYNAMIC_INDICES 4
+
+/* max number of slow path commands per port */
+#define MAX_RAMRODS_PER_PORT 8
+
+/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define TIMERS_TICK_SIZE_CHIP (1e-3)
+
+#define TSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define XSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
+
+/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define XSTORM_IP_ID_ROLL_HALF 0x8000
+#define XSTORM_IP_ID_ROLL_ALL 0
+
+#define FW_LOG_LIST_SIZE 50
+
+#define NUM_OF_SAFC_BITS 16
+#define MAX_COS_NUMBER 4
+#define MAX_TRAFFIC_TYPES 8
+#define MAX_PFC_PRIORITIES 8
+#define MAX_VLAN_PRIORITIES 8
+	/* used by the traffic_type_to_priority[] array to mark a traffic \
+	type that is not mapped to any priority */
+#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
+
+#define C_ERES_PER_PAGE \
+ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
+
+#define STATS_QUERY_CMD_COUNT 16
+
+#define AFEX_LIST_TABLE_SIZE 4096
+
+#define INVALID_VNIC_ID 0xFF
+
+#define UNDEF_IRO 0x80000000
+
+/* used for defining the amount of FCoE tasks supported for PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
+
+#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
new file mode 100644
index 0000000000..9e3b5a1e9f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -0,0 +1,40 @@
+/* bnx2x_fw_file_hdr.h: FW binary file header structure.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ * Based on the original idea of John Wright <john.wright@hp.com>.
+ */
+
+#ifndef BNX2X_INIT_FILE_HDR_H
+#define BNX2X_INIT_FILE_HDR_H
+
+struct bnx2x_fw_file_section {
+ __be32 len;
+ __be32 offset;
+};
+
+struct bnx2x_fw_file_hdr {
+ struct bnx2x_fw_file_section init_ops;
+ struct bnx2x_fw_file_section init_ops_offsets;
+ struct bnx2x_fw_file_section init_data;
+ struct bnx2x_fw_file_section tsem_int_table_data;
+ struct bnx2x_fw_file_section tsem_pram_data;
+ struct bnx2x_fw_file_section usem_int_table_data;
+ struct bnx2x_fw_file_section usem_pram_data;
+ struct bnx2x_fw_file_section csem_int_table_data;
+ struct bnx2x_fw_file_section csem_pram_data;
+ struct bnx2x_fw_file_section xsem_int_table_data;
+ struct bnx2x_fw_file_section xsem_pram_data;
+ struct bnx2x_fw_file_section iro_arr;
+ struct bnx2x_fw_file_section fw_version;
+};
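+
+/* Illustrative sketch (an assumption, not the driver's loader; the
+ * real parsing and bounds checking are in bnx2x_check_firmware() in
+ * bnx2x_main.c): each section is located in the request_firmware()
+ * blob via its big-endian (len, offset) pair, e.g.
+ *
+ *	const struct bnx2x_fw_file_hdr *hdr =
+ *		(const struct bnx2x_fw_file_hdr *)fw->data;
+ *	const u8 *init_data =
+ *		fw->data + be32_to_cpu(hdr->init_data.offset);
+ *	u32 init_data_len = be32_to_cpu(hdr->init_data.len);
+ */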
+
+#endif /* BNX2X_INIT_FILE_HDR_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
new file mode 100644
index 0000000000..611efee758
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -0,0 +1,6048 @@
+/* bnx2x_hsi.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef BNX2X_HSI_H
+#define BNX2X_HSI_H
+
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
+
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
+struct license_key {
+ u32 reserved[6];
+
+ u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
+
+ u32 reserved_a;
+
+ u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
+
+ u32 reserved_b[4];
+};
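+
+/* Illustrative sketch (assumed usage, with "key" a hypothetical
+ * struct license_key pointer read from NVRAM): the packed 16-bit
+ * fields are extracted with the mask/shift pairs above, e.g.
+ *
+ *	u16 iscsi_trgt = (key->max_iscsi_conn &
+ *			  BNX2X_MAX_ISCSI_TRGT_CONN_MASK) >>
+ *			 BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;
+ *
+ * The same mask/shift convention recurs throughout this header.
+ */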
+
+/****************************************************************************
+ * Shared HW configuration *
+ ****************************************************************************/
+#define PIN_CFG_NA 0x00000000
+#define PIN_CFG_GPIO0_P0 0x00000001
+#define PIN_CFG_GPIO1_P0 0x00000002
+#define PIN_CFG_GPIO2_P0 0x00000003
+#define PIN_CFG_GPIO3_P0 0x00000004
+#define PIN_CFG_GPIO0_P1 0x00000005
+#define PIN_CFG_GPIO1_P1 0x00000006
+#define PIN_CFG_GPIO2_P1 0x00000007
+#define PIN_CFG_GPIO3_P1 0x00000008
+#define PIN_CFG_EPIO0 0x00000009
+#define PIN_CFG_EPIO1 0x0000000a
+#define PIN_CFG_EPIO2 0x0000000b
+#define PIN_CFG_EPIO3 0x0000000c
+#define PIN_CFG_EPIO4 0x0000000d
+#define PIN_CFG_EPIO5 0x0000000e
+#define PIN_CFG_EPIO6 0x0000000f
+#define PIN_CFG_EPIO7 0x00000010
+#define PIN_CFG_EPIO8 0x00000011
+#define PIN_CFG_EPIO9 0x00000012
+#define PIN_CFG_EPIO10 0x00000013
+#define PIN_CFG_EPIO11 0x00000014
+#define PIN_CFG_EPIO12 0x00000015
+#define PIN_CFG_EPIO13 0x00000016
+#define PIN_CFG_EPIO14 0x00000017
+#define PIN_CFG_EPIO15 0x00000018
+#define PIN_CFG_EPIO16 0x00000019
+#define PIN_CFG_EPIO17 0x0000001a
+#define PIN_CFG_EPIO18 0x0000001b
+#define PIN_CFG_EPIO19 0x0000001c
+#define PIN_CFG_EPIO20 0x0000001d
+#define PIN_CFG_EPIO21 0x0000001e
+#define PIN_CFG_EPIO22 0x0000001f
+#define PIN_CFG_EPIO23 0x00000020
+#define PIN_CFG_EPIO24 0x00000021
+#define PIN_CFG_EPIO25 0x00000022
+#define PIN_CFG_EPIO26 0x00000023
+#define PIN_CFG_EPIO27 0x00000024
+#define PIN_CFG_EPIO28 0x00000025
+#define PIN_CFG_EPIO29 0x00000026
+#define PIN_CFG_EPIO30 0x00000027
+#define PIN_CFG_EPIO31 0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA 0x00000000
+#define EPIO_CFG_EPIO0 0x00000001
+#define EPIO_CFG_EPIO1 0x00000002
+#define EPIO_CFG_EPIO2 0x00000003
+#define EPIO_CFG_EPIO3 0x00000004
+#define EPIO_CFG_EPIO4 0x00000005
+#define EPIO_CFG_EPIO5 0x00000006
+#define EPIO_CFG_EPIO6 0x00000007
+#define EPIO_CFG_EPIO7 0x00000008
+#define EPIO_CFG_EPIO8 0x00000009
+#define EPIO_CFG_EPIO9 0x0000000a
+#define EPIO_CFG_EPIO10 0x0000000b
+#define EPIO_CFG_EPIO11 0x0000000c
+#define EPIO_CFG_EPIO12 0x0000000d
+#define EPIO_CFG_EPIO13 0x0000000e
+#define EPIO_CFG_EPIO14 0x0000000f
+#define EPIO_CFG_EPIO15 0x00000010
+#define EPIO_CFG_EPIO16 0x00000011
+#define EPIO_CFG_EPIO17 0x00000012
+#define EPIO_CFG_EPIO18 0x00000013
+#define EPIO_CFG_EPIO19 0x00000014
+#define EPIO_CFG_EPIO20 0x00000015
+#define EPIO_CFG_EPIO21 0x00000016
+#define EPIO_CFG_EPIO22 0x00000017
+#define EPIO_CFG_EPIO23 0x00000018
+#define EPIO_CFG_EPIO24 0x00000019
+#define EPIO_CFG_EPIO25 0x0000001a
+#define EPIO_CFG_EPIO26 0x0000001b
+#define EPIO_CFG_EPIO27 0x0000001c
+#define EPIO_CFG_EPIO28 0x0000001d
+#define EPIO_CFG_EPIO29 0x0000001e
+#define EPIO_CFG_EPIO30 0x0000001f
+#define EPIO_CFG_EPIO31 0x00000020
+
+struct mac_addr {
+ u32 upper;
+ u32 lower;
+};
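+
+/* Illustrative sketch (an assumption about the packing, consistent
+ * with the mac_upper/mac_lower pairs below): the 6-byte MAC splits
+ * into two bytes in "upper" and four in "lower", e.g.
+ *
+ *	mac[0] = (u8)(upper >> 8);
+ *	mac[1] = (u8)upper;
+ *	mac[2] = (u8)(lower >> 24);
+ *	mac[3] = (u8)(lower >> 16);
+ *	mac[4] = (u8)(lower >> 8);
+ *	mac[5] = (u8)lower;
+ */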
+
+struct shared_hw_cfg { /* NVRAM Offset */
+ /* Up to 16 bytes of NULL-terminated string */
+ u8 part_num[16]; /* 0x104 */
+
+ u32 config; /* 0x114 */
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
+ #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002
+
+ #define SHARED_HW_CFG_PORT_SWAP 0x00000004
+
+ #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
+
+ #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010
+
+ #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
+ #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
+	/* Whichever MFW is found in NVM
+	(if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
+ #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
+ #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
+ #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
+ #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
+	/* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
+	(can only be used when an add-in board, not BMC, pulls down SPIO4) */
+	#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
+	/* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
+	(can only be used when an add-in board, not BMC, pulls down SPIO4) */
+	#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
+	/* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
+	(can only be used when an add-in board, not BMC, pulls down SPIO4) */
+	#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
+
+ #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000
+ #define SHARED_HW_CFG_LED_MODE_SHIFT 16
+ #define SHARED_HW_CFG_LED_MAC1 0x00000000
+ #define SHARED_HW_CFG_LED_PHY1 0x00010000
+ #define SHARED_HW_CFG_LED_PHY2 0x00020000
+ #define SHARED_HW_CFG_LED_PHY3 0x00030000
+ #define SHARED_HW_CFG_LED_MAC2 0x00040000
+ #define SHARED_HW_CFG_LED_PHY4 0x00050000
+ #define SHARED_HW_CFG_LED_PHY5 0x00060000
+ #define SHARED_HW_CFG_LED_PHY6 0x00070000
+ #define SHARED_HW_CFG_LED_MAC3 0x00080000
+ #define SHARED_HW_CFG_LED_PHY7 0x00090000
+ #define SHARED_HW_CFG_LED_PHY9 0x000a0000
+ #define SHARED_HW_CFG_LED_PHY11 0x000b0000
+ #define SHARED_HW_CFG_LED_MAC4 0x000c0000
+ #define SHARED_HW_CFG_LED_PHY8 0x000d0000
+ #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+ #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000
+
+
+ #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
+ #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
+ #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000
+ #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000
+ #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000
+ #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000
+ #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
+ #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000
+
+ #define SHARED_HW_CFG_SRIOV_MASK 0x40000000
+ #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000
+
+ #define SHARED_HW_CFG_ATC_MASK 0x80000000
+ #define SHARED_HW_CFG_ATC_DISABLED 0x00000000
+ #define SHARED_HW_CFG_ATC_ENABLED 0x80000000
+
+ u32 config2; /* 0x118 */
+	/* One-time auto-detect grace period (in seconds) */
+ #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff
+ #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0
+
+ #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
+ #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000
+
+ /* The default value for the core clock is 250MHz and it is
+ achieved by setting the clock change to 4 */
+ #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00
+ #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9
+
+ #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000
+ #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
+ #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
+
+ #define SHARED_HW_CFG_HIDE_PORT1 0x00002000
+
+ #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000
+ #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000
+ #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000
+
+ /* Output low when PERST is asserted */
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000
+
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000
+
+ /* The fan failure mechanism is usually related to the PHY type
+ since the power consumption of the board is determined by the PHY.
+	Currently, a fan is required for most designs with SFX7101, BCM8727
+ and BCM8481. If a fan is not required for a board which uses one
+ of those PHYs, this field should be set to "Disabled". If a fan is
+ required for a different PHY type, this option should be set to
+ "Enabled". The fan failure indication is expected on SPIO5 */
+ #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
+ #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
+ #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
+ #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
+
+ /* ASPM Power Management support */
+ #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000
+
+ /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+ tl_control_0 (register 0x2800) */
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000
+
+ #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000
+ #define SHARED_HW_CFG_PORT_MODE_2 0x00000000
+ #define SHARED_HW_CFG_PORT_MODE_4 0x01000000
+
+ #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000
+ #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000
+
+ /* Set the MDC/MDIO access for the first external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
+
+ /* Set the MDC/MDIO access for the second external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
+
+ u32 config_3; /* 0x11C */
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK 0x00000F00
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT 8
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5 0x00000000
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0 0x00000100
+
+ u32 ump_nc_si_config; /* 0x120 */
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
+
+ #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00
+ #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8
+
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
+
+ u32 board; /* 0x124 */
+ #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F
+ #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6
+ /* Use the PIN_CFG_XXX defines on top */
+ #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000
+ #define SHARED_HW_CFG_BOARD_REV_SHIFT 16
+
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
+
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
+
+ u32 wc_lane_config; /* 0x128 */
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000
+ #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000
+ #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000
+ #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000
+	/* RX lane Polarity swap */
+ #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000
+ #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000
+ #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000
+ #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000
+
+ /* Selects the port layout of the board */
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000
+};
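+
+/* Illustrative sketch (assumed usage; SHMEM_RD() is defined in
+ * bnx2x.h and the field path assumes the shmem dev_info layout
+ * declared later in this header): shared_hw_cfg lives in shmem, so a
+ * field such as the LED mode is typically fetched as
+ *
+ *	u32 cfg = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+ *	u32 led = (cfg & SHARED_HW_CFG_LED_MODE_MASK) >>
+ *		  SHARED_HW_CFG_LED_MODE_SHIFT;
+ */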
+
+
+/****************************************************************************
+ * Port HW configuration *
+ ****************************************************************************/
+struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
+
+ u32 pci_id;
+ #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
+ #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
+
+ u32 pci_sub_id;
+ #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000
+ #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff
+
+ u32 power_dissipated;
+ #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff
+ #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00
+ #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000
+ #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000
+ #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
+
+ u32 power_consumed;
+ #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff
+ #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00
+ #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000
+ #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000
+ #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
+
+ u32 mac_upper;
+ #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_UPPERMAC_SHIFT 0
+ u32 mac_lower;
+
+ u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */
+ u32 iscsi_mac_lower;
+
+ u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */
+ u32 rdma_mac_lower;
+
+ u32 serdes_config;
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
+
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
+
+
+ /* Default values: 2P-64, 4P-32 */
+ u32 pf_config; /* 0x158 */
+ #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F
+ #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0
+
+	/* Default value: 17 */
+ #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00
+ #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8
+
+ #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000
+ #define PORT_HW_CFG_FLR_ENABLED 0x00010000
+
+ u32 vf_config; /* 0x15C */
+ #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F
+ #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0
+
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16
+
+ u32 mf_pci_id; /* 0x160 */
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0
+
+ /* Controls the TX laser of the SFP+ module */
+ u32 sfp_ctrl; /* 0x164 */
+ #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_TX_LASER_SHIFT 0
+ #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+ #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+ #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+ #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+ #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+ #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+ #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+
+ /* The output pin TX_DIS that controls the TX laser of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ u32 e3_sfp_ctrl; /* 0x168 */
+ #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0
+
+ /* The output pin for SFPP_TYPE which turns on the Fault module LED */
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8
+
+	/* The input pin MOD_ABS that indicates whether an SFP+ module is
+ present or not. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16
+
+	/* The output pin PWRDIS_SFP_X which disables the power of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000
+ #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24
+
+ /*
+ * The input pin which signals module transmit fault. Use the
+ * PIN_CFG_XXX defines on top
+ */
+ u32 e3_cmn_pin_cfg; /* 0x16C */
+ #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0
+
+	/* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on
+ top */
+ #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8
+
+ /*
+ * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+ * defines on top
+ */
+ #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16
+
+	/* The output pin values of BSC_SEL which select the I2C bus for this
+	port in the I2C mux */
+ #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000
+ #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000
+
+
+ /*
+	 * The input pin I_FAULT which indicates that over-current has occurred.
+ * Use the PIN_CFG_XXX defines on top
+ */
+ u32 e3_cmn_pin_cfg1; /* 0x170 */
+ #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
+
+ /* pause on host ring */
+ u32 generic_features; /* 0x174 */
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
+
+	/* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2.
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM
+ */
+ u32 sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+ /* Set non-default values for TXFIR in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_IFIR_MASK 0x00F00000
+ #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT 20
+
+ /* Set non-default values for IPREDRIVER in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK 0x0F000000
+ #define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT 24
+
+ /* Set non-default values for POST2 in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_POST2_MASK 0xF0000000
+ #define PORT_HW_CFG_TX_DRV_POST2_SHIFT 28
+
+ u32 reserved0[5]; /* 0x17c */
+
+ u32 aeu_int_mask; /* 0x190 */
+
+ u32 media_type; /* 0x194 */
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16
+
+ /* 4 times 16 bits for all 4 lanes. In case external PHY is present
+ (not direct mode), those values will not take effect on the 4 XGXS
+ lanes. For some external PHYs (such as 8706 and 8726) the values
+	will be used to configure the external PHY; in those cases, not
+ all 4 values are needed. */
+ u16 xgxs_config_rx[4]; /* 0x198 */
+ u16 xgxs_config_tx[4]; /* 0x1A0 */
+
+ /* For storing FCOE mac on shared memory */
+ u32 fcoe_fip_mac_upper;
+ #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0
+ u32 fcoe_fip_mac_lower;
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 Reserved1[49]; /* 0x1C0 */
+
+ /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
+ 84833 only */
+ u32 xgbt_phy_cfg; /* 0x284 */
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0
+
+ u32 default_cfg; /* 0x288 */
+ #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+ #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+ #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+ #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+ #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+ #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+ #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+ #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+ #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+ #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+ #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+ #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+ #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+ #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+ #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+ #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+ #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+ #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+ #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+ #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+	/* When the KR link is required to be forced to a speed which is not
+	KR-compliant, this parameter determines the trigger for it.
+	When GPIO is selected, a low input will force the speed. Currently the
+	default speed is 1G. In the future, this may be widened to select the
+	forced speed with another parameter. Note that when force-1G is
+	enabled, it overrides option 56: the Link Speed option. */
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+	/* Determines which GPIO is used to reset the external PHY */
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
+
+ /* Enable BAM on KR */
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
+ /* Enable Common Mode Sense */
+ #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+ #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+ #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
+ /* Determine the Serdes electrical interface */
+ #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
+ #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000
+ #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000
+ #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000
+
+
+ u32 speed_capability_mask2; /* 0x28C */
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000
+
+
+ /* In the case where two media types (e.g. copper and fiber) are
+ present and electrically active at the same time, PHY Selection
+ will determine which of the two PHYs will be designated as the
+ Active PHY and used for a connection to the network. */
+ u32 multi_phy_config; /* 0x290 */
+ #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
+ #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
+ #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+ /* When enabled, all second phy nvram parameters will be swapped
+ with the first phy parameters */
+ #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
+ #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
+ #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
+ #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
+
+
+ /* Address of the second external phy */
+ u32 external_phy_config2; /* 0x294 */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
+
+ /* The second XGXS external PHY type */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858 0x00001200
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
+
+
+ /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
+ 8706, 8726 and 8727) not all 4 values are needed. */
+ u16 xgxs_config2_rx[4]; /* 0x296 */
+ u16 xgxs_config2_tx[4]; /* 0x2A0 */
+
+ u32 lane_config;
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
+ #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ /* AN and forced */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
+
+ /* Indicate whether to swap the external phy polarity */
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
+
+
+ u32 external_phy_config;
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
+
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 0x00001200
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
+
+ u32 speed_capability_mask;
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
+
+ /* A place to hold the original MAC address as a backup */
+ u32 backup_mac_upper; /* 0x2B4 */
+ u32 backup_mac_lower; /* 0x2B8 */
+
+};
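+
+/* Illustrative sketch (assumed usage; same shmem convention as for
+ * shared_hw_cfg): port_hw_cfg is instantiated per port inside the
+ * dev_info region, so reads are indexed by port, e.g.
+ *
+ *	u32 phy_cfg = SHMEM_RD(bp,
+ *		dev_info.port_hw_config[port].external_phy_config);
+ *	u8 phy_addr = (phy_cfg & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
+ *		      PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
+ */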
+
+
+/****************************************************************************
+ * Shared Feature configuration *
+ ****************************************************************************/
+struct shared_feat_cfg { /* NVRAM Offset */
+
+ u32 config; /* 0x450 */
+ #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
+
+ /* Use NVRAM values instead of HW default values */
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+ 0x00000002
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+ 0x00000000
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+ 0x00000002
+
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008
+
+ #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030
+ #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4
+
+	/* Override the OTP back to single function mode. When using GPIO,
+	high means SF only, low means according to the CLP configuration */
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700
+
+ /* The interval in seconds between sending LLDP packets. Set to zero
+ to disable the feature */
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16
+
+ /* The assigned device type ID for LLDP usage */
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24
+
+};
+
+
+/****************************************************************************
+ * Port Feature configuration *
+ ****************************************************************************/
+struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
+
+ u32 config;
+ #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
+ #define PORT_FEATURE_BAR1_SIZE_SHIFT 0
+ #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001
+ #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002
+ #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003
+ #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004
+ #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005
+ #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006
+ #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007
+ #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008
+ #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009
+ #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a
+ #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b
+ #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c
+ #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d
+ #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e
+ #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f
+ #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0
+ #define PORT_FEATURE_BAR2_SIZE_SHIFT 4
+ #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010
+ #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020
+ #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030
+ #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040
+ #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050
+ #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060
+ #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070
+ #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080
+ #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090
+ #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0
+ #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0
+ #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0
+ #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0
+ #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0
+ #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0
+
+ #define PORT_FEAT_CFG_DCBX_MASK 0x00000100
+ #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800
+
+ #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
+ #define PORT_FEATURE_EN_SIZE_SHIFT 24
+ #define PORT_FEATURE_WOL_ENABLED 0x01000000
+ #define PORT_FEATURE_MBA_ENABLED 0x02000000
+ #define PORT_FEATURE_MFW_ENABLED 0x04000000
+
+ /* Advertise expansion ROM even if MBA is disabled */
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000
+
+ /* Check the optic vendor via i2c against a list of approved modules
+ in a separate nvram image */
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+ 0x00000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+ 0x20000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
+
+ u32 wol_config;
+	/* The default is used when the driver is set to "auto" mode */
+ #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003
+ #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0
+ #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000
+ #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001
+ #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002
+ #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003
+ #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004
+ #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008
+ #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
+
+ u32 mba_config;
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007
+
+ #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038
+ #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3
+
+ #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100
+ #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200
+ #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
+ #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
+ #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000
+ u32 bmc_config;
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001
+
+ u32 mba_vlan_cfg;
+ #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff
+ #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
+ #define PORT_FEATURE_MBA_VLAN_EN 0x00010000
+
+ u32 resource_cfg;
+ #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001
+ #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002
+ #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004
+ #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008
+ #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010
+
+ u32 smbus_config;
+ #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
+ #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
+
+ u32 vf_config;
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f
+
+ u32 link_config; /* Used as HW defaults for the driver */
+ #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
+ #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
+ /* (forced) low speed switch (< 10G) */
+ #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
+ /* (forced) high speed switch (>= 10G) */
+ #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
+ #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
+ #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
+
+ #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000
+ #define PORT_FEATURE_LINK_SPEED_SHIFT 16
+ #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
+ #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
+ #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
+ #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
+ #define PORT_FEATURE_LINK_SPEED_1G 0x00050000
+ #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
+ #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
+ #define PORT_FEATURE_LINK_SPEED_20G 0x00080000
+
+ #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
+ #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
+ #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
+ #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
+ #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
+ #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
+ #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
+
+ /* The default for MCP link configuration,
+ uses the same defines as link_config */
+ u32 mfw_wol_link_cfg;
+
+ /* The default for the driver of the second external phy,
+ uses the same defines as link_config */
+ u32 link_config2; /* 0x47C */
+
+ /* The default for MCP of the second external phy,
+ uses the same defines as link_config */
+ u32 mfw_wol_link_cfg2; /* 0x480 */
+
+
+ /* EEE power saving mode */
+ u32 eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ u32 Reserved2[16]; /* 0x488 */
+};
+
+
+/****************************************************************************
+ * Device Information *
+ ****************************************************************************/
+struct shm_dev_info { /* size */
+
+ u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
+
+ struct shared_hw_cfg shared_hw_config; /* 40 */
+
+ struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
+
+ struct shared_feat_cfg shared_feature_config; /* 4 */
+
+ struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
+
+};
+
+struct extended_dev_info_shared_cfg {
+ u32 reserved[18];
+ u32 mbi_version;
+ u32 mbi_date;
+};
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+ #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
+
+#define FUNC_0 0
+#define FUNC_1 1
+#define FUNC_2 2
+#define FUNC_3 3
+#define FUNC_4 4
+#define FUNC_5 5
+#define FUNC_6 6
+#define FUNC_7 7
+#define E1_FUNC_MAX 2
+#define E1H_FUNC_MAX 8
+#define E2_FUNC_MAX 4 /* per path */
+
+#define VN_0 0
+#define VN_1 1
+#define VN_2 2
+#define VN_3 3
+#define E1VN_MAX 1
+#define E1HVN_MAX 4
+
+#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */
+/* This value (in milliseconds) determines the frequency of the driver
+ * issuing the PULSE message code. The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define DRV_PULSE_PERIOD_MS 250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out. Once
+ * the firmware has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define FW_ACK_TIME_OUT_MS 5000
+
+#define FW_ACK_POLL_TIME_MS 1
+
+#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
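+
+/*
+ * Illustrative sketch (not part of the original HSI): with the values
+ * above, a driver waiting for a firmware acknowledgement polls up to
+ * FW_ACK_NUM_OF_POLL (5000/1 = 5000) times, sleeping FW_ACK_POLL_TIME_MS
+ * between reads. fw_ack_pending() is a hypothetical accessor used only
+ * for illustration.
+ */
+#if 0	/* example only */
+static bool example_wait_for_fw_ack(void)
+{
+	int cnt;
+
+	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
+		if (!fw_ack_pending())		/* hypothetical accessor */
+			return true;
+		msleep(FW_ACK_POLL_TIME_MS);
+	}
+	return false;	/* assume no firmware is running */
+}
+#endif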
+
+#define MFW_TRACE_SIGNATURE 0x54524342
+
+/****************************************************************************
+ * Driver <-> FW Mailbox *
+ ****************************************************************************/
+struct drv_port_mb {
+
+ u32 link_status;
+ /* Driver should update this field on any link change event */
+
+ #define LINK_STATUS_NONE (0<<0)
+ #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
+ #define LINK_STATUS_LINK_UP 0x00000001
+ #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
+ #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1)
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
+ #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+ #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
+ #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+ #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+ #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+ #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
+ #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
+ #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
+ #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
+ #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
+
+ #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
+ #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
+
+ #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
+ #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
+
+ #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+ #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
+ #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
+ #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
+ #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
+
+ #define LINK_STATUS_SERDES_LINK 0x00100000
+
+ #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
+ #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
+ #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
+ #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
+
+ #define LINK_STATUS_PFC_ENABLED 0x20000000
+
+ #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ /* MCP firmware does not use this field */
+ u32 ext_phy_fw_version;
+
+};
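+
+/*
+ * Illustrative sketch (not part of the original HSI): the speed/duplex
+ * field of link_status occupies bits 4:1, so the masked value can be
+ * compared directly against the (n<<1) encodings above.
+ */
+#if 0	/* example only */
+static bool example_link_is_10g_full(u32 link_status)
+{
+	if (!(link_status & LINK_STATUS_LINK_UP))
+		return false;
+	return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
+	       LINK_STATUS_SPEED_AND_DUPLEX_10GTFD;
+}
+#endif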
+
+
+struct drv_func_mb {
+
+ u32 drv_mb_header;
+ #define DRV_MSG_CODE_MASK 0xffff0000
+ #define DRV_MSG_CODE_LOAD_REQ 0x10000000
+ #define DRV_MSG_CODE_LOAD_DONE 0x11000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
+ #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+ #define DRV_MSG_CODE_DCC_OK 0x30000000
+ #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
+ #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
+ #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
+ #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
+ #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
+ #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
+ #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
+ #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
+ #define DRV_MSG_CODE_OEM_OK 0x00010000
+ #define DRV_MSG_CODE_OEM_FAILURE 0x00020000
+ #define DRV_MSG_CODE_OEM_UPDATE_SVID_OK 0x00030000
+ #define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE 0x00040000
+ /*
+ * The optic module verification command requires bootcode
+	 * v5.0.6 or later; the specific optic module verification command
+	 * requires bootcode v5.2.12 or later.
+ */
+ #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+ #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+ #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
+ #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
+ #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
+
+ #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+ #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
+
+ #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+
+ #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000
+ #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000
+ #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000
+ #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000
+ #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000
+
+ #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
+ #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
+ #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
+ #define REQ_BC_VER_4_SET_MF_BW 0x00060202
+ #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
+
+ #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
+
+ #define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+ #define REQ_BC_VER_4_INITIATE_FLR 0x00070213
+
+ #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+ #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
+ #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
+
+ #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
+
+ #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
+ #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000
+
+ u32 fw_mb_header;
+ #define FW_MSG_CODE_MASK 0xffff0000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
+ #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+ #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+ /* Load common chip is supported from bc 6.0.0 */
+ #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
+
+ #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
+ #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+ #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
+ #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
+ #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
+ #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+ #define FW_MSG_CODE_DCC_DONE 0x30100000
+ #define FW_MSG_CODE_LLDP_DONE 0x40100000
+ #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
+ #define FW_MSG_CODE_DIAG_REFUSE 0x50200000
+ #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
+ #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
+ #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
+ #define FW_MSG_CODE_GET_KEY_DONE 0x80100000
+ #define FW_MSG_CODE_NO_KEY 0x80f00000
+ #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
+ #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
+ #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
+ #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
+ #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
+ #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
+ #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+ #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000
+
+ #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000
+ #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000
+ #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000
+ #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000
+ #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000
+
+ #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
+ #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
+ #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
+ #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
+
+ #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
+
+ #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+ #define DRV_PULSE_SEQ_MASK 0x00007fff
+ #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
+ #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+	 * Indicates to the firmware not to enter OS-absent mode
+	 * when it stops receiving the driver pulse.
+	 * This is used for debugging as well as for PXE (MBA).
+ */
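+
+	/*
+	 * Illustrative sketch (not part of the original HSI): encoding
+	 * the system-time format documented above; e.g. 2024-04-11
+	 * becomes (2024-2001)*12*32 + 4*32 + 11 = 8971.
+	 */
+#if 0	/* example only */
+static u32 example_pulse_system_time(u32 year, u32 month, u32 day)
+{
+	return (year - 2001) * 12 * 32 + month * 32 + day;
+}
+#endif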
+
+ u32 mcp_pulse_mb;
+ #define MCP_PULSE_SEQ_MASK 0x00007fff
+ #define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ /* Indicates to the driver not to assert due to lack
+ * of MCP response */
+ #define MCP_EVENT_MASK 0xffff0000
+ #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 drv_status;
+ #define DRV_STATUS_PMF 0x00000001
+ #define DRV_STATUS_VF_DISABLED 0x00000002
+ #define DRV_STATUS_SET_MF_BW 0x00000004
+ #define DRV_STATUS_LINK_EVENT 0x00000008
+
+ #define DRV_STATUS_OEM_EVENT_MASK 0x00000070
+ #define DRV_STATUS_OEM_DISABLE_ENABLE_PF 0x00000010
+ #define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION 0x00000020
+
+ #define DRV_STATUS_OEM_UPDATE_SVID 0x00000080
+
+ #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
+ #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
+ #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
+ #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
+ #define DRV_STATUS_DCC_RESERVED1 0x00000800
+ #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
+ #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+
+ #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+ #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000
+ #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000
+ #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000
+ #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000
+ #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000
+
+ #define DRV_STATUS_DRV_INFO_REQ 0x04000000
+
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
+ u32 virt_mac_upper;
+ #define VIRT_MAC_SIGN_MASK 0xffff0000
+ #define VIRT_MAC_SIGNATURE 0x564d0000
+ u32 virt_mac_lower;
+
+};
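+
+/*
+ * Illustrative sketch (not part of the original HSI): a mailbox command
+ * is one of the 16-bit DRV_MSG_CODE_* values in the upper half of
+ * drv_mb_header ORed with a rolling sequence number in the lower half;
+ * the firmware echoes the sequence in fw_mb_header when it completes
+ * the command.
+ */
+#if 0	/* example only */
+static u32 example_compose_mb_header(u32 command, u16 *seq)
+{
+	*seq = (*seq + 1) & DRV_MSG_SEQ_NUMBER_MASK;
+	return (command & DRV_MSG_CODE_MASK) | *seq;
+}
+#endif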
+
+
+/****************************************************************************
+ * Management firmware state *
+ ****************************************************************************/
+/* Allocate 440 bytes for management firmware */
+#define MGMTFW_STATE_WORD_SIZE 110
+
+struct mgmtfw_state {
+ u32 opaque[MGMTFW_STATE_WORD_SIZE];
+};
+
+
+/****************************************************************************
+ * Multi-Function configuration *
+ ****************************************************************************/
+struct shared_mf_cfg {
+
+ u32 clp_mb;
+ #define SHARED_MF_CLP_SET_DEFAULT 0x00000000
+ /* set by CLP */
+ #define SHARED_MF_CLP_EXIT 0x00000001
+ /* set by MCP */
+ #define SHARED_MF_CLP_EXIT_DONE 0x00010000
+
+};
+
+struct port_mf_cfg {
+
+ u32 dynamic_cfg; /* device control channel */
+ #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
+
+ u32 reserved[1];
+
+};
+
+struct func_mf_cfg {
+
+ u32 config;
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+ #define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+
+ #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+ #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+ FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+
+ #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
+ #define FUNC_MF_CFG_FUNC_DELETED 0x00000010
+
+ /* PRI */
+ /* 0 - low priority, 3 - high priority */
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
+
+ /* MINBW, MAXBW */
+	/* value range - 0..100, in increments of 100Mbps */
+ #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
+ #define FUNC_MF_CFG_MIN_BW_SHIFT 16
+ #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+ #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
+ #define FUNC_MF_CFG_MAX_BW_SHIFT 24
+ #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
+
+ u32 mac_upper; /* MAC */
+ #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+ #define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+ #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+ #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 e1hov_tag; /* VNI */
+ #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
+
+ /* afex default VLAN ID - 12 bits */
+ #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000
+ #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16
+
+ u32 afex_config;
+ #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff
+ #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8
+ #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100
+ #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000
+ #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16
+
+ u32 reserved;
+};
+
+enum mf_cfg_afex_vlan_mode {
+ FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
+ FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
+ FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
+};
+
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+ u32 func_cfg;
+ #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F
+ #define MACP_FUNC_CFG_FLAGS_SHIFT 0
+ #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
+ #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
+ #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
+ #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+ #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080
+
+ u32 iscsi_mac_addr_upper;
+ u32 iscsi_mac_addr_lower;
+
+ u32 fcoe_mac_addr_upper;
+ u32 fcoe_mac_addr_lower;
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 preserve_data;
+ #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
+ #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+ #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5)
+};
+
+struct mf_cfg {
+
+ struct shared_mf_cfg shared_mf_config; /* 0x4 */
+ /* 0x8*2*2=0x20 */
+ struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX];
+ /* for all chips, there are 8 mf functions */
+ struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+ /*
+ * Extended configuration per function - this array does not exist and
+ * should not be accessed on 57711
+ */
+ struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
+
+/****************************************************************************
+ * Shared Memory Region *
+ ****************************************************************************/
+struct shmem_region { /* SharedMem Offset (size) */
+
+ u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
+ #define SHR_MEM_FORMAT_REV_MASK 0xff000000
+ #define SHR_MEM_FORMAT_REV_ID ('A'<<24)
+ /* validity bits */
+ #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
+ #define SHR_MEM_VALIDITY_MB 0x00200000
+ #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
+ #define SHR_MEM_VALIDITY_RESERVED 0x00000007
+ /* One licensing bit should be set */
+ #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+ #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+ #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+ #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+ /* Active MFW */
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ struct shm_dev_info dev_info; /* 0x8 (0x438) */
+
+ struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+
+ /* FW information (for internal FW use) */
+ u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
+ struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+ struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
+
+#ifdef BMAPI
+ /* This is a variable length array */
+	/* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+	/* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
+
+}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
+
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 64 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* In order to maintain endianness in the mailbox hsi, we want to keep */
+/* using u32. The fw must have the VF right after the PF since this is how */
+/* it accesses arrays (it always expects the VF to reside after the PF, */
+/* which makes the calculation much easier for it). */
+/* In order to satisfy both constraints and keep the struct small, the */
+/* code will abuse the structure defined here to achieve the actual */
+/* partition described above. */
+/****************************************************************************/
+struct fw_flr_ack {
+ u32 pf_ack;
+ u32 vf_ack[1];
+ u32 iov_dis_ack;
+};
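+
+/*
+ * Illustrative sketch (not part of the original HSI): acknowledging FLR
+ * completion for a given VF means setting its bit in the packed vf_ack
+ * dword(s), per the partition described above.
+ */
+#if 0	/* example only */
+static void example_ack_vf_flr(struct fw_flr_ack *ack, int vf_idx)
+{
+	ack->vf_ack[vf_idx / 32] |= 1U << (vf_idx % 32);
+}
+#endif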
+
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ struct fw_flr_ack ack;
+};
+
+struct eee_remote_vals {
+ u32 tx_tw;
+ u32 rx_tw;
+};
+
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric
+ * elements that can span 2, 4, 8 or 16 bits. The underlying array type is a
+ * 32 bit dword array with individual bit-field elements accessed using
+ * shifts and masks.
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
+
+/* the bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * example: an array with 8 entries, each 4 bits wide. This array will fit
+ * into a single dword. The diagrams below show the order of the nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+ *
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
+ ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+ (((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+ ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
+ SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+do { \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+} while (0)
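+
+/*
+ * Illustrative sketch (not part of the original HSI): for a 4-bit,
+ * byte-flipped layout (eb = 4, fb = 8), element 0 lands at bit
+ * position 24 and element 1 at bit position 28 of dword 0:
+ *   SHMEM_ARRAY_BITPOS(0, 4, 8) = (4 - 1 - 0) * 8 + 0 * 4 = 24
+ *   SHMEM_ARRAY_BITPOS(1, 4, 8) = (4 - 1 - 0) * 8 + 1 * 4 = 28
+ */
+#if 0	/* example only */
+static void example_shmem_array(void)
+{
+	u32 arr[1] = { 0 };
+
+	SHMEM_ARRAY_SET(arr, 0, 4, 8, 0x5);	/* bits 27:24 = 0x5 */
+	SHMEM_ARRAY_SET(arr, 1, 4, 8, 0xa);	/* bits 31:28 = 0xa */
+	/* arr[0] == 0xa5000000; SHMEM_ARRAY_GET(arr, 0, 4, 8) == 0x5 */
+}
+#endif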
+
+
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
+#define DCBX_PRI_PG_BITWIDTH 4
+#define DCBX_PRI_PG_FBITS 8
+#define DCBX_PRI_PG_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
+#define DCBX_BW_PG_BITWIDTH 8
+#define DCBX_PG_BW_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG 15
+#define DCBX_MAX_APP_PROTOCOL 16
+#define FCOE_APP_IDX 0
+#define ISCSI_APP_IDX 1
+#define PREDEFINED_APP_IDX_MAX 2
+
+
+/* Big/Little endian have the same representation. */
+struct dcbx_ets_feature {
+ /*
+	 * For the Admin MIB - is this feature supported by the driver;
+	 * for the Local MIB - should this feature be enabled.
+ */
+ u32 enabled;
+ u32 pg_bw_tbl[2];
+ u32 pri_pg_tbl[1];
+};
+
+/* Driver structure in LE */
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+ u8 pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+ u8 pfc_caps;
+ u8 reserved;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 reserved;
+ u8 pfc_caps;
+ u8 pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+#endif
+};
+
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+ u16 app_id;
+ u8 pri_bitmap;
+ u8 appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0xF0
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+ #define DCBX_APP_SF_UDP 0x40
+ #define DCBX_APP_SF_DEFAULT 0x80
+#elif defined(__LITTLE_ENDIAN)
+ u8 appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0xF0
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+ #define DCBX_APP_SF_UDP 0x40
+ #define DCBX_APP_SF_DEFAULT 0x80
+ u8 pri_bitmap;
+ u16 app_id;
+#endif
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+ u8 reserved;
+ u8 default_pri;
+ u8 tc_supported;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 tc_supported;
+ u8 default_pri;
+ u8 reserved;
+#endif
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+ /* PFC feature */
+ struct dcbx_pfc_feature pfc;
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+/* LLDP protocol parameters */
+/* FW structure in BE */
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+ u8 msg_fast_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_tx_interval;
+ u8 admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ u8 reserved1;
+ u8 tx_fast;
+ u8 tx_crd_max;
+ u8 tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+ u8 admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ u8 msg_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_fast_tx_interval;
+ u8 tx_crd;
+ u8 tx_crd_max;
+ u8 tx_fast;
+ u8 reserved1;
+#endif
+ #define REM_CHASSIS_ID_STAT_LEN 4
+ #define REM_PORT_ID_STAT_LEN 4
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+ #define LOCAL_CHASSIS_ID_STAT_LEN 2
+ #define LOCAL_PORT_ID_STAT_LEN 2
+ /* Holds local Chassis ID 8B payload of constant subtype 4. */
+ u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID 8B payload of constant subtype 3. */
+ u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+ /* Number of DCBX frames transmitted. */
+ u32 num_tx_dcbx_pkts;
+ /* Number of DCBX frames received. */
+ u32 num_rx_dcbx_pkts;
+};
+
+/* ADMIN MIB - DCBX local machine default configuration. */
+struct lldp_admin_mib {
+ u32 ver_cfg_flags;
+ #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
+ #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
+ #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
+ #define DCBX_ETS_RECO_TX_ENABLED 0x00000008
+ #define DCBX_ETS_RECO_VALID 0x00000010
+ #define DCBX_ETS_WILLING 0x00000020
+ #define DCBX_PFC_WILLING 0x00000040
+ #define DCBX_APP_WILLING 0x00000080
+ #define DCBX_VERSION_CEE 0x00000100
+ #define DCBX_VERSION_IEEE 0x00000200
+ #define DCBX_DCBX_ENABLED 0x00000400
+ #define DCBX_CEE_VERSION_MASK 0x0000f000
+ #define DCBX_CEE_VERSION_SHIFT 12
+ #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
+ #define DCBX_CEE_MAX_VERSION_SHIFT 16
+ struct dcbx_features features;
+};
+
+/* REMOTE MIB - remote machine DCBX configuration. */
+struct lldp_remote_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ #define DCBX_ETS_TLV_RX 0x00000001
+ #define DCBX_PFC_TLV_RX 0x00000002
+ #define DCBX_APP_TLV_RX 0x00000004
+ #define DCBX_ETS_RX_ERROR 0x00000010
+ #define DCBX_PFC_RX_ERROR 0x00000020
+ #define DCBX_APP_RX_ERROR 0x00000040
+ #define DCBX_ETS_REM_WILLING 0x00000100
+ #define DCBX_PFC_REM_WILLING 0x00000200
+ #define DCBX_APP_REM_WILLING 0x00000400
+ #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ #define DCBX_REMOTE_MIB_VALID 0x00002000
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
+struct lldp_local_mib {
+ u32 prefix_seq_num;
+ /* Indicates if there is mismatch with negotiation results. */
+ u32 error;
+ #define DCBX_LOCAL_ETS_ERROR 0x00000001
+ #define DCBX_LOCAL_PFC_ERROR 0x00000002
+ #define DCBX_LOCAL_APP_ERROR 0x00000004
+ #define DCBX_LOCAL_PFC_MISMATCH 0x00000010
+ #define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ #define DCBX_REMOTE_MIB_ERROR 0x00000040
+ #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080
+ #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100
+ #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
+
+/***********************************************************/
+/* Elink section */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+ u32 req_duplex;
+ #define REQ_DUPLEX_PHY0_MASK 0x0000ffff
+ #define REQ_DUPLEX_PHY0_SHIFT 0
+ #define REQ_DUPLEX_PHY1_MASK 0xffff0000
+ #define REQ_DUPLEX_PHY1_SHIFT 16
+ u32 req_flow_ctrl;
+ #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff
+ #define REQ_FLOW_CTRL_PHY0_SHIFT 0
+ #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000
+ #define REQ_FLOW_CTRL_PHY1_SHIFT 16
+ u32 req_line_speed; /* Also determine AutoNeg */
+ #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff
+ #define REQ_LINE_SPD_PHY0_SHIFT 0
+ #define REQ_LINE_SPD_PHY1_MASK 0xffff0000
+ #define REQ_LINE_SPD_PHY1_SHIFT 16
+ u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+ u32 additional_config;
+ #define REQ_FC_AUTO_ADV_MASK 0x0000ffff
+ #define REQ_FC_AUTO_ADV0_SHIFT 0
+ #define NO_LFA_DUE_TO_DCC_MASK 0x00010000
+ u32 lfa_sts;
+ #define LFA_LINK_FLAP_REASON_OFFSET 0
+ #define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+ #define LFA_LINK_DOWN 0x1
+ #define LFA_LOOPBACK_ENABLED 0x2
+ #define LFA_DUPLEX_MISMATCH 0x3
+ #define LFA_MFW_IS_TOO_OLD 0x4
+ #define LFA_LINK_SPEED_MISMATCH 0x5
+ #define LFA_FLOW_CTRL_MISMATCH 0x6
+ #define LFA_SPEED_CAP_MISMATCH 0x7
+ #define LFA_DCC_LFA_DISABLED 0x8
+ #define LFA_EEE_MISMATCH 0x9
+
+ #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+ #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+
+ #define LINK_FLAP_COUNT_OFFSET 16
+ #define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ #define LFA_FLAGS_MASK 0xff000000
+ #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
+};
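+
+/*
+ * Illustrative sketch (not part of the original HSI): lfa_sts packs the
+ * last link-flap reason and two counters, extracted with the
+ * offset/mask pairs above.
+ */
+#if 0	/* example only */
+static u32 example_link_flap_count(u32 lfa_sts)
+{
+	return (lfa_sts & LINK_FLAP_COUNT_MASK) >> LINK_FLAP_COUNT_OFFSET;
+}
+#endif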
+
+/* Used to support NCSI get OS driver version.
+ * On driver load the version value will be set;
+ * on driver unload a value of 0x0 will be set.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED 0
+
+	/* personalities order is important */
+#define DRV_PERS_ETHERNET 0
+#define DRV_PERS_ISCSI 1
+#define DRV_PERS_FCOE 2
+
+	/* shmem2 struct is constant; can't add more personalities here */
+#define MAX_DRV_PERS 3
+ u32 versions[MAX_DRV_PERS];
+};
+
+struct ncsi_oem_fcoe_features {
+ u32 fcoe_features1;
+ #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
+ #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0
+
+ #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000
+ #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16
+
+ u32 fcoe_features2;
+ #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF
+ #define FCOE_FEATURES2_EXCHANGES_OFFSET 0
+
+ #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000
+ #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16
+
+ u32 fcoe_features3;
+ #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF
+ #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0
+
+ #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000
+ #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16
+
+ u32 fcoe_features4;
+ #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F
+ #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
+};
+
+enum curr_cfg_method_e {
+ CURR_CFG_MET_NONE = 0, /* default config */
+ CURR_CFG_MET_OS = 1,
+ CURR_CFG_MET_VENDOR_SPEC = 2,/* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+ struct bdn_fc_npiv_cfg fc_npiv_cfg;
+ struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct mdump_driver_info {
+ u32 epoc;
+ u32 drv_ver;
+ u32 fw_ver;
+
+ u32 valid_dump;
+ #define FIRST_DUMP_VALID (1 << 0)
+ #define SECOND_DUMP_VALID (1 << 1)
+
+ u32 flags;
+ #define ENABLE_ALL_TRIGGERS (0x7fffffff)
+ #define TRIGGER_MDUMP_ONCE (1 << 31)
+};
+
+struct ncsi_oem_data {
+ u32 driver_version[4];
+ struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
+};
+
+struct shmem2_region {
+
+ u32 size; /* 0x0000 */
+
+ u32 dcc_support; /* 0x0004 */
+ #define SHMEM_DCC_SUPPORT_NONE 0x00000000
+ #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
+ #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
+ #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
+ #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
+ #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
+
+ u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
+ /*
+ * For backwards compatibility, if the mf_cfg_addr does not exist
+	 * (the size field is smaller than 0xc), the mf_cfg resides at the
+	 * end of struct shmem_region.
+ */
+ u32 mf_cfg_addr; /* 0x0010 */
+ #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
+
+ struct fw_flr_mb flr_mb; /* 0x0014 */
+ u32 dcbx_lldp_params_offset; /* 0x0028 */
+ #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ u32 dcbx_neg_res_offset; /* 0x002c */
+ #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ u32 dcbx_remote_mib_offset; /* 0x0030 */
+ #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
+ /*
+	 * The other_shmemX_base_addr fields hold the other path's shmem
+	 * addresses, required for example during common phy init, or for
+	 * path1 to find the mcp debug trace, which is located at an offset
+	 * from path0's shmem
+ */
+ u32 other_shmem_base_addr; /* 0x0034 */
+ u32 other_shmem2_base_addr; /* 0x0038 */
+ /*
+	 * mcp_vf_disabled is set by the MCP to inform the driver of VFs
+	 * which were disabled/FLRed
+ */
+ u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+ u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
+ #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+
+ /*
+	 * The edebug_driver_if field is used to transfer messages between
+	 * the edebug app and the driver through shmem2.
+ *
+ * message format:
+ * bits 0-2 - function number / instance of driver to perform request
+ * bits 3-5 - op code / is_ack?
+ * bits 6-63 - data
+ */
+ u32 edebug_driver_if[2]; /* 0x0068 */
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2
+ #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3
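+
+	/*
+	 * Illustrative sketch (not part of the original HSI): decoding
+	 * the message layout documented above from the two dwords.
+	 */
+#if 0	/* example only */
+static void example_parse_edebug(const u32 *msg, u32 *func, u32 *op)
+{
+	*func = msg[0] & 0x7;		/* bits 0-2 */
+	*op = (msg[0] >> 3) & 0x7;	/* bits 3-5 */
+	/* bits 6-63 carry the data */
+}
+#endif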
+
+ u32 nvm_retain_bitmap_addr; /* 0x0070 */
+
+ /* afex support of that driver */
+ u32 afex_driver_support; /* 0x0074 */
+ #define SHMEM_AFEX_VERSION_MASK 0x100f
+ #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001
+ #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000
+
+ /* driver receives addr in scratchpad to which it should respond */
+ u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
+
+ /* generic params from MCP to driver (value depends on the msg sent
+	 * to the driver)
+ */
+ u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */
+ u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */
+
+ u32 swim_base_addr; /* 0x0108 */
+ u32 swim_funcs;
+ u32 swim_main_cb;
+
+ /* bitmap notifying which VIF profiles stored in nvram are enabled by
+ * switch
+ */
+ u32 afex_profiles_enabled[2];
+
+ /* generic flags controlled by the driver */
+ u32 drv_flags;
+ #define DRV_FLAGS_DCB_CONFIGURED 0x0
+ #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1
+ #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2
+
+ #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \
+ (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \
+ (1 << DRV_FLAGS_DCB_MFW_CONFIGURED))
+ /* pointer to extended dev_info shared data copied from nvm image */
+ u32 extended_dev_info_shared_addr;
+ u32 ncsi_oem_data_addr;
+
+ u32 ocsd_host_addr; /* initialized by option ROM */
+ u32 ocbb_host_addr; /* initialized by option ROM */
+ u32 ocsd_req_update_interval; /* initialized by option ROM */
+ u32 temperature_in_half_celsius;
+ u32 glob_struct_in_host;
+
+ u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000
+
+ u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+#define DRV_FLAGS_MTU_MASK 0xffff0000
+#define DRV_FLAGS_MTU_SHIFT 16
+
+ u32 extended_dev_info_shared_cfg_size;
+
+ u32 dcbx_en[PORT_MAX];
+
+ /* The offset points to the multi threaded meta structure */
+ u32 multi_thread_data_offset;
+
+ /* address of DMAable host address holding values from the drivers */
+ u32 drv_info_host_addr_lo;
+ u32 drv_info_host_addr_hi;
+
+ /* general values written by the MFW (such as current version) */
+ u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK 0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT 0
+#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
+ u32 ibft_host_addr; /* initialized by option ROM */
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ u32 reserved[E2_FUNC_MAX];
+
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+	 * The supported/adv. modes in bits 27:16 originate from the
+	 * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * 30:29 are 2'b11.
+	 * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+	 * value. When 1'b1 those bits contain a value in units of 16
+	 * microseconds.
+ */
+ u32 eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
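+
+	/*
+	 * Illustrative sketch (not part of the original HSI): per the
+	 * layout documented above, Tx LPI is asserted only when both
+	 * bits 30:29 (negotiated and LPI requested) are set.
+	 */
+#if 0	/* example only */
+static bool example_eee_tx_lpi_active(u32 eee_status)
+{
+	u32 bits = SHMEM_EEE_ACTIVE_BIT | SHMEM_EEE_LPI_REQUESTED_BIT;
+
+	return (eee_status & bits) == bits;
+}
+#endif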
+
+ u32 sizeof_port_stats;
+
+ /* Link Flap Avoidance */
+ u32 lfa_host_addr[PORT_MAX];
+ u32 reserved1;
+
+ u32 reserved2; /* Offset 0x148 */
+ u32 reserved3; /* Offset 0x14C */
+ u32 reserved4; /* Offset 0x150 */
+ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
+ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_ATTR_84858 0x00000002
+ #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
+ #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
+ #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
+ #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000
+ #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000
+
+ u32 reserved5[2];
+ u32 link_change_count[PORT_MAX]; /* Offset 0x160-0x164 */
+ #define LINK_CHANGE_COUNT_MASK 0xff /* Offset 0x168 */
+ /* driver version for each personality */
+ struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+ /* Flag to the driver that PF's drv_info_host_addr buffer was read */
+ u32 mfw_drv_indication;
+
+ /* We use indication for each PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+ union { /* For various OEMs */ /* Offset 0x1a0 */
+ u8 storage_boot_prog[E2_FUNC_MAX];
+ #define STORAGE_BOOT_PROG_MASK 0x000000FF
+ #define STORAGE_BOOT_PROG_NONE 0x00000000
+ #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002
+ #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002
+ #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004
+ #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008
+ #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008
+ #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010
+ #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020
+ #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040
+ #define STORAGE_BOOT_PROG_COMPLETED 0x00000080
+
+ u32 oem_i2c_data_addr;
+ };
+
+	/* 9 entries for the C2S PCP map: one for each inner VLAN PCP + 1 default */
+ /* For PCP values 0-3 use the map lower */
+ /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+ * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+ */
+ u32 c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */
+
+ /* For PCP values 4-7 use the map upper */
+ /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+ * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+ */
+ u32 c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */
+
+	/* For the PCP default value, use the MSB of the default map */
+ u32 c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */
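+
+	/*
+	 * Illustrative sketch (not part of the original HSI): per the
+	 * byte layout above, inner-VLAN PCP n maps through byte
+	 * (3 - (n & 3)) of the lower (PCP 0-3) or upper (PCP 4-7) map.
+	 */
+#if 0	/* example only */
+static u8 example_c2s_pcp(u32 map_lower, u32 map_upper, u8 pcp)
+{
+	u32 map = (pcp < 4) ? map_lower : map_upper;
+
+	return (map >> ((3 - (pcp & 3)) * 8)) & 0xff;
+}
+#endif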
+
+ /* FC_NPIV table offset in NVRAM */
+ u32 fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */
+
+ /* Shows last method that changed configuration of this device */
+ enum curr_cfg_method_e curr_cfg; /* 0x1dc */
+
+	/* Storm FW version, should be kept in the format 0xMMmmbbdd:
+	 * MM - Major, mm - Minor, bb - Build, dd - Drop
+ */
+ u32 netproc_fw_ver; /* 0x1e0 */
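+
+	/*
+	 * Illustrative sketch (not part of the original HSI): packing a
+	 * version into the 0xMMmmbbdd layout noted above, e.g.
+	 * 7.13.1.0 becomes 0x070d0100.
+	 */
+#if 0	/* example only */
+static u32 example_pack_storm_fw_ver(u8 major, u8 minor, u8 build, u8 drop)
+{
+	return ((u32)major << 24) | (minor << 16) | (build << 8) | drop;
+}
+#endif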
+
+ /* Option ROM SMASH CLP version */
+ u32 clp_ver; /* 0x1e4 */
+
+ u32 pcie_bus_num; /* 0x1e8 */
+
+ u32 sriov_switch_mode; /* 0x1ec */
+ #define SRIOV_SWITCH_MODE_NONE 0x0
+ #define SRIOV_SWITCH_MODE_VEB 0x1
+ #define SRIOV_SWITCH_MODE_VEPA 0x2
+
+ u8 rsrv2[E2_FUNC_MAX]; /* 0x1f0 */
+
+ u32 img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */
+
+ u32 mtu_size[E2_FUNC_MAX]; /* 0x1f8 */
+
+ u32 os_driver_state[E2_FUNC_MAX]; /* 0x208 */
+ #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */
+ #define OS_DRIVER_STATE_LOADING 1 /* transition state */
+ #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */
+ #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */
+
+ /* mini dump driver info */
+ struct mdump_driver_info drv_info; /* 0x218 */
+};
+
+
+struct emac_stats {
+ u32 rx_stat_ifhcinoctets;
+ u32 rx_stat_ifhcinbadoctets;
+ u32 rx_stat_etherstatsfragments;
+ u32 rx_stat_ifhcinucastpkts;
+ u32 rx_stat_ifhcinmulticastpkts;
+ u32 rx_stat_ifhcinbroadcastpkts;
+ u32 rx_stat_dot3statsfcserrors;
+ u32 rx_stat_dot3statsalignmenterrors;
+ u32 rx_stat_dot3statscarriersenseerrors;
+ u32 rx_stat_xonpauseframesreceived;
+ u32 rx_stat_xoffpauseframesreceived;
+ u32 rx_stat_maccontrolframesreceived;
+ u32 rx_stat_xoffstateentered;
+ u32 rx_stat_dot3statsframestoolong;
+ u32 rx_stat_etherstatsjabbers;
+ u32 rx_stat_etherstatsundersizepkts;
+ u32 rx_stat_etherstatspkts64octets;
+ u32 rx_stat_etherstatspkts65octetsto127octets;
+ u32 rx_stat_etherstatspkts128octetsto255octets;
+ u32 rx_stat_etherstatspkts256octetsto511octets;
+ u32 rx_stat_etherstatspkts512octetsto1023octets;
+ u32 rx_stat_etherstatspkts1024octetsto1522octets;
+ u32 rx_stat_etherstatspktsover1522octets;
+
+ u32 rx_stat_falsecarriererrors;
+
+ u32 tx_stat_ifhcoutoctets;
+ u32 tx_stat_ifhcoutbadoctets;
+ u32 tx_stat_etherstatscollisions;
+ u32 tx_stat_outxonsent;
+ u32 tx_stat_outxoffsent;
+ u32 tx_stat_flowcontroldone;
+ u32 tx_stat_dot3statssinglecollisionframes;
+ u32 tx_stat_dot3statsmultiplecollisionframes;
+ u32 tx_stat_dot3statsdeferredtransmissions;
+ u32 tx_stat_dot3statsexcessivecollisions;
+ u32 tx_stat_dot3statslatecollisions;
+ u32 tx_stat_ifhcoutucastpkts;
+ u32 tx_stat_ifhcoutmulticastpkts;
+ u32 tx_stat_ifhcoutbroadcastpkts;
+ u32 tx_stat_etherstatspkts64octets;
+ u32 tx_stat_etherstatspkts65octetsto127octets;
+ u32 tx_stat_etherstatspkts128octetsto255octets;
+ u32 tx_stat_etherstatspkts256octetsto511octets;
+ u32 tx_stat_etherstatspkts512octetsto1023octets;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets;
+ u32 tx_stat_etherstatspktsover1522octets;
+ u32 tx_stat_dot3statsinternalmactransmiterrors;
+};
+
+
+struct bmac1_stats {
+ u32 tx_stat_gtpkt_lo;
+ u32 tx_stat_gtpkt_hi;
+ u32 tx_stat_gtxpf_lo;
+ u32 tx_stat_gtxpf_hi;
+ u32 tx_stat_gtfcs_lo;
+ u32 tx_stat_gtfcs_hi;
+ u32 tx_stat_gtmca_lo;
+ u32 tx_stat_gtmca_hi;
+ u32 tx_stat_gtbca_lo;
+ u32 tx_stat_gtbca_hi;
+ u32 tx_stat_gtfrg_lo;
+ u32 tx_stat_gtfrg_hi;
+ u32 tx_stat_gtovr_lo;
+ u32 tx_stat_gtovr_hi;
+ u32 tx_stat_gt64_lo;
+ u32 tx_stat_gt64_hi;
+ u32 tx_stat_gt127_lo;
+ u32 tx_stat_gt127_hi;
+ u32 tx_stat_gt255_lo;
+ u32 tx_stat_gt255_hi;
+ u32 tx_stat_gt511_lo;
+ u32 tx_stat_gt511_hi;
+ u32 tx_stat_gt1023_lo;
+ u32 tx_stat_gt1023_hi;
+ u32 tx_stat_gt1518_lo;
+ u32 tx_stat_gt1518_hi;
+ u32 tx_stat_gt2047_lo;
+ u32 tx_stat_gt2047_hi;
+ u32 tx_stat_gt4095_lo;
+ u32 tx_stat_gt4095_hi;
+ u32 tx_stat_gt9216_lo;
+ u32 tx_stat_gt9216_hi;
+ u32 tx_stat_gt16383_lo;
+ u32 tx_stat_gt16383_hi;
+ u32 tx_stat_gtmax_lo;
+ u32 tx_stat_gtmax_hi;
+ u32 tx_stat_gtufl_lo;
+ u32 tx_stat_gtufl_hi;
+ u32 tx_stat_gterr_lo;
+ u32 tx_stat_gterr_hi;
+ u32 tx_stat_gtbyt_lo;
+ u32 tx_stat_gtbyt_hi;
+
+ u32 rx_stat_gr64_lo;
+ u32 rx_stat_gr64_hi;
+ u32 rx_stat_gr127_lo;
+ u32 rx_stat_gr127_hi;
+ u32 rx_stat_gr255_lo;
+ u32 rx_stat_gr255_hi;
+ u32 rx_stat_gr511_lo;
+ u32 rx_stat_gr511_hi;
+ u32 rx_stat_gr1023_lo;
+ u32 rx_stat_gr1023_hi;
+ u32 rx_stat_gr1518_lo;
+ u32 rx_stat_gr1518_hi;
+ u32 rx_stat_gr2047_lo;
+ u32 rx_stat_gr2047_hi;
+ u32 rx_stat_gr4095_lo;
+ u32 rx_stat_gr4095_hi;
+ u32 rx_stat_gr9216_lo;
+ u32 rx_stat_gr9216_hi;
+ u32 rx_stat_gr16383_lo;
+ u32 rx_stat_gr16383_hi;
+ u32 rx_stat_grmax_lo;
+ u32 rx_stat_grmax_hi;
+ u32 rx_stat_grpkt_lo;
+ u32 rx_stat_grpkt_hi;
+ u32 rx_stat_grfcs_lo;
+ u32 rx_stat_grfcs_hi;
+ u32 rx_stat_grmca_lo;
+ u32 rx_stat_grmca_hi;
+ u32 rx_stat_grbca_lo;
+ u32 rx_stat_grbca_hi;
+ u32 rx_stat_grxcf_lo;
+ u32 rx_stat_grxcf_hi;
+ u32 rx_stat_grxpf_lo;
+ u32 rx_stat_grxpf_hi;
+ u32 rx_stat_grxuo_lo;
+ u32 rx_stat_grxuo_hi;
+ u32 rx_stat_grjbr_lo;
+ u32 rx_stat_grjbr_hi;
+ u32 rx_stat_grovr_lo;
+ u32 rx_stat_grovr_hi;
+ u32 rx_stat_grflr_lo;
+ u32 rx_stat_grflr_hi;
+ u32 rx_stat_grmeg_lo;
+ u32 rx_stat_grmeg_hi;
+ u32 rx_stat_grmeb_lo;
+ u32 rx_stat_grmeb_hi;
+ u32 rx_stat_grbyt_lo;
+ u32 rx_stat_grbyt_hi;
+ u32 rx_stat_grund_lo;
+ u32 rx_stat_grund_hi;
+ u32 rx_stat_grfrg_lo;
+ u32 rx_stat_grfrg_hi;
+ u32 rx_stat_grerb_lo;
+ u32 rx_stat_grerb_hi;
+ u32 rx_stat_grfre_lo;
+ u32 rx_stat_grfre_hi;
+ u32 rx_stat_gripj_lo;
+ u32 rx_stat_gripj_hi;
+};
+
+struct bmac2_stats {
+ u32 tx_stat_gtpk_lo; /* gtpok */
+ u32 tx_stat_gtpk_hi; /* gtpok */
+ u32 tx_stat_gtxpf_lo; /* gtpf */
+ u32 tx_stat_gtxpf_hi; /* gtpf */
+ u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
+ u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
+ u32 tx_stat_gtfcs_lo;
+ u32 tx_stat_gtfcs_hi;
+ u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
+ u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
+ u32 tx_stat_gtmca_lo;
+ u32 tx_stat_gtmca_hi;
+ u32 tx_stat_gtbca_lo;
+ u32 tx_stat_gtbca_hi;
+ u32 tx_stat_gtovr_lo;
+ u32 tx_stat_gtovr_hi;
+ u32 tx_stat_gtfrg_lo;
+ u32 tx_stat_gtfrg_hi;
+ u32 tx_stat_gtpkt1_lo; /* gtpkt */
+ u32 tx_stat_gtpkt1_hi; /* gtpkt */
+ u32 tx_stat_gt64_lo;
+ u32 tx_stat_gt64_hi;
+ u32 tx_stat_gt127_lo;
+ u32 tx_stat_gt127_hi;
+ u32 tx_stat_gt255_lo;
+ u32 tx_stat_gt255_hi;
+ u32 tx_stat_gt511_lo;
+ u32 tx_stat_gt511_hi;
+ u32 tx_stat_gt1023_lo;
+ u32 tx_stat_gt1023_hi;
+ u32 tx_stat_gt1518_lo;
+ u32 tx_stat_gt1518_hi;
+ u32 tx_stat_gt2047_lo;
+ u32 tx_stat_gt2047_hi;
+ u32 tx_stat_gt4095_lo;
+ u32 tx_stat_gt4095_hi;
+ u32 tx_stat_gt9216_lo;
+ u32 tx_stat_gt9216_hi;
+ u32 tx_stat_gt16383_lo;
+ u32 tx_stat_gt16383_hi;
+ u32 tx_stat_gtmax_lo;
+ u32 tx_stat_gtmax_hi;
+ u32 tx_stat_gtufl_lo;
+ u32 tx_stat_gtufl_hi;
+ u32 tx_stat_gterr_lo;
+ u32 tx_stat_gterr_hi;
+ u32 tx_stat_gtbyt_lo;
+ u32 tx_stat_gtbyt_hi;
+
+ u32 rx_stat_gr64_lo;
+ u32 rx_stat_gr64_hi;
+ u32 rx_stat_gr127_lo;
+ u32 rx_stat_gr127_hi;
+ u32 rx_stat_gr255_lo;
+ u32 rx_stat_gr255_hi;
+ u32 rx_stat_gr511_lo;
+ u32 rx_stat_gr511_hi;
+ u32 rx_stat_gr1023_lo;
+ u32 rx_stat_gr1023_hi;
+ u32 rx_stat_gr1518_lo;
+ u32 rx_stat_gr1518_hi;
+ u32 rx_stat_gr2047_lo;
+ u32 rx_stat_gr2047_hi;
+ u32 rx_stat_gr4095_lo;
+ u32 rx_stat_gr4095_hi;
+ u32 rx_stat_gr9216_lo;
+ u32 rx_stat_gr9216_hi;
+ u32 rx_stat_gr16383_lo;
+ u32 rx_stat_gr16383_hi;
+ u32 rx_stat_grmax_lo;
+ u32 rx_stat_grmax_hi;
+ u32 rx_stat_grpkt_lo;
+ u32 rx_stat_grpkt_hi;
+ u32 rx_stat_grfcs_lo;
+ u32 rx_stat_grfcs_hi;
+ u32 rx_stat_gruca_lo;
+ u32 rx_stat_gruca_hi;
+ u32 rx_stat_grmca_lo;
+ u32 rx_stat_grmca_hi;
+ u32 rx_stat_grbca_lo;
+ u32 rx_stat_grbca_hi;
+ u32 rx_stat_grxpf_lo; /* grpf */
+ u32 rx_stat_grxpf_hi; /* grpf */
+ u32 rx_stat_grpp_lo;
+ u32 rx_stat_grpp_hi;
+ u32 rx_stat_grxuo_lo; /* gruo */
+ u32 rx_stat_grxuo_hi; /* gruo */
+ u32 rx_stat_grjbr_lo;
+ u32 rx_stat_grjbr_hi;
+ u32 rx_stat_grovr_lo;
+ u32 rx_stat_grovr_hi;
+ u32 rx_stat_grxcf_lo; /* grcf */
+ u32 rx_stat_grxcf_hi; /* grcf */
+ u32 rx_stat_grflr_lo;
+ u32 rx_stat_grflr_hi;
+ u32 rx_stat_grpok_lo;
+ u32 rx_stat_grpok_hi;
+ u32 rx_stat_grmeg_lo;
+ u32 rx_stat_grmeg_hi;
+ u32 rx_stat_grmeb_lo;
+ u32 rx_stat_grmeb_hi;
+ u32 rx_stat_grbyt_lo;
+ u32 rx_stat_grbyt_hi;
+ u32 rx_stat_grund_lo;
+ u32 rx_stat_grund_hi;
+ u32 rx_stat_grfrg_lo;
+ u32 rx_stat_grfrg_hi;
+ u32 rx_stat_grerb_lo; /* grerrbyt */
+ u32 rx_stat_grerb_hi; /* grerrbyt */
+ u32 rx_stat_grfre_lo; /* grfrerr */
+ u32 rx_stat_grfre_hi; /* grfrerr */
+ u32 rx_stat_gripj_lo;
+ u32 rx_stat_gripj_hi;
+};
+
+struct mstat_stats {
+ struct {
+ /* NOTE: MSTAT on E3 has a bug where this register's contents are
+ * actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp
+ */
+ u32 tx_gtxpok_lo;
+ u32 tx_gtxpok_hi;
+ u32 tx_gtxpf_lo;
+ u32 tx_gtxpf_hi;
+ u32 tx_gtxpp_lo;
+ u32 tx_gtxpp_hi;
+ u32 tx_gtfcs_lo;
+ u32 tx_gtfcs_hi;
+ u32 tx_gtuca_lo;
+ u32 tx_gtuca_hi;
+ u32 tx_gtmca_lo;
+ u32 tx_gtmca_hi;
+ u32 tx_gtgca_lo;
+ u32 tx_gtgca_hi;
+ u32 tx_gtpkt_lo;
+ u32 tx_gtpkt_hi;
+ u32 tx_gt64_lo;
+ u32 tx_gt64_hi;
+ u32 tx_gt127_lo;
+ u32 tx_gt127_hi;
+ u32 tx_gt255_lo;
+ u32 tx_gt255_hi;
+ u32 tx_gt511_lo;
+ u32 tx_gt511_hi;
+ u32 tx_gt1023_lo;
+ u32 tx_gt1023_hi;
+ u32 tx_gt1518_lo;
+ u32 tx_gt1518_hi;
+ u32 tx_gt2047_lo;
+ u32 tx_gt2047_hi;
+ u32 tx_gt4095_lo;
+ u32 tx_gt4095_hi;
+ u32 tx_gt9216_lo;
+ u32 tx_gt9216_hi;
+ u32 tx_gt16383_lo;
+ u32 tx_gt16383_hi;
+ u32 tx_gtufl_lo;
+ u32 tx_gtufl_hi;
+ u32 tx_gterr_lo;
+ u32 tx_gterr_hi;
+ u32 tx_gtbyt_lo;
+ u32 tx_gtbyt_hi;
+ u32 tx_collisions_lo;
+ u32 tx_collisions_hi;
+ u32 tx_singlecollision_lo;
+ u32 tx_singlecollision_hi;
+ u32 tx_multiplecollisions_lo;
+ u32 tx_multiplecollisions_hi;
+ u32 tx_deferred_lo;
+ u32 tx_deferred_hi;
+ u32 tx_excessivecollisions_lo;
+ u32 tx_excessivecollisions_hi;
+ u32 tx_latecollisions_lo;
+ u32 tx_latecollisions_hi;
+ } stats_tx;
+
+ struct {
+ u32 rx_gr64_lo;
+ u32 rx_gr64_hi;
+ u32 rx_gr127_lo;
+ u32 rx_gr127_hi;
+ u32 rx_gr255_lo;
+ u32 rx_gr255_hi;
+ u32 rx_gr511_lo;
+ u32 rx_gr511_hi;
+ u32 rx_gr1023_lo;
+ u32 rx_gr1023_hi;
+ u32 rx_gr1518_lo;
+ u32 rx_gr1518_hi;
+ u32 rx_gr2047_lo;
+ u32 rx_gr2047_hi;
+ u32 rx_gr4095_lo;
+ u32 rx_gr4095_hi;
+ u32 rx_gr9216_lo;
+ u32 rx_gr9216_hi;
+ u32 rx_gr16383_lo;
+ u32 rx_gr16383_hi;
+ u32 rx_grpkt_lo;
+ u32 rx_grpkt_hi;
+ u32 rx_grfcs_lo;
+ u32 rx_grfcs_hi;
+ u32 rx_gruca_lo;
+ u32 rx_gruca_hi;
+ u32 rx_grmca_lo;
+ u32 rx_grmca_hi;
+ u32 rx_grbca_lo;
+ u32 rx_grbca_hi;
+ u32 rx_grxpf_lo;
+ u32 rx_grxpf_hi;
+ u32 rx_grxpp_lo;
+ u32 rx_grxpp_hi;
+ u32 rx_grxuo_lo;
+ u32 rx_grxuo_hi;
+ u32 rx_grovr_lo;
+ u32 rx_grovr_hi;
+ u32 rx_grxcf_lo;
+ u32 rx_grxcf_hi;
+ u32 rx_grflr_lo;
+ u32 rx_grflr_hi;
+ u32 rx_grpok_lo;
+ u32 rx_grpok_hi;
+ u32 rx_grbyt_lo;
+ u32 rx_grbyt_hi;
+ u32 rx_grund_lo;
+ u32 rx_grund_hi;
+ u32 rx_grfrg_lo;
+ u32 rx_grfrg_hi;
+ u32 rx_grerb_lo;
+ u32 rx_grerb_hi;
+ u32 rx_grfre_lo;
+ u32 rx_grfre_hi;
+
+ u32 rx_alignmenterrors_lo;
+ u32 rx_alignmenterrors_hi;
+ u32 rx_falsecarrier_lo;
+ u32 rx_falsecarrier_hi;
+ u32 rx_llfcmsgcnt_lo;
+ u32 rx_llfcmsgcnt_hi;
+ } stats_rx;
+};
+
+union mac_stats {
+ struct emac_stats emac_stats;
+ struct bmac1_stats bmac1_stats;
+ struct bmac2_stats bmac2_stats;
+ struct mstat_stats mstat_stats;
+};
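+
+/* Editorial sketch, not part of the original header: every counter in
+ * these MAC statistics blocks is split into a _hi/_lo pair of 32-bit
+ * words. A driver would typically recombine such a pair into a u64
+ * with a helper along these lines (the bnx2x statistics code uses a
+ * similar HILO_U64() macro); the function name here is hypothetical.
+ */
+static inline u64 mac_stats_hilo_example(u32 hi, u32 lo)
+{
+ return ((u64)hi << 32) | lo; /* e.g. rx_stat_grbyt_hi/_lo */
+}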
+
+
+struct mac_stx {
+ /* in_bad_octets */
+ u32 rx_stat_ifhcinbadoctets_hi;
+ u32 rx_stat_ifhcinbadoctets_lo;
+
+ /* out_bad_octets */
+ u32 tx_stat_ifhcoutbadoctets_hi;
+ u32 tx_stat_ifhcoutbadoctets_lo;
+
+ /* crc_receive_errors */
+ u32 rx_stat_dot3statsfcserrors_hi;
+ u32 rx_stat_dot3statsfcserrors_lo;
+ /* alignment_errors */
+ u32 rx_stat_dot3statsalignmenterrors_hi;
+ u32 rx_stat_dot3statsalignmenterrors_lo;
+ /* carrier_sense_errors */
+ u32 rx_stat_dot3statscarriersenseerrors_hi;
+ u32 rx_stat_dot3statscarriersenseerrors_lo;
+ /* false_carrier_detections */
+ u32 rx_stat_falsecarriererrors_hi;
+ u32 rx_stat_falsecarriererrors_lo;
+
+ /* runt_packets_received */
+ u32 rx_stat_etherstatsundersizepkts_hi;
+ u32 rx_stat_etherstatsundersizepkts_lo;
+ /* jabber_packets_received */
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+
+ /* error_runt_packets_received */
+ u32 rx_stat_etherstatsfragments_hi;
+ u32 rx_stat_etherstatsfragments_lo;
+ /* error_jabber_packets_received */
+ u32 rx_stat_etherstatsjabbers_hi;
+ u32 rx_stat_etherstatsjabbers_lo;
+
+ /* control_frames_received */
+ u32 rx_stat_maccontrolframesreceived_hi;
+ u32 rx_stat_maccontrolframesreceived_lo;
+ u32 rx_stat_mac_xpf_hi;
+ u32 rx_stat_mac_xpf_lo;
+ u32 rx_stat_mac_xcf_hi;
+ u32 rx_stat_mac_xcf_lo;
+
+ /* xoff_state_entered */
+ u32 rx_stat_xoffstateentered_hi;
+ u32 rx_stat_xoffstateentered_lo;
+ /* pause_xon_frames_received */
+ u32 rx_stat_xonpauseframesreceived_hi;
+ u32 rx_stat_xonpauseframesreceived_lo;
+ /* pause_xoff_frames_received */
+ u32 rx_stat_xoffpauseframesreceived_hi;
+ u32 rx_stat_xoffpauseframesreceived_lo;
+ /* pause_xon_frames_transmitted */
+ u32 tx_stat_outxonsent_hi;
+ u32 tx_stat_outxonsent_lo;
+ /* pause_xoff_frames_transmitted */
+ u32 tx_stat_outxoffsent_hi;
+ u32 tx_stat_outxoffsent_lo;
+ /* flow_control_done */
+ u32 tx_stat_flowcontroldone_hi;
+ u32 tx_stat_flowcontroldone_lo;
+
+ /* ether_stats_collisions */
+ u32 tx_stat_etherstatscollisions_hi;
+ u32 tx_stat_etherstatscollisions_lo;
+ /* single_collision_transmit_frames */
+ u32 tx_stat_dot3statssinglecollisionframes_hi;
+ u32 tx_stat_dot3statssinglecollisionframes_lo;
+ /* multiple_collision_transmit_frames */
+ u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+ u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+ /* deferred_transmissions */
+ u32 tx_stat_dot3statsdeferredtransmissions_hi;
+ u32 tx_stat_dot3statsdeferredtransmissions_lo;
+ /* excessive_collision_frames */
+ u32 tx_stat_dot3statsexcessivecollisions_hi;
+ u32 tx_stat_dot3statsexcessivecollisions_lo;
+ /* late_collision_frames */
+ u32 tx_stat_dot3statslatecollisions_hi;
+ u32 tx_stat_dot3statslatecollisions_lo;
+
+ /* frames_transmitted_64_bytes */
+ u32 tx_stat_etherstatspkts64octets_hi;
+ u32 tx_stat_etherstatspkts64octets_lo;
+ /* frames_transmitted_65_127_bytes */
+ u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+ u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+ /* frames_transmitted_128_255_bytes */
+ u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+ u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+ /* frames_transmitted_256_511_bytes */
+ u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+ u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+ /* frames_transmitted_512_1023_bytes */
+ u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+ u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+ /* frames_transmitted_1024_1522_bytes */
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ /* frames_transmitted_1523_9022_bytes */
+ u32 tx_stat_etherstatspktsover1522octets_hi;
+ u32 tx_stat_etherstatspktsover1522octets_lo;
+ u32 tx_stat_mac_2047_hi;
+ u32 tx_stat_mac_2047_lo;
+ u32 tx_stat_mac_4095_hi;
+ u32 tx_stat_mac_4095_lo;
+ u32 tx_stat_mac_9216_hi;
+ u32 tx_stat_mac_9216_lo;
+ u32 tx_stat_mac_16383_hi;
+ u32 tx_stat_mac_16383_lo;
+
+ /* internal_mac_transmit_errors */
+ u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+ u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+
+ /* if_out_discards */
+ u32 tx_stat_mac_ufl_hi;
+ u32 tx_stat_mac_ufl_lo;
+};
+
+
+#define MAC_STX_IDX_MAX 2
+
+struct host_port_stats {
+ u32 host_port_stats_counter;
+
+ struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+
+ u32 brb_drop_hi;
+ u32 brb_drop_lo;
+
+ u32 not_used; /* obsolete */
+ u32 pfc_frames_tx_hi;
+ u32 pfc_frames_tx_lo;
+ u32 pfc_frames_rx_hi;
+ u32 pfc_frames_rx_lo;
+
+ u32 eee_lpi_count_hi;
+ u32 eee_lpi_count_lo;
+};
+
+
+struct host_func_stats {
+ u32 host_func_stats_start;
+
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 host_func_stats_end;
+};
+
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
+
+
+/* stats collected for afex.
+ * NOTE: structure is exactly as expected to be received by the switch.
+ * order must remain exactly as is unless protocol changes!
+ */
+struct afex_stats {
+ u32 tx_unicast_frames_hi;
+ u32 tx_unicast_frames_lo;
+ u32 tx_unicast_bytes_hi;
+ u32 tx_unicast_bytes_lo;
+ u32 tx_multicast_frames_hi;
+ u32 tx_multicast_frames_lo;
+ u32 tx_multicast_bytes_hi;
+ u32 tx_multicast_bytes_lo;
+ u32 tx_broadcast_frames_hi;
+ u32 tx_broadcast_frames_lo;
+ u32 tx_broadcast_bytes_hi;
+ u32 tx_broadcast_bytes_lo;
+ u32 tx_frames_discarded_hi;
+ u32 tx_frames_discarded_lo;
+ u32 tx_frames_dropped_hi;
+ u32 tx_frames_dropped_lo;
+
+ u32 rx_unicast_frames_hi;
+ u32 rx_unicast_frames_lo;
+ u32 rx_unicast_bytes_hi;
+ u32 rx_unicast_bytes_lo;
+ u32 rx_multicast_frames_hi;
+ u32 rx_multicast_frames_lo;
+ u32 rx_multicast_bytes_hi;
+ u32 rx_multicast_bytes_lo;
+ u32 rx_broadcast_frames_hi;
+ u32 rx_broadcast_frames_lo;
+ u32 rx_broadcast_bytes_hi;
+ u32 rx_broadcast_bytes_lo;
+ u32 rx_frames_discarded_hi;
+ u32 rx_frames_discarded_lo;
+ u32 rx_frames_dropped_hi;
+ u32 rx_frames_dropped_lo;
+};
+
+#define BCM_5710_FW_MAJOR_VERSION 7
+#define BCM_5710_FW_MINOR_VERSION 13
+#define BCM_5710_FW_REVISION_VERSION 21
+#define BCM_5710_FW_REVISION_VERSION_V15 15
+#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_COMPILE_FLAGS 1
+
+
+/*
+ * attention bits
+ */
+struct atten_sp_status_block {
+ __le32 attn_bits;
+ __le32 attn_bits_ack;
+ u8 status_block_id;
+ u8 reserved0;
+ __le16 attn_bits_index;
+ __le32 reserved1;
+};
+
+
+/*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context {
+ u32 __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command {
+ u32 opcode;
+#define DMAE_COMMAND_SRC (0x1<<0)
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1)
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3)
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9)
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11)
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12)
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13)
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14)
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15)
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17)
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19)
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+ u32 src_addr_lo;
+ u32 src_addr_hi;
+ u32 dst_addr_lo;
+ u32 dst_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+ u16 len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 len;
+ u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+ u32 comp_addr_lo;
+ u32 comp_addr_hi;
+ u32 comp_val;
+ u32 crc32;
+ u32 crc32_c;
+#if defined(__BIG_ENDIAN)
+ u16 crc16_c;
+ u16 crc16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 crc16;
+ u16 crc16_c;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 crc_t10;
+#elif defined(__LITTLE_ENDIAN)
+ u16 crc_t10;
+ u16 reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 xsum8;
+ u16 xsum16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 xsum16;
+ u16 xsum8;
+#endif
+};
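+
+/* Editorial sketch, not part of the original header: the opcode word
+ * above is composed by shifting values into the DMAE_COMMAND_* masks.
+ * A minimal illustration (function name and field values hypothetical):
+ */
+static inline u32 dmae_opcode_example(u32 src_type, u32 dst_type, u32 port)
+{
+ u32 opcode = 0;
+
+ opcode |= (src_type << DMAE_COMMAND_SRC_SHIFT) & DMAE_COMMAND_SRC;
+ opcode |= (dst_type << DMAE_COMMAND_DST_SHIFT) & DMAE_COMMAND_DST;
+ opcode |= (port << DMAE_COMMAND_PORT_SHIFT) & DMAE_COMMAND_PORT;
+ return opcode;
+}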
+
+
+/*
+ * common data for all protocols
+ */
+struct doorbell_hdr {
+ u8 header;
+#define DOORBELL_HDR_RX (0x1<<0)
+#define DOORBELL_HDR_RX_SHIFT 0
+#define DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * Ethernet doorbell
+ */
+struct eth_tx_doorbell {
+#if defined(__BIG_ENDIAN)
+ u16 npackets;
+ u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ struct doorbell_hdr hdr;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr hdr;
+ u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ u16 npackets;
+#endif
+};
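+
+/* Editorial sketch, not part of the original header: before ringing the
+ * TX doorbell a driver fills the packet and BD counts plus the header
+ * byte; a minimal fill might look like this (function name hypothetical,
+ * header type choice purely illustrative).
+ */
+static inline void tx_doorbell_fill_example(struct eth_tx_doorbell *db,
+ u16 npackets, u8 num_bds)
+{
+ db->npackets = npackets;
+ db->params = (num_bds << ETH_TX_DOORBELL_NUM_BDS_SHIFT) &
+ ETH_TX_DOORBELL_NUM_BDS;
+ db->hdr.header = DOORBELL_HDR_DB_TYPE; /* illustrative */
+}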
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e1x {
+ __le16 index_values[HC_SB_MAX_INDICES_E1X];
+ __le16 running_index[HC_SB_MAX_SM];
+ __le32 rsrv[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e1x {
+ struct hc_status_block_e1x sb;
+};
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e2 {
+ __le16 index_values[HC_SB_MAX_INDICES_E2];
+ __le16 running_index[HC_SB_MAX_SM];
+ __le32 reserved[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e2 {
+ struct hc_status_block_e2 sb;
+};
+
+
+/*
+ * 5 lines. slow-path status block
+ */
+struct hc_sp_status_block {
+ __le16 index_values[HC_SP_SB_MAX_INDICES];
+ __le16 running_index;
+ __le16 rsrv;
+ u32 rsrv1;
+};
+
+/*
+ * host status block
+ */
+struct host_sp_status_block {
+ struct atten_sp_status_block atten_status_block;
+ struct hc_sp_status_block sp_sb;
+};
+
+
+/*
+ * IGU driver acknowledgment register
+ */
+struct igu_ack_register {
+#if defined(__BIG_ENDIAN)
+ u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+ u16 status_block_index;
+#elif defined(__LITTLE_ENDIAN)
+ u16 status_block_index;
+ u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+#endif
+};
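+
+/* Editorial sketch, not part of the original header: sb_id_and_flags
+ * packs several fields into one 16-bit word; a driver acknowledging a
+ * status block could assemble it roughly as follows (function name
+ * hypothetical).
+ */
+static inline u16 igu_ack_flags_example(u8 sb_id, u8 storm_id,
+ u8 update_idx, u8 int_mode)
+{
+ return ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) &
+ IGU_ACK_REGISTER_STATUS_BLOCK_ID) |
+ ((storm_id << IGU_ACK_REGISTER_STORM_ID_SHIFT) &
+ IGU_ACK_REGISTER_STORM_ID) |
+ ((update_idx << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) &
+ IGU_ACK_REGISTER_UPDATE_INDEX) |
+ ((int_mode << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT) &
+ IGU_ACK_REGISTER_INTERRUPT_MODE);
+}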
+
+
+/*
+ * IGU driver acknowledgment register (backward-compatible format)
+ */
+struct igu_backward_compatible {
+ u32 sb_id_and_flags;
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
+ u32 reserved_2;
+};
+
+
+/*
+ * IGU driver acknowledgment register (regular format)
+ */
+struct igu_regular {
+ u32 sb_id_and_flags;
+#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
+#define IGU_REGULAR_SB_INDEX_SHIFT 0
+#define IGU_REGULAR_RESERVED0 (0x1<<20)
+#define IGU_REGULAR_RESERVED0_SHIFT 20
+#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
+#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
+#define IGU_REGULAR_BUPDATE (0x1<<24)
+#define IGU_REGULAR_BUPDATE_SHIFT 24
+#define IGU_REGULAR_ENABLE_INT (0x3<<25)
+#define IGU_REGULAR_ENABLE_INT_SHIFT 25
+#define IGU_REGULAR_RESERVED_1 (0x1<<27)
+#define IGU_REGULAR_RESERVED_1_SHIFT 27
+#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
+#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
+#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
+#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
+#define IGU_REGULAR_BCLEANUP (0x1<<31)
+#define IGU_REGULAR_BCLEANUP_SHIFT 31
+ u32 reserved_2;
+};
+
+/*
+ * IGU driver acknowledgment register (union of both formats)
+ */
+union igu_consprod_reg {
+ struct igu_regular regular;
+ struct igu_backward_compatible backward_compatible;
+};
+
+
+/*
+ * IGU control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+
+/*
+ * Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
+#define IGU_CTRL_REG_ADDRESS_SHIFT 0
+#define IGU_CTRL_REG_FID (0x7F<<12)
+#define IGU_CTRL_REG_FID_SHIFT 12
+#define IGU_CTRL_REG_RESERVED (0x1<<19)
+#define IGU_CTRL_REG_RESERVED_SHIFT 19
+#define IGU_CTRL_REG_TYPE (0x1<<20)
+#define IGU_CTRL_REG_TYPE_SHIFT 20
+#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
+#define IGU_CTRL_REG_UNUSED_SHIFT 21
+};
+
+
+/*
+ * IGU interrupt command
+ */
+enum igu_int_cmd {
+ IGU_INT_ENABLE,
+ IGU_INT_DISABLE,
+ IGU_INT_NOP,
+ IGU_INT_NOP2,
+ MAX_IGU_INT_CMD
+};
+
+
+/*
+ * IGU segments
+ */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_NORM,
+ IGU_SEG_ACCESS_DEF,
+ IGU_SEG_ACCESS_ATTN,
+ MAX_IGU_SEG_ACCESS
+};
+
+
+/*
+ * Parser parsing flags field
+ */
+struct parsing_flags {
+ __le16 flags;
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
+#define PARSING_FLAGS_VLAN (0x1<<1)
+#define PARSING_FLAGS_VLAN_SHIFT 1
+#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
+#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
+#define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
+#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
+#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6)
+#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
+#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7)
+#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
+#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9)
+#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10)
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11)
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
+#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12)
+#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
+#define PARSING_FLAGS_LLC_SNAP (0x1<<13)
+#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
+#define PARSING_FLAGS_RESERVED0 (0x3<<14)
+#define PARSING_FLAGS_RESERVED0_SHIFT 14
+};
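+
+/* Editorial sketch, not part of the original header: the flags word is
+ * little-endian on the wire, so a field such as the over-IP protocol
+ * (see enum prs_flags_over_ip below) is extracted after a byte swap;
+ * function name hypothetical.
+ */
+static inline u16 prs_flags_over_ip_example(const struct parsing_flags *pf)
+{
+ return (le16_to_cpu(pf->flags) & PARSING_FLAGS_OVER_IP_PROTOCOL) >>
+ PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT;
+}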
+
+
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type {
+ PRS_FLAG_PUREACK_PIGGY,
+ PRS_FLAG_PUREACK_PURE,
+ MAX_PRS_FLAGS_ACK_TYPE
+};
+
+
+/*
+ * Parsing flags for Ethernet address type
+ */
+enum prs_flags_eth_addr_type {
+ PRS_FLAG_ETHTYPE_NON_UNICAST,
+ PRS_FLAG_ETHTYPE_UNICAST,
+ MAX_PRS_FLAGS_ETH_ADDR_TYPE
+};
+
+
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth {
+ PRS_FLAG_OVERETH_UNKNOWN,
+ PRS_FLAG_OVERETH_IPV4,
+ PRS_FLAG_OVERETH_IPV6,
+ PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+ MAX_PRS_FLAGS_OVER_ETH
+};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip {
+ PRS_FLAG_OVERIP_UNKNOWN,
+ PRS_FLAG_OVERIP_TCP,
+ PRS_FLAG_OVERIP_UDP,
+ MAX_PRS_FLAGS_OVER_IP
+};
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
+#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
+#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
+#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
+#define SDM_OP_GEN_RESERVED_SHIFT 17
+};
+
+
+/*
+ * Timers connection context
+ */
+struct timers_block_context {
+ u32 __reserved_0;
+ u32 __reserved_1;
+ u32 __reserved_2;
+ u32 flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+};
+
+
+/*
+ * The eth aggregative context of Tstorm
+ */
+struct tstorm_eth_ag_context {
+ u32 __reserved0[14];
+};
+
+
+/*
+ * The eth aggregative context of Ustorm
+ */
+struct ustorm_eth_ag_context {
+ u32 __reserved0;
+#if defined(__BIG_ENDIAN)
+ u8 cdu_usage;
+ u8 __reserved2;
+ u16 __reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __reserved1;
+ u8 __reserved2;
+ u8 cdu_usage;
+#endif
+ u32 __reserved3[6];
+};
+
+
+/*
+ * The eth aggregative context of Xstorm
+ */
+struct xstorm_eth_ag_context {
+ u32 reserved0;
+#if defined(__BIG_ENDIAN)
+ u8 cdu_reserved;
+ u8 reserved2;
+ u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved1;
+ u8 reserved2;
+ u8 cdu_reserved;
+#endif
+ u32 reserved3[30];
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell {
+#if defined(__BIG_ENDIAN)
+ u16 zero_fill2;
+ u8 zero_fill1;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ u8 zero_fill1;
+ u16 zero_fill2;
+#endif
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
+};
+
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+struct regpair_native {
+ u32 lo;
+ u32 hi;
+};
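+
+/* Editorial sketch, not part of the original header: a regpair is the
+ * usual way a 64-bit DMA address is handed to the firmware; splitting
+ * an address could look like this (function name hypothetical).
+ */
+static inline void regpair_set_example(struct regpair *rp, u64 addr)
+{
+ rp->lo = cpu_to_le32(lower_32_bits(addr));
+ rp->hi = cpu_to_le32(upper_32_bits(addr));
+}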
+
+/*
+ * Classify rule opcodes in E2/E3
+ */
+enum classify_rule {
+ CLASSIFY_RULE_OPCODE_MAC,
+ CLASSIFY_RULE_OPCODE_VLAN,
+ CLASSIFY_RULE_OPCODE_PAIR,
+ CLASSIFY_RULE_OPCODE_IMAC_VNI,
+ MAX_CLASSIFY_RULE
+};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum classify_rule_action_type {
+ CLASSIFY_RULE_REMOVE,
+ CLASSIFY_RULE_ADD,
+ MAX_CLASSIFY_RULE_ACTION_TYPE
+};
+
+
+/*
+ * general data for the client init ramrod
+ */
+struct client_init_general_data {
+ u8 client_id;
+ u8 statistics_counter_id;
+ u8 statistics_en_flg;
+ u8 is_fcoe_flg;
+ u8 activate_flg;
+ u8 sp_client_id;
+ __le16 mtu;
+ u8 statistics_zero_flg;
+ u8 func_id;
+ u8 cos;
+ u8 traffic_type;
+ u8 fp_hsi_ver;
+ u8 reserved0[3];
+};
+
+
+/*
+ * client init rx data
+ */
+struct client_init_rx_data {
+ u8 tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
+#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3)
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4
+ u8 vmqueue_mode_en_flg;
+ u8 extra_data_over_sgl_en_flg;
+ u8 cache_line_alignment_log_size;
+ u8 enable_dynamic_hc;
+ u8 max_sges_for_packet;
+ u8 client_qzone_id;
+ u8 drop_ip_cs_err_flg;
+ u8 drop_tcp_cs_err_flg;
+ u8 drop_ttl0_flg;
+ u8 drop_udp_cs_err_flg;
+ u8 inner_vlan_removal_enable_flg;
+ u8 outer_vlan_removal_enable_flg;
+ u8 status_block_id;
+ u8 rx_sb_index_number;
+ u8 dont_verify_rings_pause_thr_flg;
+ u8 max_tpa_queues;
+ u8 silent_vlan_removal_flg;
+ __le16 max_bytes_on_bd;
+ __le16 sge_buff_size;
+ u8 approx_mcast_engine_id;
+ u8 rss_engine_id;
+ struct regpair bd_page_base;
+ struct regpair sge_page_base;
+ struct regpair cqe_page_base;
+ u8 is_leading_rss;
+ u8 is_approx_mcast;
+ __le16 max_agg_size;
+ __le16 state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0)
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3)
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4)
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5)
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6)
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7)
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+ __le16 cqe_pause_thr_low;
+ __le16 cqe_pause_thr_high;
+ __le16 bd_pause_thr_low;
+ __le16 bd_pause_thr_high;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
+ __le16 rx_cos_mask;
+ __le16 silent_vlan_value;
+ __le16 silent_vlan_mask;
+ u8 handle_ptp_pkts_flg;
+ u8 reserved6[3];
+ __le32 reserved7;
+};
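+
+/* Editorial sketch, not part of the original header: enabling TPA for
+ * both IPv4 and IPv6 means setting the corresponding bits in the
+ * tpa_en byte above (function name hypothetical).
+ */
+static inline void client_rx_tpa_enable_example(struct client_init_rx_data *rx)
+{
+ rx->tpa_en |= CLIENT_INIT_RX_DATA_TPA_EN_IPV4 |
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}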
+
+/*
+ * client init tx data
+ */
+struct client_init_tx_data {
+ u8 enforce_security_flg;
+ u8 tx_status_block_id;
+ u8 tx_sb_index_number;
+ u8 tss_leading_client_id;
+ u8 tx_switching_flg;
+ u8 anti_spoofing_flg;
+ __le16 default_vlan;
+ struct regpair tx_bd_page_base;
+ __le16 state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0)
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2)
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
+ u8 default_vlan_flg;
+ u8 force_default_pri_flg;
+ u8 tunnel_lso_inc_ip_id;
+ u8 refuse_outband_vlan_flg;
+ u8 tunnel_non_lso_pcsum_location;
+ u8 tunnel_non_lso_outer_ip_csum_location;
+};
+
+/*
+ * client init ramrod data
+ */
+struct client_init_ramrod_data {
+ struct client_init_general_data general;
+ struct client_init_rx_data rx;
+ struct client_init_tx_data tx;
+};
+
+
+/*
+ * client update ramrod data
+ */
+struct client_update_ramrod_data {
+ u8 client_id;
+ u8 func_id;
+ u8 inner_vlan_removal_enable_flg;
+ u8 inner_vlan_removal_change_flg;
+ u8 outer_vlan_removal_enable_flg;
+ u8 outer_vlan_removal_change_flg;
+ u8 anti_spoofing_enable_flg;
+ u8 anti_spoofing_change_flg;
+ u8 activate_flg;
+ u8 activate_change_flg;
+ __le16 default_vlan;
+ u8 default_vlan_enable_flg;
+ u8 default_vlan_change_flg;
+ __le16 silent_vlan_value;
+ __le16 silent_vlan_mask;
+ u8 silent_vlan_removal_flg;
+ u8 silent_vlan_change_flg;
+ u8 refuse_outband_vlan_flg;
+ u8 refuse_outband_vlan_change_flg;
+ u8 tx_switching_flg;
+ u8 tx_switching_change_flg;
+ u8 handle_ptp_pkts_flg;
+ u8 handle_ptp_pkts_change_flg;
+ __le16 reserved1;
+ __le32 echo;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context {
+ u32 __reserved0[4];
+};
+
+
+struct double_regpair {
+ u32 regpair0_lo;
+ u32 regpair0_hi;
+ u32 regpair1_lo;
+ u32 regpair1_hi;
+};
+
+/* 2nd parse bd type used in ethernet tx BDs */
+enum eth_2nd_parse_bd_type {
+ ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL,
+ MAX_ETH_2ND_PARSE_BD_TYPE
+};
+
+/*
+ * Ethernet address types used in ethernet tx BDs
+ */
+enum eth_addr_type {
+ UNKNOWN_ADDRESS,
+ UNICAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ BROADCAST_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * eth classification rule command header
+ */
+struct eth_classify_cmd_header {
+ u8 cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+ u8 func_id;
+ u8 client_id;
+ u8 reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod
+ */
+struct eth_classify_header {
+ u8 rule_cnt;
+ u8 warning_on_error;
+ __le16 reserved1;
+ __le32 echo;
+};
+
+/*
+ * Command for adding/removing an Inner-MAC/VNI classification rule
+ */
+struct eth_classify_imac_vni_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 vni;
+ __le16 imac_lsb;
+ __le16 imac_mid;
+ __le16 imac_msb;
+ __le16 reserved1;
+};
+
+/*
+ * Command for adding/removing a MAC classification rule
+ */
+struct eth_classify_mac_cmd {
+ struct eth_classify_cmd_header header;
+ __le16 reserved0;
+ __le16 inner_mac;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 reserved1;
+};
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule
+ */
+struct eth_classify_pair_cmd {
+ struct eth_classify_cmd_header header;
+ __le16 reserved0;
+ __le16 inner_mac;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule
+ */
+struct eth_classify_vlan_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 reserved0;
+ __le32 reserved1;
+ __le16 reserved2;
+ __le16 vlan;
+};
+
+/*
+ * union for eth classification rule
+ */
+union eth_classify_rule_cmd {
+ struct eth_classify_mac_cmd mac;
+ struct eth_classify_vlan_cmd vlan;
+ struct eth_classify_pair_cmd pair;
+ struct eth_classify_imac_vni_cmd imac_vni;
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_classify_rules_ramrod_data {
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The client ID data needed by the ramrod
+ */
+struct eth_common_ramrod_data {
+ __le32 client_id;
+ __le32 reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context {
+ u32 reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context {
+ u32 __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context {
+ u32 reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context {
+ struct ustorm_eth_st_context ustorm_st_context;
+ struct tstorm_eth_st_context tstorm_st_context;
+ struct xstorm_eth_ag_context xstorm_ag_context;
+ struct tstorm_eth_ag_context tstorm_ag_context;
+ struct cstorm_eth_ag_context cstorm_ag_context;
+ struct ustorm_eth_ag_context ustorm_ag_context;
+ struct timers_block_context timers_context;
+ struct xstorm_eth_st_context xstorm_st_context;
+ struct cstorm_eth_st_context cstorm_st_context;
+};
+
+
+/*
+ * union for sgl and raw data.
+ */
+union eth_sgl_or_raw_data {
+ __le16 sgl[8];
+ u32 raw_data[4];
+};
+
+/*
+ * eth FP end aggregation CQE parameters struct
+ */
+struct eth_end_agg_rx_cqe {
+ u8 type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0)
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3)
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+ u8 reserved1;
+ u8 queue_index;
+ u8 reserved2;
+ __le32 timestamp_delta;
+ __le16 num_of_coalesced_segs;
+ __le16 pkt_len;
+ u8 pure_ack_count;
+ u8 reserved3;
+ __le16 reserved4;
+ union eth_sgl_or_raw_data sgl_or_raw_data;
+ __le32 reserved5[8];
+};
+
+
+/*
+ * regular eth FP CQE parameters struct
+ */
+struct eth_fast_path_rx_cqe {
+ u8 type_error_flags;
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0)
+#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7
+ u8 status_flags;
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+ u8 queue_index;
+ u8 placement_offset;
+ __le32 rss_hash_result;
+ __le16 vlan_tag;
+ __le16 pkt_len_or_gro_seg_len;
+ __le16 len_on_bd;
+ struct parsing_flags pars_flags;
+ union eth_sgl_or_raw_data sgl_or_raw_data;
+ u8 tunn_type;
+ u8 tunn_inner_hdrs_offset;
+ __le16 reserved1;
+ __le32 tunn_tenant_id;
+ __le32 padding[5];
+ u32 marker;
+};
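+
+/* Editorial sketch, not part of the original header: on receive, a
+ * driver checks the error bits above before trusting the hardware
+ * checksum result; a minimal test might read (function name
+ * hypothetical):
+ */
+static inline bool rx_cqe_csum_ok_example(const struct eth_fast_path_rx_cqe *cqe)
+{
+ return !(cqe->type_error_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG));
+}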
+
+
+/*
+ * Command for setting classification flags for a client
+ */
+struct eth_filter_rules_cmd {
+ u8 cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2)
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+ u8 func_id;
+ u8 client_id;
+ u8 reserved1;
+ __le16 state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0)
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3)
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4)
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5)
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6)
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7)
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+ __le16 reserved3;
+ struct regpair reserved4;
+};
+
+
+/*
+ * parameters for eth classification filters ramrod
+ */
+struct eth_filter_rules_ramrod_data {
+ struct eth_classify_header header;
+ struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+/* Hsi version */
+enum eth_fp_hsi_ver {
+ ETH_FP_HSI_VER_0,
+ ETH_FP_HSI_VER_1,
+ ETH_FP_HSI_VER_2,
+ MAX_ETH_FP_HSI_VER
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_general_rules_ramrod_data {
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
+ */
+struct eth_halt_ramrod_data {
+ __le32 client_id;
+ __le32 reserved0;
+};
+
+
+/*
+ * destination and source mac address.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_lo;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_hi;
+ __le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 src_mid;
+ __le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+ __le16 dst_lo;
+ __le16 dst_mid;
+ __le16 dst_hi;
+ __le16 fw_ip_hdr_csum;
+ __le16 pseudo_csum;
+ u8 ip_hdr_start_inner_w;
+ u8 flags;
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
+};
+
+/* union for mac addresses and for tunneling data.
+ * interpreted as tunneling data only if (tunnel_exist == 1).
+ */
+union eth_mac_addr_or_tunnel_data {
+ struct eth_mac_addresses mac_addr;
+ struct eth_tunnel_data tunnel_data;
+};
+
+/* Command for setting multicast classification for a client */
+struct eth_multicast_rules_cmd {
+ u8 cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2)
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3)
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+ u8 func_id;
+ u8 bin_id;
+ u8 engine_id;
+ __le32 reserved2;
+ struct regpair reserved3;
+};
+
+/*
+ * parameters for multicast classification ramrod
+ */
+struct eth_multicast_rules_ramrod_data {
+ struct eth_classify_header header;
+ struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
+};
+
+/*
+ * Placeholder for ramrod protocol-specific data
+ */
+struct ramrod_data {
+ __le32 data_lo;
+ __le32 data_hi;
+};
+
+/*
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 64 bits)
+ */
+union eth_ramrod_data {
+ struct ramrod_data general;
+};
+
+
+/*
+ * RSS toeplitz hash type, as reported in CQE
+ */
+enum eth_rss_hash_type {
+ DEFAULT_HASH_TYPE,
+ IPV4_HASH_TYPE,
+ TCP_IPV4_HASH_TYPE,
+ IPV6_HASH_TYPE,
+ TCP_IPV6_HASH_TYPE,
+ VLAN_PRI_HASH_TYPE,
+ E1HOV_PRI_HASH_TYPE,
+ DSCP_HASH_TYPE,
+ MAX_ETH_RSS_HASH_TYPE
+};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode {
+ ETH_RSS_MODE_DISABLED,
+ ETH_RSS_MODE_REGULAR,
+ ETH_RSS_MODE_VLAN_PRI,
+ ETH_RSS_MODE_E1HOV_PRI,
+ ETH_RSS_MODE_IP_DSCP,
+ MAX_ETH_RSS_MODE
+};
+
+
+/*
+ * parameters for RSS update ramrod (E2)
+ */
+struct eth_rss_update_ramrod_data {
+ u8 rss_engine_id;
+ u8 rss_mode;
+ __le16 capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
+ u8 rss_result_mask;
+ u8 reserved3;
+ __le16 reserved4;
+ u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ __le32 rss_key[T_ETH_RSS_KEY];
+ __le32 echo;
+ __le32 reserved5;
+};
+
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+};
+
+
+/*
+ * Eth Rx Cqe structure- general structure for ramrods
+ */
+struct common_ramrod_eth_rx_cqe {
+ u8 ramrod_type;
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
+ u8 conn_type;
+ __le16 reserved1;
+ __le32 conn_and_cmd_data;
+#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+ struct ramrod_data protocol_data;
+ __le32 echo;
+ __le32 reserved2[11];
+};
+
+/*
+ * Rx Last CQE in page (in ETH)
+ */
+struct eth_rx_cqe_next_page {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le32 reserved[14];
+};
+
+/*
+ * union for all eth rx cqe types (fix their sizes)
+ */
+union eth_rx_cqe {
+ struct eth_fast_path_rx_cqe fast_path_cqe;
+ struct common_ramrod_eth_rx_cqe ramrod_cqe;
+ struct eth_rx_cqe_next_page next_page_cqe;
+ struct eth_end_agg_rx_cqe end_agg_cqe;
+};
+
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type {
+ RX_ETH_CQE_TYPE_ETH_FASTPATH,
+ RX_ETH_CQE_TYPE_ETH_RAMROD,
+ RX_ETH_CQE_TYPE_ETH_START_AGG,
+ RX_ETH_CQE_TYPE_ETH_STOP_AGG,
+ MAX_ETH_RX_CQE_TYPE
+};
+
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel {
+ ETH_FP_CQE_REGULAR,
+ ETH_FP_CQE_RAW,
+ MAX_ETH_RX_FP_SEL
+};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge {
+ __le32 addr_lo;
+ __le32 addr_hi;
+};
+
+
+/*
+ * common data for all protocols
+ */
+struct spe_hdr {
+ __le32 conn_and_cmd_data;
+#define SPE_HDR_CID (0xFFFFFF<<0)
+#define SPE_HDR_CID_SHIFT 0
+#define SPE_HDR_CMD_ID (0xFF<<24)
+#define SPE_HDR_CMD_ID_SHIFT 24
+ __le16 type;
+#define SPE_HDR_CONN_TYPE (0xFF<<0)
+#define SPE_HDR_CONN_TYPE_SHIFT 0
+#define SPE_HDR_FUNCTION_ID (0xFF<<8)
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+ __le16 reserved1;
+};
+
+/*
+ * specific data for ethernet slow path element
+ */
+union eth_specific_data {
+ u8 protocol_data[8];
+ struct regpair client_update_ramrod_data;
+ struct regpair client_init_ramrod_init_data;
+ struct eth_halt_ramrod_data halt_ramrod_data;
+ struct regpair update_data_addr;
+ struct eth_common_ramrod_data common_ramrod_data;
+ struct regpair classify_cfg_addr;
+ struct regpair filter_cfg_addr;
+ struct regpair mcast_cfg_addr;
+};
+
+/*
+ * Ethernet slow path element
+ */
+struct eth_spe {
+ struct spe_hdr hdr;
+ union eth_specific_data data;
+};
+
+
+/*
+ * Ethernet command ID for slow path elements
+ */
+enum eth_spqe_cmd_id {
+ RAMROD_CMD_ID_ETH_UNUSED,
+ RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+ RAMROD_CMD_ID_ETH_HALT,
+ RAMROD_CMD_ID_ETH_FORWARD_SETUP,
+ RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP,
+ RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ RAMROD_CMD_ID_ETH_EMPTY,
+ RAMROD_CMD_ID_ETH_TERMINATE,
+ RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
+ RAMROD_CMD_ID_ETH_FILTER_RULES,
+ RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ RAMROD_CMD_ID_ETH_RSS_UPDATE,
+ RAMROD_CMD_ID_ETH_SET_MAC,
+ MAX_ETH_SPQE_CMD_ID
+};
+
+
+/*
+ * eth tpa update command
+ */
+enum eth_tpa_update_command {
+ TPA_UPDATE_NONE_COMMAND,
+ TPA_UPDATE_ENABLE_COMMAND,
+ TPA_UPDATE_DISABLE_COMMAND,
+ MAX_ETH_TPA_UPDATE_COMMAND
+};
+
+/* In case of LSO over IPv4 tunnel, whether to increment
+ * IP ID on external IP header or internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+ EXT_HEADER,
+ INT_HEADER,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* When a tunnel exists and L4 checksum is offloaded, this selects
+ * the pseudo checksum location: on the packet or on the BD.
+ */
+enum eth_tunnel_non_lso_csum_location {
+ CSUM_ON_PKT,
+ CSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
+};
+
+enum eth_tunn_type {
+ TUNN_TYPE_NONE,
+ TUNN_TYPE_VXLAN,
+ TUNN_TYPE_L2_GRE,
+ TUNN_TYPE_IPV4_GRE,
+ TUNN_TYPE_IPV6_GRE,
+ TUNN_TYPE_L2_GENEVE,
+ TUNN_TYPE_IPV4_GENEVE,
+ TUNN_TYPE_IPV6_GENEVE,
+ MAX_ETH_TUNN_TYPE
+};
+
+/*
+ * Tx regular BD structure
+ */
+struct eth_tx_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le16 total_pkt_bytes;
+ __le16 nbytes;
+ u8 reserved[4];
+};
+
+
+/*
+ * structure for easy access from assembler code
+ */
+struct eth_tx_bd_flags {
+ u8 as_bitfield;
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
+#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
+#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
+#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
+#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
+#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
+#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
+};
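+
+/* Editorial sketch, not part of the original header: requesting IP and
+ * L4 checksum offload for a UDP packet means setting three of the bits
+ * above (function name hypothetical).
+ */
+static inline void tx_bd_flags_udp_csum_example(struct eth_tx_bd_flags *flags)
+{
+ flags->as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM |
+ ETH_TX_BD_FLAGS_L4_CSUM |
+ ETH_TX_BD_FLAGS_IS_UDP;
+}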
+
+/*
+ * The eth Tx Buffer Descriptor
+ */
+struct eth_tx_start_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le16 nbd;
+ __le16 nbytes;
+ __le16 vlan_or_ethertype;
+ struct eth_tx_bd_flags bd_flags;
+ u8 general_data;
+#define ETH_TX_START_BD_HDR_NBDS (0x7<<0)
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3)
+#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
+};
+
+/*
+ * Tx parsing BD structure for ETH E1/E1h
+ */
+struct eth_tx_parse_bd_e1x {
+ __le16 global_data;
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
+ u8 tcp_flags;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+ u8 ip_hlen_w;
+ __le16 total_hlen_w;
+ __le16 tcp_pseudo_csum;
+ __le16 lso_mss;
+ __le16 ip_id;
+ __le32 tcp_send_seq;
+};
+
+/*
+ * Tx parsing BD structure for ETH E2
+ */
+struct eth_tx_parse_bd_e2 {
+ union eth_mac_addr_or_tunnel_data data;
+ __le32 parsing_data;
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
+};
+
+/*
+ * Tx 2nd parsing BD structure for ETH packet
+ */
+struct eth_tx_parse_2nd_bd {
+ __le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
+ u8 bd_type;
+#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4
+ u8 reserved3;
+ u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ u8 reserved4;
+ u8 tunnel_udp_hdr_start_w;
+ u8 fw_ip_hdr_to_payload_w;
+ __le16 fw_ip_csum_wo_len_flags_frag;
+ __le16 hw_ip_id;
+ __le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
+struct eth_tx_next_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ u8 reserved[8];
+};
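+
+/* Editorial sketch, not part of the original header: per the comment
+ * above, the last BD of each page points at the next page, forming a
+ * ring; linking two pages could look like this (function name
+ * hypothetical).
+ */
+static inline void tx_chain_link_example(struct eth_tx_next_bd *last_bd,
+ u64 next_page_dma)
+{
+ last_bd->addr_lo = cpu_to_le32(lower_32_bits(next_page_dma));
+ last_bd->addr_hi = cpu_to_le32(upper_32_bits(next_page_dma));
+}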
+
+/*
+ * union for 4 Bd types
+ */
+union eth_tx_bd_types {
+ struct eth_tx_start_bd start_bd;
+ struct eth_tx_bd reg_bd;
+ struct eth_tx_parse_bd_e1x parse_bd_e1x;
+ struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd;
+ struct eth_tx_next_bd next_bd;
+};
+
+/*
+ * array of 13 bds as appears in the eth xstorm context
+ */
+struct eth_tx_bds_array {
+ union eth_tx_bd_types bds[13];
+};
+
+
+/*
+ * VLAN mode on TX BDs
+ */
+enum eth_tx_vlan_type {
+ X_ETH_NO_VLAN,
+ X_ETH_OUTBAND_VLAN,
+ X_ETH_INBAND_VLAN,
+ X_ETH_FW_ADDED_VLAN,
+ MAX_ETH_TX_VLAN_TYPE
+};
+
+
+/*
+ * Ethernet VLAN filtering mode in E1x
+ */
+enum eth_vlan_filter_mode {
+ ETH_VLAN_FILTER_ANY_VLAN,
+ ETH_VLAN_FILTER_SPECIFIC_VLAN,
+ ETH_VLAN_FILTER_CLASSIFY,
+ MAX_ETH_VLAN_FILTER_MODE
+};
+
+
+/*
+ * MAC filtering configuration command header
+ */
+struct mac_configuration_hdr {
+ u8 length;
+ u8 offset;
+ __le16 client_id;
+ __le32 echo;
+};
+
+/*
+ * MAC address in list for ramrod
+ */
+struct mac_configuration_entry {
+ __le16 lsb_mac_addr;
+ __le16 middle_mac_addr;
+ __le16 msb_mac_addr;
+ __le16 vlan_id;
+ u8 pf_id;
+ u8 flags;
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
+#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
+#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
+#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
+#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
+ __le16 reserved0;
+ __le32 clients_bit_vector;
+};
+
+/*
+ * MAC filtering configuration command
+ */
+struct mac_configuration_cmd {
+ struct mac_configuration_hdr hdr;
+ struct mac_configuration_entry config_table[64];
+};
+
+
+/*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type {
+ T_ETH_MAC_COMMAND_INVALIDATE,
+ T_ETH_MAC_COMMAND_SET,
+ MAX_SET_MAC_ACTION_TYPE
+};
+
+
+/*
+ * Ethernet TPA Modes
+ */
+enum tpa_mode {
+ TPA_LRO,
+ TPA_GRO,
+ MAX_TPA_MODE
+};
+
+
+/*
+ * tpa update ramrod data
+ */
+struct tpa_update_ramrod_data {
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 client_id;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_rings_pause_thr_flg;
+ u8 tpa_mode;
+ __le16 sge_buff_size;
+ __le16 max_agg_size;
+ __le32 sge_page_base_lo;
+ __le32 sge_page_base_hi;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
+ u8 tpa_over_vlan_disable;
+ u8 reserved[7];
+};
+
+
+/*
+ * approximate-match multicast filtering for E1H per function in Tstorm
+ */
+struct tstorm_eth_approximate_match_multicast_filtering {
+ u32 mcast_add_hash_bit_array[8];
+};
+
+
+/*
+ * Common configuration parameters per function in Tstorm
+ */
+struct tstorm_eth_function_common_config {
+ __le16 config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+ u8 rss_result_mask;
+ u8 reserved1;
+ __le16 vlan_id[2];
+};
+
+
+/*
+ * MAC filtering configuration parameters per port in Tstorm
+ */
+struct tstorm_eth_mac_filter_config {
+ u32 ucast_drop_all;
+ u32 ucast_accept_all;
+ u32 mcast_drop_all;
+ u32 mcast_accept_all;
+ u32 bcast_accept_all;
+ u32 vlan_filter[2];
+ u32 unmatched_unicast;
+};
+
+
+/*
+ * tx only queue init ramrod data
+ */
+struct tx_queue_init_ramrod_data {
+ struct client_init_general_data general;
+ struct client_init_tx_data tx;
+};
+
+
+/*
+ * Three RX producers for ETH
+ */
+struct ustorm_eth_rx_producers {
+#if defined(__BIG_ENDIAN)
+ u16 bd_prod;
+ u16 cqe_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cqe_prod;
+ u16 bd_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved;
+ u16 sge_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sge_prod;
+ u16 reserved;
+#endif
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0
+ */
+struct fcoe_rx_stat_params_section0 {
+ __le32 fcoe_rx_pkt_cnt;
+ __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ __le32 fcoe_ver_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+ __le32 fc_crc_cnt;
+ __le32 eofa_del_cnt;
+ __le32 miss_frame_cnt;
+ __le32 seq_timeout_cnt;
+ __le32 drop_seq_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcp_rx_pkt_cnt;
+ __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ __le32 fcoe_tx_pkt_cnt;
+ __le32 fcoe_tx_byte_cnt;
+ __le32 fcp_tx_pkt_cnt;
+ __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+ struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * The data needed by the afex vif list ramrod
+ */
+struct afex_vif_list_ramrod_data {
+ u8 afex_vif_list_command;
+ u8 func_bit_map;
+ __le16 vif_list_index;
+ u8 func_to_clear;
+ u8 echo;
+ __le16 reserved1;
+};
+
+struct c2s_pri_trans_table_entry {
+ u8 val[MAX_VLAN_PRIORITIES];
+};
+
+/*
+ * cfc delete event data
+ */
+struct cfc_del_event_data {
+ __le32 cid;
+ __le32 reserved0;
+ __le32 reserved1;
+};
+
+
+/*
+ * per-port SAFC demo variables
+ */
+struct cmng_flags_per_port {
+ u32 cmng_enables;
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4)
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+ u32 __reserved1;
+};
+
+
+/*
+ * per-port rate shaping variables
+ */
+struct rate_shaping_vars_per_port {
+ u32 rs_periodic_timeout;
+ u32 rs_threshold;
+};
+
+/*
+ * per-port fairness variables
+ */
+struct fairness_vars_per_port {
+ u32 upper_bound;
+ u32 fair_threshold;
+ u32 fairness_timeout;
+ u32 size_thr;
+};
+
+/*
+ * per-port SAFC variables
+ */
+struct safc_struct_per_port {
+#if defined(__BIG_ENDIAN)
+ u16 __reserved1;
+ u8 __reserved0;
+ u8 safc_timeout_usec;
+#elif defined(__LITTLE_ENDIAN)
+ u8 safc_timeout_usec;
+ u8 __reserved0;
+ u16 __reserved1;
+#endif
+ u8 cos_to_traffic_types[MAX_COS_NUMBER];
+ u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
+};
+
+/*
+ * Per-port congestion management variables
+ */
+struct cmng_struct_per_port {
+ struct rate_shaping_vars_per_port rs_vars;
+ struct fairness_vars_per_port fair_vars;
+ struct safc_struct_per_port safc_vars;
+ struct cmng_flags_per_port flags;
+};
+
+/*
+ * a single rate shaping counter; can be used as a protocol or vnic counter
+ */
+struct rate_shaping_counter {
+ u32 quota;
+#if defined(__BIG_ENDIAN)
+ u16 __reserved0;
+ u16 rate;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rate;
+ u16 __reserved0;
+#endif
+};
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn {
+ struct rate_shaping_counter vn_counter;
+};
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn {
+ u32 cos_credit_delta[MAX_COS_NUMBER];
+ u32 vn_credit_delta;
+ u32 __reserved0;
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_vnic {
+ struct rate_shaping_vars_per_vn vnic_max_rate[4];
+ struct fairness_vars_per_vn vnic_min_rate[4];
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_init {
+ struct cmng_struct_per_port port;
+ struct cmng_vnic vnic;
+};
+
+
+/*
+ * driver parameters for congestion management init, all rates are in Mbps
+ */
+struct cmng_init_input {
+ u32 port_rate;
+ u16 vnic_min_rate[4];
+ u16 vnic_max_rate[4];
+ u16 cos_min_rate[MAX_COS_NUMBER];
+ u16 cos_to_pause_mask[MAX_COS_NUMBER];
+ struct cmng_flags_per_port flags;
+};
+
+
+/*
+ * Protocol-common command ID for slow path elements
+ */
+enum common_spqe_cmd_id {
+ RAMROD_CMD_ID_COMMON_UNUSED,
+ RAMROD_CMD_ID_COMMON_FUNCTION_START,
+ RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+ RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
+ RAMROD_CMD_ID_COMMON_CFC_DEL,
+ RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
+ RAMROD_CMD_ID_COMMON_STAT_QUERY,
+ RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+ RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+ RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
+ MAX_COMMON_SPQE_CMD_ID
+};
+
+/*
+ * Per-protocol connection types
+ */
+enum connection_type {
+ ETH_CONNECTION_TYPE,
+ TOE_CONNECTION_TYPE,
+ RDMA_CONNECTION_TYPE,
+ ISCSI_CONNECTION_TYPE,
+ FCOE_CONNECTION_TYPE,
+ RESERVED_CONNECTION_TYPE_0,
+ RESERVED_CONNECTION_TYPE_1,
+ RESERVED_CONNECTION_TYPE_2,
+ NONE_CONNECTION_TYPE,
+ MAX_CONNECTION_TYPE
+};
+
+
+/*
+ * Cos modes
+ */
+enum cos_mode {
+ OVERRIDE_COS,
+ STATIC_COS,
+ FW_WRR,
+ MAX_COS_MODE
+};
+
+
+/*
+ * Dynamic HC counters set by the driver
+ */
+struct hc_dynamic_drv_counter {
+ u32 val[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * zone A per-queue data
+ */
+struct cstorm_queue_zone_data {
+ struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
+ struct regpair reserved[2];
+};
+
+
+/*
+ * Vf-PF channel data in cstorm ram (non-triggered zone)
+ */
+struct vf_pf_channel_zone_data {
+ u32 msg_addr_lo;
+ u32 msg_addr_hi;
+};
+
+/*
+ * zone for VF non-triggered data
+ */
+struct non_trigger_vf_zone {
+ struct vf_pf_channel_zone_data vf_pf_channel;
+};
+
+/*
+ * Vf-PF channel trigger zone in cstorm ram
+ */
+struct vf_pf_channel_zone_trigger {
+ u8 addr_valid;
+};
+
+/*
+ * zone that triggers the in-bound interrupt
+ */
+struct trigger_vf_zone {
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+ u8 reserved0;
+ u16 reserved1;
+ u32 reserved2;
+};
+
+/*
+ * zone B per-VF data
+ */
+struct cstorm_vf_zone_data {
+ struct non_trigger_vf_zone non_trigger;
+ struct trigger_vf_zone trigger;
+};
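+
+/* Together the two zones describe the VF-PF mailbox handshake: the VF first
+ * publishes the physical address of its request buffer through the
+ * non-triggered zone, then writes the trigger zone to raise the interrupt
+ * towards the PF. A sketch of the VF side (pointer setup and the
+ * U64_LO/U64_HI-style helpers are assumptions, not part of this file):
+ *
+ *	zone->non_trigger.vf_pf_channel.msg_addr_lo = U64_LO(msg_mapping);
+ *	zone->non_trigger.vf_pf_channel.msg_addr_hi = U64_HI(msg_mapping);
+ *	wmb();
+ *	zone->trigger.vf_pf_channel.addr_valid = 1;
+ */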
+
+
+/*
+ * Dynamic host coalescing init parameters, per state machine
+ */
+struct dynamic_hc_sm_config {
+ u32 threshold[3];
+ u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * Dynamic host coalescing init parameters
+ */
+struct dynamic_hc_config {
+ struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM];
+};
+
+
+struct e2_integ_data {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+ u8 cos;
+ u8 voq;
+ u8 pbf_queue;
+#elif defined(__LITTLE_ENDIAN)
+ u8 pbf_queue;
+ u8 voq;
+ u8 cos;
+ u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u8 reserved2;
+ u8 ramEn;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ramEn;
+ u8 reserved2;
+ u16 reserved3;
+#endif
+};
+
+
+/*
+ * set mac event data
+ */
+struct eth_event_data {
+ __le32 echo;
+ __le32 reserved0;
+ __le32 reserved1;
+};
+
+
+/*
+ * pf-vf event data
+ */
+struct vf_pf_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 msg_addr_lo;
+ __le32 msg_addr_hi;
+};
+
+/*
+ * VF FLR event data
+ */
+struct vf_flr_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+};
+
+/*
+ * malicious VF event data
+ */
+struct malicious_vf_event_data {
+ u8 vf_id;
+ u8 err_id;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+};
+
+/*
+ * vif list event data
+ */
+struct vif_list_event_data {
+ u8 func_bit_map;
+ u8 echo;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+/* function update event data */
+struct function_update_event_data {
+ u8 echo;
+ u8 reserved;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+
+/* union for all event ring message types */
+union event_data {
+ struct vf_pf_event_data vf_pf_event;
+ struct eth_event_data eth_event;
+ struct cfc_del_event_data cfc_del_event;
+ struct vf_flr_event_data vf_flr_event;
+ struct malicious_vf_event_data malicious_vf_event;
+ struct vif_list_event_data vif_list_event;
+ struct function_update_event_data function_update_event;
+};
+
+
+/*
+ * per PF event ring data
+ */
+struct event_ring_data {
+ struct regpair_native base_addr;
+#if defined(__BIG_ENDIAN)
+ u8 index_id;
+ u8 sb_id;
+ u16 producer;
+#elif defined(__LITTLE_ENDIAN)
+ u16 producer;
+ u8 sb_id;
+ u8 index_id;
+#endif
+ u32 reserved0;
+};
+
+
+/*
+ * event ring message element (each element is 128 bits)
+ */
+struct event_ring_msg {
+ u8 opcode;
+ u8 error;
+ u16 reserved1;
+ union event_data data;
+};
+
+/*
+ * event ring next page element (128 bits)
+ */
+struct event_ring_next {
+ struct regpair addr;
+ u32 reserved[2];
+};
+
+/*
+ * union for event ring element types (each element is 128 bits)
+ */
+union event_ring_elem {
+ struct event_ring_msg message;
+ struct event_ring_next next_page;
+};
+
+
+/*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode {
+ EVENT_RING_OPCODE_VF_PF_CHANNEL,
+ EVENT_RING_OPCODE_FUNCTION_START,
+ EVENT_RING_OPCODE_FUNCTION_STOP,
+ EVENT_RING_OPCODE_CFC_DEL,
+ EVENT_RING_OPCODE_CFC_DEL_WB,
+ EVENT_RING_OPCODE_STAT_QUERY,
+ EVENT_RING_OPCODE_STOP_TRAFFIC,
+ EVENT_RING_OPCODE_START_TRAFFIC,
+ EVENT_RING_OPCODE_VF_FLR,
+ EVENT_RING_OPCODE_MALICIOUS_VF,
+ EVENT_RING_OPCODE_FORWARD_SETUP,
+ EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+ EVENT_RING_OPCODE_FUNCTION_UPDATE,
+ EVENT_RING_OPCODE_AFEX_VIF_LISTS,
+ EVENT_RING_OPCODE_SET_MAC,
+ EVENT_RING_OPCODE_CLASSIFICATION_RULES,
+ EVENT_RING_OPCODE_FILTERS_RULES,
+ EVENT_RING_OPCODE_MULTICAST_RULES,
+ EVENT_RING_OPCODE_SET_TIMESYNC,
+ MAX_EVENT_RING_OPCODE
+};
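+
+/* A consumer walks the event ring one 128-bit element at a time, treating
+ * each element as a message (the next-page element excepted) and dispatching
+ * on its opcode. A minimal sketch (ring indexing is hypothetical):
+ *
+ *	union event_ring_elem *elem = &ring[cons & ring_mask];
+ *
+ *	switch (elem->message.opcode) {
+ *	case EVENT_RING_OPCODE_CFC_DEL:
+ *		cid = le32_to_cpu(elem->message.data.cfc_del_event.cid);
+ *		break;
+ *	case EVENT_RING_OPCODE_VF_FLR:
+ *		vf_id = elem->message.data.vf_flr_event.vf_id;
+ *		break;
+ *	}
+ */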
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode {
+ FAIRNESS_COS_WRR_MODE,
+ FAIRNESS_COS_ETS_MODE,
+ MAX_FAIRNESS_MODE
+};
+
+
+/*
+ * Priority and cos
+ */
+struct priority_cos {
+ u8 priority;
+ u8 cos;
+ __le16 reserved1;
+};
+
+/*
+ * The data for flow control configuration
+ */
+struct flow_control_configuration {
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+ u8 dcb_enabled;
+ u8 dcb_version;
+ u8 dont_add_pri_0_en;
+ u8 reserved1;
+ __le32 reserved2;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
+};
+
+
+/*
+ * function start ramrod data
+ */
+struct function_start_data {
+ u8 function_mode;
+ u8 allow_npar_tx_switching;
+ __le16 sd_vlan_tag;
+ __le16 vif_id;
+ u8 path_id;
+ u8 network_cos_mode;
+ u8 dmae_cmd_id;
+ u8 no_added_tags;
+ __le16 reserved0;
+ __le32 reserved1;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
+ __le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
+ u8 sd_accept_mf_clss_fail;
+ u8 sd_accept_mf_clss_fail_match_ethtype;
+ __le16 sd_accept_mf_clss_fail_ethtype;
+ __le16 sd_vlan_eth_type;
+ u8 sd_vlan_force_pri_flg;
+ u8 sd_vlan_force_pri_val;
+ u8 c2s_pri_tt_valid;
+ u8 c2s_pri_default;
+ u8 tx_vlan_filtering_enable;
+ u8 tx_vlan_filtering_use_pvid;
+ u8 reserved2[4];
+ struct c2s_pri_trans_table_entry c2s_pri_trans_table;
+};
+
+struct function_update_data {
+ u8 vif_id_change_flg;
+ u8 afex_default_vlan_change_flg;
+ u8 allowed_priorities_change_flg;
+ u8 network_cos_mode_change_flg;
+ __le16 vif_id;
+ __le16 afex_default_vlan;
+ u8 allowed_priorities;
+ u8 network_cos_mode;
+ u8 lb_mode_en_change_flg;
+ u8 lb_mode_en;
+ u8 tx_switch_suspend_change_flg;
+ u8 tx_switch_suspend;
+ u8 echo;
+ u8 update_tunn_cfg_flg;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
+ __le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
+ u8 sd_vlan_force_pri_change_flg;
+ u8 sd_vlan_force_pri_flg;
+ u8 sd_vlan_force_pri_val;
+ u8 sd_vlan_tag_change_flg;
+ u8 sd_vlan_eth_type_change_flg;
+ u8 reserved1;
+ __le16 sd_vlan_tag;
+ __le16 sd_vlan_eth_type;
+ u8 tx_vlan_filtering_pvid_change_flg;
+ u8 reserved0;
+ __le32 reserved2;
+};
+
+/*
+ * FW version stored in the Xstorm RAM
+ */
+struct fw_version {
+#if defined(__BIG_ENDIAN)
+ u8 engineering;
+ u8 revision;
+ u8 minor;
+ u8 major;
+#elif defined(__LITTLE_ENDIAN)
+ u8 major;
+ u8 minor;
+ u8 revision;
+ u8 engineering;
+#endif
+ u32 flags;
+#define FW_VERSION_OPTIMIZED (0x1<<0)
+#define FW_VERSION_OPTIMIZED_SHIFT 0
+#define FW_VERSION_BIG_ENDIEN (0x1<<1)
+#define FW_VERSION_BIG_ENDIEN_SHIFT 1
+#define FW_VERSION_CHIP_VERSION (0x3<<2)
+#define FW_VERSION_CHIP_VERSION_SHIFT 2
+#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
+#define __FW_VERSION_RESERVED_SHIFT 4
+};
+
+/*
+ * Dynamic Host-Coalescing - driver (host) counters
+ */
+struct hc_dynamic_sb_drv_counters {
+ u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+
+/*
+ * 2 bytes. configuration/state parameters for a single protocol index
+ */
+struct hc_index_data {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+ u8 timeout;
+#elif defined(__LITTLE_ENDIAN)
+ u8 timeout;
+ u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+#endif
+};
+
+
+/*
+ * HC state-machine
+ */
+struct hc_status_block_sm {
+#if defined(__BIG_ENDIAN)
+ u8 igu_seg_id;
+ u8 igu_sb_id;
+ u8 timer_value;
+ u8 __flags;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __flags;
+ u8 timer_value;
+ u8 igu_sb_id;
+ u8 igu_seg_id;
+#endif
+ u32 time_to_expire;
+};
+
+/*
+ * holds PCI identification variables - used in various places in the firmware
+ */
+struct pci_entity {
+#if defined(__BIG_ENDIAN)
+ u8 vf_valid;
+ u8 vf_id;
+ u8 vnic_id;
+ u8 pf_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 pf_id;
+ u8 vnic_id;
+ u8 vf_id;
+ u8 vf_valid;
+#endif
+};
+
+/*
+ * The fast-path status block meta-data, common to all chips
+ */
+struct hc_sb_data {
+ struct regpair_native host_sb_addr;
+ struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
+ struct pci_entity p_func;
+#if defined(__BIG_ENDIAN)
+ u8 rsrv0;
+ u8 state;
+ u8 dhc_qzone_id;
+ u8 same_igu_sb_1b;
+#elif defined(__LITTLE_ENDIAN)
+ u8 same_igu_sb_1b;
+ u8 dhc_qzone_id;
+ u8 state;
+ u8 rsrv0;
+#endif
+ struct regpair_native rsrv1[2];
+};
+
+
+/*
+ * Segment types for host coalescing
+ */
+enum hc_segment {
+ HC_REGULAR_SEGMENT,
+ HC_DEFAULT_SEGMENT,
+ MAX_HC_SEGMENT
+};
+
+
+/*
+ * The slow-path status block meta-data
+ */
+struct hc_sp_status_block_data {
+ struct regpair_native host_sb_addr;
+#if defined(__BIG_ENDIAN)
+ u8 rsrv1;
+ u8 state;
+ u8 igu_seg_id;
+ u8 igu_sb_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 igu_sb_id;
+ u8 igu_seg_id;
+ u8 state;
+ u8 rsrv1;
+#endif
+ struct pci_entity p_func;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e1x {
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
+ struct hc_sb_data common;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e2 {
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
+ struct hc_sb_data common;
+};
+
+
+/*
+ * IGU block operation modes (in Everest2)
+ */
+enum igu_mode {
+ HC_IGU_BC_MODE,
+ HC_IGU_NBC_MODE,
+ MAX_IGU_MODE
+};
+
+/*
+ * Inner Headers Classification Type
+ */
+enum inner_clss_type {
+	INNER_CLSS_DISABLED,
+	INNER_CLSS_USE_VLAN,
+	INNER_CLSS_USE_VNI,
+	MAX_INNER_CLSS_TYPE
+};
+
+/*
+ * IP versions
+ */
+enum ip_ver {
+ IP_V4,
+ IP_V6,
+ MAX_IP_VER
+};
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR,
+ VF_PF_CHANNEL_NOT_READY,
+ ETH_ILLEGAL_BD_LENGTHS,
+ ETH_PACKET_TOO_SHORT,
+ ETH_PAYLOAD_TOO_BIG,
+ ETH_ILLEGAL_ETH_TYPE,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_TOO_MANY_BDS,
+ ETH_ZERO_HDR_NBDS,
+ ETH_START_BD_NOT_SET,
+ ETH_ILLEGAL_PARSE_NBDS,
+ ETH_IPV6_AND_CHECKSUM,
+ ETH_VLAN_FLG_INCORRECT,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_TUNNEL_NOT_SUPPORTED,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode {
+ SINGLE_FUNCTION,
+ MULTI_FUNCTION_SD,
+ MULTI_FUNCTION_SI,
+ MULTI_FUNCTION_AFEX,
+ MAX_MF_MODE
+};
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf)
+ */
+struct tstorm_per_pf_stats {
+ struct regpair rcv_error_bytes;
+};
+
+/*
+ * per-PF statistics
+ */
+struct per_pf_stats {
+ struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port)
+ */
+struct tstorm_per_port_stats {
+ __le32 mac_discard;
+ __le32 mac_filter_discard;
+ __le32 brb_truncate_discard;
+ __le32 mf_tag_discard;
+ __le32 packet_drop;
+ __le32 reserved;
+};
+
+/*
+ * per-port statistics
+ */
+struct per_port_stats {
+ struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client)
+ */
+struct tstorm_per_queue_stats {
+ struct regpair rcv_ucast_bytes;
+ __le32 rcv_ucast_pkts;
+ __le32 checksum_discard;
+ struct regpair rcv_bcast_bytes;
+ __le32 rcv_bcast_pkts;
+ __le32 pkts_too_big_discard;
+ struct regpair rcv_mcast_bytes;
+ __le32 rcv_mcast_pkts;
+ __le32 ttl0_discard;
+ __le16 no_buff_discard;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client)
+ */
+struct ustorm_per_queue_stats {
+ struct regpair ucast_no_buff_bytes;
+ struct regpair mcast_no_buff_bytes;
+ struct regpair bcast_no_buff_bytes;
+ __le32 ucast_no_buff_pkts;
+ __le32 mcast_no_buff_pkts;
+ __le32 bcast_no_buff_pkts;
+ __le32 coalesced_pkts;
+ struct regpair coalesced_bytes;
+ __le32 coalesced_events;
+ __le32 coalesced_aborts;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client)
+ */
+struct xstorm_per_queue_stats {
+ struct regpair ucast_bytes_sent;
+ struct regpair mcast_bytes_sent;
+ struct regpair bcast_bytes_sent;
+ __le32 ucast_pkts_sent;
+ __le32 mcast_pkts_sent;
+ __le32 bcast_pkts_sent;
+ __le32 error_drop_pkts;
+};
+
+/*
+ * per-queue statistics
+ */
+struct per_queue_stats {
+ struct tstorm_per_queue_stats tstorm_queue_statistics;
+ struct ustorm_per_queue_stats ustorm_queue_statistics;
+ struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
+ * FW version stored in first line of pram
+ */
+struct pram_fw_version {
+ u8 major;
+ u8 minor;
+ u8 revision;
+ u8 engineering;
+ u8 flags;
+#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
+#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
+#define PRAM_FW_VERSION_STORM_ID (0x3<<1)
+#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
+#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3)
+#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
+#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
+#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
+#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
+#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
+};
+
+
+/*
+ * Ethernet slow path element
+ */
+union protocol_common_specific_data {
+ u8 protocol_data[8];
+ struct regpair phy_address;
+ struct regpair mac_config_addr;
+ struct afex_vif_list_ramrod_data afex_vif_list_data;
+};
+
+/*
+ * The send queue element
+ */
+struct protocol_common_spe {
+ struct spe_hdr hdr;
+ union protocol_common_specific_data data;
+};
+
+/* The data for the Set Timesync Ramrod */
+struct set_timesync_ramrod_data {
+ u8 drift_adjust_cmd;
+ u8 offset_cmd;
+ u8 add_sub_drift_adjust_value;
+ u8 drift_adjust_value;
+ u32 drift_adjust_period;
+ struct regpair offset_delta;
+};
+
+/*
+ * The send queue element
+ */
+struct slow_path_element {
+ struct spe_hdr hdr;
+ struct regpair protocol_data;
+};
+
+
+/*
+ * Protocol-common statistics counter
+ */
+struct stats_counter {
+ __le16 xstats_counter;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le16 tstats_counter;
+ __le16 reserved2;
+ __le32 reserved3;
+ __le16 ustats_counter;
+ __le16 reserved4;
+ __le32 reserved5;
+ __le16 cstats_counter;
+ __le16 reserved6;
+ __le32 reserved7;
+};
+
+
+/*
+ * statistics query entry
+ */
+struct stats_query_entry {
+ u8 kind;
+ u8 index;
+ __le16 funcID;
+ __le32 reserved;
+ struct regpair address;
+};
+
+/*
+ * statistics command
+ */
+struct stats_query_cmd_group {
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistics command header
+ */
+struct stats_query_header {
+ u8 cmd_num;
+ u8 reserved0;
+ __le16 drv_stats_counter;
+ __le32 reserved1;
+ struct regpair stats_counters_addrs;
+};
+
+
+/*
+ * Types of statistics query entries
+ */
+enum stats_query_type {
+ STATS_TYPE_QUEUE,
+ STATS_TYPE_PORT,
+ STATS_TYPE_PF,
+ STATS_TYPE_TOE,
+ STATS_TYPE_FCOE,
+ MAX_STATS_QUERY_TYPE
+};
+
+
+/*
+ * Indicates the function status block state
+ */
+enum status_block_state {
+ SB_DISABLED,
+ SB_ENABLED,
+ SB_CLEANED,
+ MAX_STATUS_BLOCK_STATE
+};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id {
+ USTORM_ID,
+ CSTORM_ID,
+ XSTORM_ID,
+ TSTORM_ID,
+ ATTENTION_ID,
+ MAX_STORM_ID
+};
+
+
+/*
+ * Traffic types used in ETS and flow control algorithms
+ */
+enum traffic_type {
+ LLFC_TRAFFIC_TYPE_NW,
+ LLFC_TRAFFIC_TYPE_FCOE,
+ LLFC_TRAFFIC_TYPE_ISCSI,
+ MAX_TRAFFIC_TYPE
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct tstorm_queue_zone_data {
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct tstorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+/* Add or Subtract Value for Set Timesync Ramrod */
+enum ts_add_sub_value {
+ TS_SUB_VALUE,
+ TS_ADD_VALUE,
+ MAX_TS_ADD_SUB_VALUE
+};
+
+/* Drift-Adjust Commands for Set Timesync Ramrod */
+enum ts_drift_adjust_cmd {
+ TS_DRIFT_ADJUST_KEEP,
+ TS_DRIFT_ADJUST_SET,
+ TS_DRIFT_ADJUST_RESET,
+ MAX_TS_DRIFT_ADJUST_CMD
+};
+
+/* Offset Commands for Set Timesync Ramrod */
+enum ts_offset_cmd {
+ TS_OFFSET_KEEP,
+ TS_OFFSET_INC,
+ TS_OFFSET_DEC,
+ MAX_TS_OFFSET_CMD
+};
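+
+/* The three enums above populate struct set_timesync_ramrod_data defined
+ * earlier in this file; an illustrative filling (values arbitrary):
+ *
+ *	struct set_timesync_ramrod_data data = {0};
+ *
+ *	data.drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
+ *	data.add_sub_drift_adjust_value = TS_ADD_VALUE;
+ *	data.drift_adjust_value = 10;
+ *	data.drift_adjust_period = 100;
+ *	data.offset_cmd = TS_OFFSET_KEEP;
+ */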
+
+/* zone A per-queue data */
+struct ustorm_queue_zone_data {
+ struct ustorm_eth_rx_producers eth_rx_producers;
+ struct regpair reserved[3];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct ustorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+
+/*
+ * data per VF-PF channel
+ */
+struct vf_pf_channel_data {
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u8 valid;
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 valid;
+ u16 reserved0;
+#endif
+ u32 reserved1;
+};
+
+
+/*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state {
+ VF_PF_CHANNEL_STATE_READY,
+ VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
+ MAX_VF_PF_CHANNEL_STATE
+};
+
+
+/*
+ * vif_list_rule_kind
+ */
+enum vif_list_rule_kind {
+ VIF_LIST_RULE_SET,
+ VIF_LIST_RULE_GET,
+ VIF_LIST_RULE_CLEAR_ALL,
+ VIF_LIST_RULE_CLEAR_FUNC,
+ MAX_VIF_LIST_RULE_KIND
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct xstorm_queue_zone_data {
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct xstorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
new file mode 100644
index 0000000000..0a59a09ef8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -0,0 +1,787 @@
+/* bnx2x_init.h: QLogic Everest network driver.
+ *               Structures and macros needed during initialization.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Modified by: Vladislav Zolotarov
+ */
+
+#ifndef BNX2X_INIT_H
+#define BNX2X_INIT_H
+
+/* Init operation types and structures */
+enum {
+ OP_RD = 0x1, /* read a single register */
+ OP_WR, /* write a single register */
+ OP_SW, /* copy a string to the device */
+ OP_ZR, /* clear memory */
+ OP_ZP, /* unzip then copy with DMAE */
+ OP_WR_64, /* write 64 bit pattern */
+ OP_WB, /* copy a string using DMAE */
+ OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
+ /* Skip the following ops if all of the init modes don't match */
+ OP_IF_MODE_OR,
+ /* Skip the following ops if any of the init modes don't match */
+ OP_IF_MODE_AND,
+ OP_MAX
+};
+
+enum {
+ STAGE_START,
+ STAGE_END,
+};
+
+/* Returns the index of start or end of a specific block stage in ops array */
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
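+
+/* Each (block, stage) pair owns two consecutive entries in the ops-offsets
+ * array: its start index and its end index. For example, with
+ * NUM_OF_INIT_PHASES == 11, block 2 at stage 3 yields
+ * 2 * (2 * 11 + 3) + STAGE_START = 50, and the matching STAGE_END entry
+ * is 51.
+ */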
+
+
+/* structs for the various opcodes */
+struct raw_op {
+ u32 op:8;
+ u32 offset:24;
+ u32 raw_data;
+};
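+
+/* The placement of the op:8/offset:24 bit-fields within the first u32 is
+ * compiler- and endianness-dependent; the init arrays consumed by
+ * bnx2x_init_block() are generated with a matching layout. On a typical
+ * little-endian build, for instance,
+ *
+ *	struct raw_op op = { .op = OP_WR, .offset = 0x1234 };
+ *
+ * keeps the opcode in the low 8 bits of the first word (an illustrative
+ * assumption about the ABI, not a guarantee of this header).
+ */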
+
+struct op_read {
+ u32 op:8;
+ u32 offset:24;
+ u32 val;
+};
+
+struct op_write {
+ u32 op:8;
+ u32 offset:24;
+ u32 val;
+};
+
+struct op_arr_write {
+ u32 op:8;
+ u32 offset:24;
+#ifdef __BIG_ENDIAN
+ u16 data_len;
+ u16 data_off;
+#else /* __LITTLE_ENDIAN */
+ u16 data_off;
+ u16 data_len;
+#endif
+};
+
+struct op_zero {
+ u32 op:8;
+ u32 offset:24;
+ u32 len;
+};
+
+struct op_if_mode {
+ u32 op:8;
+ u32 cmd_offset:24;
+ u32 mode_bit_map;
+};
+
+
+union init_op {
+ struct op_read read;
+ struct op_write write;
+ struct op_arr_write arr_wr;
+ struct op_zero zero;
+ struct raw_op raw;
+ struct op_if_mode if_mode;
+};
+
+
+/* Init Phases */
+enum {
+ PHASE_COMMON,
+ PHASE_PORT0,
+ PHASE_PORT1,
+ PHASE_PF0,
+ PHASE_PF1,
+ PHASE_PF2,
+ PHASE_PF3,
+ PHASE_PF4,
+ PHASE_PF5,
+ PHASE_PF6,
+ PHASE_PF7,
+ NUM_OF_INIT_PHASES
+};
+
+/* Init Modes */
+enum {
+ MODE_ASIC = 0x00000001,
+ MODE_FPGA = 0x00000002,
+ MODE_EMUL = 0x00000004,
+ MODE_E2 = 0x00000008,
+ MODE_E3 = 0x00000010,
+ MODE_PORT2 = 0x00000020,
+ MODE_PORT4 = 0x00000040,
+ MODE_SF = 0x00000080,
+ MODE_MF = 0x00000100,
+ MODE_MF_SD = 0x00000200,
+ MODE_MF_SI = 0x00000400,
+ MODE_MF_AFEX = 0x00000800,
+ MODE_E3_A0 = 0x00001000,
+ MODE_E3_B0 = 0x00002000,
+ MODE_COS3 = 0x00004000,
+ MODE_COS6 = 0x00008000,
+ MODE_LITTLE_ENDIAN = 0x00010000,
+ MODE_BIG_ENDIAN = 0x00020000,
+};
+
+/* Init Blocks */
+enum {
+ BLOCK_ATC,
+ BLOCK_BRB1,
+ BLOCK_CCM,
+ BLOCK_CDU,
+ BLOCK_CFC,
+ BLOCK_CSDM,
+ BLOCK_CSEM,
+ BLOCK_DBG,
+ BLOCK_DMAE,
+ BLOCK_DORQ,
+ BLOCK_HC,
+ BLOCK_IGU,
+ BLOCK_MISC,
+ BLOCK_NIG,
+ BLOCK_PBF,
+ BLOCK_PGLUE_B,
+ BLOCK_PRS,
+ BLOCK_PXP2,
+ BLOCK_PXP,
+ BLOCK_QM,
+ BLOCK_SRC,
+ BLOCK_TCM,
+ BLOCK_TM,
+ BLOCK_TSDM,
+ BLOCK_TSEM,
+ BLOCK_UCM,
+ BLOCK_UPB,
+ BLOCK_USDM,
+ BLOCK_USEM,
+ BLOCK_XCM,
+ BLOCK_XPB,
+ BLOCK_XSDM,
+ BLOCK_XSEM,
+ BLOCK_MISC_AEU,
+ NUM_OF_INIT_BLOCKS
+};
+
+/* QM queue numbers */
+#define BNX2X_ETH_Q 0
+#define BNX2X_TOE_Q 3
+#define BNX2X_TOE_ACK_Q 6
+#define BNX2X_ISCSI_Q 9
+#define BNX2X_ISCSI_ACK_Q 11
+#define BNX2X_FCOE_Q 10
+
+/* Vnics per mode */
+#define BNX2X_PORT2_MODE_NUM_VNICS 4
+#define BNX2X_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
+ (QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+ (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
+ (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
+ ((((port) << 1) | (vnic)) * 16 + (q_num))
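+
+/* Worked example for the macros above: port 0, vnic 1, q_num 0 gives
+ * BNX2X_PF_Q_NUM(0, 0, 1) = ((0 << 1) | 1) * 16 + 0 = 16, so
+ * BNX2X_Q_VOQ_REG_ADDR(16) = QM_REG_QVOQIDX_0 + 64 and, for COS 1,
+ * BNX2X_VOQ_Q_REG_ADDR(1, 16) = QM_REG_VOQQMASK_0_LSB + 8
+ * (queue 16 falls into the LSB half since 16 >> 5 == 0).
+ */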
+
+
+/* Maps the specified queue to the specified COS */
+static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
+{
+ /* find current COS mapping */
+ u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
+
+ /* check if queue->COS mapping has changed */
+ if (curr_cos != new_cos) {
+ u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
+ u32 reg_addr, reg_bit_map, vnic;
+
+ /* update parameters for 4port mode */
+ if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
+ num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
+ if (BP_PORT(bp)) {
+ curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+ new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+ }
+ }
+
+ /* change queue mapping for each VNIC */
+ for (vnic = 0; vnic < num_vnics; vnic++) {
+ u32 pf_q_num =
+ BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
+ u32 q_bit_map = 1 << (pf_q_num & 0x1f);
+
+ /* overwrite queue->VOQ mapping */
+ REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+ /* clear queue bit from current COS bit map */
+ reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
+
+ /* set queue bit in new COS bit map */
+ reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
+
+ /* set/clear queue bit in command-queue bit map
+ * (E2/E3A0 only, valid COS values are 0/1)
+ */
+ if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
+ reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+ reg_bit_map = new_cos ?
+ (reg_bit_map | q_bit_map) :
+ (reg_bit_map & (~q_bit_map));
+ REG_WR(bp, reg_addr, reg_bit_map);
+ }
+ }
+ }
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
+ struct priority_cos *traffic_cos)
+{
+ bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+ bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ if (mode != STATIC_COS) {
+ /* required only in backward compatible COS mode */
+ bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ }
+}
+
+
+/* Congestion management port init API description.
+ * The API works as follows:
+ * the driver passes the cmng_init_input struct, and the port init function
+ * prepares the required internal RAM structure, which is passed back
+ * to the driver (cmng_init) to be written into the internal RAM.
+ *
+ * IMPORTANT REMARKS:
+ * 1. The cmng_init struct does not represent the contiguous internal RAM
+ *    structure. The driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
+ *    offset in order to write the port sub-struct and the
+ *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub-struct (in other
+ *    words - don't use memcpy!).
+ * 2. Although the cmng_init struct is filled for the maximal vnic number
+ *    possible, the driver should only write the valid vnics into the internal
+ *    RAM according to the appropriate port mode.
+ */
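+
+/* A hedged sketch of the flow described above; the storm_wr32() helper and
+ * port_vars_offset are illustrative stand-ins, not part of this API:
+ *
+ *	struct cmng_init_input input = {0};
+ *	struct cmng_init ram;
+ *	u32 *src = (u32 *)&ram.port;
+ *	int i;
+ *
+ *	input.port_rate = 10000;
+ *	bnx2x_init_cmng(&input, &ram);
+ *	for (i = 0; i < sizeof(ram.port) / 4; i++)
+ *		storm_wr32(bp, port_vars_offset + i * 4, src[i]);
+ */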
+
+/* CMNG constants, as derived from system spec calculations */
+
+/* default MIN rate in case the VNIC min rate is configured to zero - 100 Mbps */
+#define DEF_MIN_RATE 100
+
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer
+ */
+#define QM_ARB_BYTES 160000
+
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+
+/* number of bytes above the threshold for
+ * the minimal credit of the Min algorithm
+ */
+#define MIN_ABOVE_THRESH 32768
+
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair
+ */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
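+
+/* With the constants above, T_FAIR_COEF evaluates to
+ * (32768 + 160000) * 8 * 100 = 154,214,400
+ */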
+
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+#define SAFC_TIMEOUT_USEC 52
+
+#define SDM_TICKS 4
+
+
+static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
+ u32 r_param, struct cmng_init *ram_data)
+{
+ u32 vnic;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+	/* rate shaping per-port variables: the timeout is converted
+	 * from microseconds to SDM ticks, where each tick is
+	 * 4 microseconds (e.g. 100 microseconds = 25 ticks)
+	 */
+
+ pdata->rs_vars.rs_periodic_timeout =
+ RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
+
+	/* this is the threshold below which no timer arming will occur.
+	 * The 1.25 coefficient makes the threshold a little bigger
+	 * than the real time, to compensate for timer inaccuracy
+	 */
+ pdata->rs_vars.rs_threshold =
+ (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
+
+ /* rate shaping per-vnic variables */
+ for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+		/* global vnic counter - maximal Mbps for this vnic */
+		vdata->vnic_max_rate[vnic].vn_counter.rate =
+			input_data->vnic_max_rate[vnic];
+		/* the quota in each timer period - number of bytes
+		 * transmitted in this period
+		 */
+ vdata->vnic_max_rate[vnic].vn_counter.quota =
+ RS_PERIODIC_TIMEOUT_USEC *
+ (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
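+		/* e.g. a 10000 Mbps vnic with the 400 usec period gets
+		 * a quota of 400 * 10000 / 8 = 500000 bytes per period
+		 */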
+ }
+
+}
+
+static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
+ u32 r_param, struct cmng_init *ram_data)
+{
+ u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+
+ /* this is the resolution of the fairness timer */
+ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+
+ /* fairness per-port variables
+ * for 10G it is 1000usec. for 1G it is 10000usec.
+ */
+ tFair = T_FAIR_COEF / input_data->port_rate;
+
+ /* this is the threshold below which we won't arm the timer anymore */
+ pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+
+	/* we multiply by 1e3/8 to get bytes/msec. We don't want the credit
+	 * to exceed T_FAIR * FAIR_MEM (the algorithm resolution)
+	 */
+ pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
+
+ /* since each tick is 4 microSeconds */
+ pdata->fair_vars.fairness_timeout =
+ fair_periodic_timeout_usec / SDM_TICKS;
+
+ /* calculate sum of weights */
+ vnicWeightSum = 0;
+
+ for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
+ vnicWeightSum += input_data->vnic_min_rate[vnic];
+
+ /* global vnic counter */
+ if (vnicWeightSum > 0) {
+ /* fairness per-vnic variables */
+ for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+ /* this is the credit for each period of the fairness
+ * algorithm - number of bytes in T_FAIR (this vnic
+ * share of the port rate)
+ */
+ vdata->vnic_min_rate[vnic].vn_credit_delta =
+ (u32)input_data->vnic_min_rate[vnic] * 100 *
+ (T_FAIR_COEF / (8 * 100 * vnicWeightSum));
+ if (vdata->vnic_min_rate[vnic].vn_credit_delta <
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH) {
+ vdata->vnic_min_rate[vnic].vn_credit_delta =
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH;
+ }
+ }
+ }
+}
+
+static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
+ u32 r_param, struct cmng_init *ram_data)
+{
+ u32 vnic, cos;
+ u32 cosWeightSum = 0;
+ struct cmng_vnic *vdata = &ram_data->vnic;
+ struct cmng_struct_per_port *pdata = &ram_data->port;
+
+ for (cos = 0; cos < MAX_COS_NUMBER; cos++)
+ cosWeightSum += input_data->cos_min_rate[cos];
+
+ if (cosWeightSum > 0) {
+
+ for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+			/* Since COS and vnic fairness shouldn't be active
+			 * together, the rate to divide between the COSes is
+			 * the port rate.
+			 */
+ u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
+ for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
+ /* this is the credit for each period of
+ * the fairness algorithm - number of bytes
+ * in T_FAIR (this cos share of the vnic rate)
+ */
+ ccd[cos] =
+ (u32)input_data->cos_min_rate[cos] * 100 *
+ (T_FAIR_COEF / (8 * 100 * cosWeightSum));
+ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ + MIN_ABOVE_THRESH) {
+ ccd[cos] =
+ pdata->fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH;
+ }
+ }
+ }
+ }
+}
+
+static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
+ struct cmng_init *ram_data)
+{
+ /* in microSeconds */
+ ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
+}
+
+/* Congestion management port init */
+static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
+ struct cmng_init *ram_data)
+{
+ u32 r_param;
+ memset(ram_data, 0, sizeof(struct cmng_init));
+
+ ram_data->port.flags = input_data->flags;
+
+ /* number of bytes transmitted in a rate of 10Gbps
+ * in one usec = 1.25KB.
+ */
+ r_param = BITS_TO_BYTES(input_data->port_rate);
+ bnx2x_init_max(input_data, r_param, ram_data);
+ bnx2x_init_min(input_data, r_param, ram_data);
+ bnx2x_init_fw_wrr(input_data, r_param, ram_data);
+ bnx2x_init_safc(input_data, ram_data);
+}
+
+
+
+#define INITOP_SET 0 /* set the HW directly */
+#define INITOP_CLEAR 1 /* clear the HW directly */
+#define INITOP_INIT 2 /* set the init-value array */
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+struct ilt_line {
+ dma_addr_t page_mapping;
+ void *page;
+ u32 size;
+};
+
+struct ilt_client_info {
+ u32 page_size;
+ u16 start;
+ u16 end;
+ u16 client_num;
+ u16 flags;
+#define ILT_CLIENT_SKIP_INIT 0x1
+#define ILT_CLIENT_SKIP_MEM 0x2
+};
+
+struct bnx2x_ilt {
+ u32 start_line;
+ struct ilt_line *lines;
+ struct ilt_client_info clients[4];
+#define ILT_CLIENT_CDU 0
+#define ILT_CLIENT_QM 1
+#define ILT_CLIENT_SRC 2
+#define ILT_CLIENT_TM 3
+};
+
+/****************************************************************************
+* SRC configuration
+****************************************************************************/
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK, \
+ block##_REG_##block##_PRTY_STS_CLR, \
+ en_mask, {m1, m1h, m2, m3}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_0, \
+ block##_REG_##block##_PRTY_STS_CLR_0, \
+ en_mask, {m1, m1h, m2, m3}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_1, \
+ block##_REG_##block##_PRTY_STS_CLR_1, \
+ en_mask, {m1, m1h, m2, m3}, #block"_1" \
+}
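+
+/* The token pasting above builds the per-block register names; e.g.
+ * BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0) expands to
+ *
+ *	{ HC_REG_HC_PRTY_MASK, HC_REG_HC_PRTY_STS_CLR,
+ *	  0x7, {0x7, 0x7, 0, 0}, "HC" }
+ */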
+
+static const struct {
+ u32 mask_addr;
+ u32 sts_clr_addr;
+ u32 en_mask; /* Mask to enable parity attentions */
+ struct {
+ u32 e1; /* 57710 */
+ u32 e1h; /* 57711 */
+ u32 e2; /* 57712 */
+ u32 e3; /* 578xx */
+ } reg_mask; /* Register mask (all valid bits) */
+ char name[8]; /* Block's longest name is 7 characters long
+ * (name + suffix)
+ */
+} bnx2x_blocks_parity_data[] = {
+ /* bit 19 masked */
+ /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+ /* bit 5,18,20-31 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+ /* bit 5 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+ /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+ /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+ /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+ * want to handle "system kill" flow at the moment.
+ */
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
+ 0x7ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
+ BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
+ BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
+ {0xf, 0xf, 0xf, 0xf}, "UPB"},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
+ BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* The registers below control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set, attentions are
+ * enabled; when cleared, disabled.
+ */
+static const struct {
+ u32 addr;
+ u32 bits;
+} mcp_attn_ctl_regs[] = {
+ { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_AEU_ENABLE_MCP_PRTY_BITS },
+ { MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+ { MISC_REG_AEU_ENABLE4_PXP_1,
+ MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+ int i;
+ u32 reg_val;
+
+ for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
+
+ if (enable)
+ reg_val |= mcp_attn_ctl_regs[i].bits;
+ else
+ reg_val &= ~mcp_attn_ctl_regs[i].bits;
+
+ REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
+ }
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+ if (CHIP_IS_E1(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+ else if (CHIP_IS_E1H(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+ else if (CHIP_IS_E2(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+ else /* CHIP_IS_E3 */
+ return bnx2x_blocks_parity_data[idx].reg_mask.e3;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (dis_mask) {
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ dis_mask);
+ DP(NETIF_MSG_HW, "Setting parity mask "
+ "for %s to\t\t0x%x\n",
+ bnx2x_blocks_parity_data[i].name, dis_mask);
+ }
+ }
+
+ /* Disable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, false);
+}
+
+/* Clear the parity error status registers. */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+ u32 reg_val, mcp_aeu_bits =
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+ /* Clear SEM_FAST parities */
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask) {
+ reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+ sts_clr_addr);
+ if (reg_val & reg_mask)
+ DP(NETIF_MSG_HW,
+ "Parity errors in %s: 0x%x\n",
+ bnx2x_blocks_parity_data[i].name,
+ reg_val & reg_mask);
+ }
+ }
+
+ /* Check if there were parity attentions in MCP */
+ reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+ if (reg_val & mcp_aeu_bits)
+ DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+ reg_val & mcp_aeu_bits);
+
+ /* Clear parity attentions in MCP:
+ * [7] clears Latched rom_parity
+ * [8] clears Latched ump_rx_parity
+ * [9] clears Latched ump_tx_parity
+ * [10] clears Latched scpad_parity (both ports)
+ */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask)
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+ }
+
+ /* Enable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, true);
+}
+
+
+#endif /* BNX2X_INIT_H */
+
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
new file mode 100644
index 0000000000..fc7fce6426
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -0,0 +1,938 @@
+/* bnx2x_init_ops.h: QLogic Everest network driver.
+ *                   Static functions needed during initialization.
+ * This file is "included" in bnx2x_main.c.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ */
+
+#ifndef BNX2X_INIT_OPS_H
+#define BNX2X_INIT_OPS_H
+
+
+#ifndef BP_ILT
+#define BP_ILT(bp) NULL
+#endif
+
+#ifndef BP_FUNC
+#define BP_FUNC(bp) 0
+#endif
+
+#ifndef BP_PORT
+#define BP_PORT(bp) 0
+#endif
+
+#ifndef BNX2X_ILT_FREE
+#define BNX2X_ILT_FREE(x, y, sz)
+#endif
+
+#ifndef BNX2X_ILT_ZALLOC
+#define BNX2X_ILT_ZALLOC(x, y, sz)
+#endif
+
+#ifndef ILOG2
+#define ILOG2(x) x
+#endif
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
+ dma_addr_t phys_addr, u32 addr,
+ u32 len);
+
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ REG_WR(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
+ u8 wb)
+{
+ if (bp->dmae_ready)
+ bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (wb && CHIP_IS_E1(bp))
+ bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+ else
+ bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
+ u32 len, u8 wb)
+{
+ u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
+ u32 buf_len32 = buf_len/4;
+ u32 i;
+
+ memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
+
+ for (i = 0; i < len; i += buf_len32) {
+ u32 cur_len = min(buf_len32, len - i);
+
+ bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
+ }
+}
+
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+ if (bp->dmae_ready)
+ bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (CHIP_IS_E1(bp))
+ bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+ else
+ bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len64)
+{
+ u32 buf_len32 = FW_BUF_SIZE/4;
+ u32 len = len64*2;
+ u64 data64 = 0;
+ u32 i;
+
+ /* 64 bit value is in a blob: first low DWORD, then high DWORD */
+ data64 = HILO_U64((*(data + 1)), (*data));
+
+ len64 = min((u32)(FW_BUF_SIZE/8), len64);
+ for (i = 0; i < len64; i++) {
+ u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
+
+ *pdata = data64;
+ }
+
+ for (i = 0; i < len; i += buf_len32) {
+ u32 cur_len = min(buf_len32, len - i);
+
+ bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
+ }
+}
+
+/*********************************************************
+   There are different blobs for each PRAM section.
+   In addition, each blob write operation is divided into a few operations
+   in order to decrease the amount of physically contiguous buffer needed.
+   Thus, when we select a blob, the address may have some offset
+   from the beginning of the PRAM section.
+   The same holds for the INT_TABLE sections.
+**********************************************************/
+#define IF_IS_INT_TABLE_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+
+#define IF_IS_PRAM_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
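+
+/* These macros expand to a bare if-statement header, which is what makes the
+ * unusual if/else chain in bnx2x_sel_blob() below legal C; e.g.
+ *
+ *	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+ *		data = INIT_TSEM_INT_TABLE_DATA(bp);
+ *
+ * expands (modulo parentheses) to
+ *
+ *	if (TSEM_REG_INT_TABLE <= addr && TSEM_REG_INT_TABLE + 0x400 >= addr)
+ *		data = INIT_TSEM_INT_TABLE_DATA(bp);
+ */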
+
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
+ const u8 *data)
+{
+ IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+ data = INIT_TSEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+ data = INIT_CSEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+ data = INIT_USEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+ data = INIT_XSEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+ data = INIT_TSEM_PRAM_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+ data = INIT_CSEM_PRAM_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+ data = INIT_USEM_PRAM_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+ data = INIT_XSEM_PRAM_DATA(bp);
+
+ return data;
+}
+
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
+{
+ if (bp->dmae_ready)
+ VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (CHIP_IS_E1(bp))
+ bnx2x_init_ind_wr(bp, addr, data, len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+ else
+ bnx2x_init_str_wr(bp, addr, data, len);
+}
+
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
+ u32 val_hi)
+{
+ u32 wb_write[2];
+
+ wb_write[0] = val_lo;
+ wb_write[1] = val_hi;
+ REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
+}
+
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
+ u32 blob_off)
+{
+ const u8 *data = NULL;
+ int rc;
+ u32 i;
+
+ data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
+
+ rc = bnx2x_gunzip(bp, data, len);
+ if (rc)
+ return;
+
+ /* gunzip_outlen is in dwords */
+ len = GUNZIP_OUTLEN(bp);
+ for (i = 0; i < len; i++)
+ ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
+ cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
+
+ bnx2x_write_big_buf_wb(bp, addr, len);
+}
+
+static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
+{
+ u16 op_start =
+ INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+ STAGE_START)];
+ u16 op_end =
+ INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+ STAGE_END)];
+ const union init_op *op;
+ u32 op_idx, op_type, addr, len;
+ const u32 *data, *data_base;
+
+ /* If empty block */
+ if (op_start == op_end)
+ return;
+
+ data_base = INIT_DATA(bp);
+
+ for (op_idx = op_start; op_idx < op_end; op_idx++) {
+
+ op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
+ /* Get generic data */
+ op_type = op->raw.op;
+ addr = op->raw.offset;
+		/* Get data that's used for OP_SW, OP_WB, OP_ZP and
+		 * OP_WR_64 (we assume that op_arr_write and op_write have the
+		 * same structure).
+		 */
+ len = op->arr_wr.data_len;
+ data = data_base + op->arr_wr.data_off;
+
+ switch (op_type) {
+ case OP_RD:
+ REG_RD(bp, addr);
+ break;
+ case OP_WR:
+ REG_WR(bp, addr, op->write.val);
+ break;
+ case OP_SW:
+ bnx2x_init_str_wr(bp, addr, data, len);
+ break;
+ case OP_WB:
+ bnx2x_init_wr_wb(bp, addr, data, len);
+ break;
+ case OP_ZR:
+ bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
+ break;
+ case OP_WB_ZR:
+ bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
+ break;
+ case OP_ZP:
+ bnx2x_init_wr_zp(bp, addr, len,
+ op->arr_wr.data_off);
+ break;
+ case OP_WR_64:
+ bnx2x_init_wr_64(bp, addr, data, len);
+ break;
+ case OP_IF_MODE_AND:
+ /* if any of the flags doesn't match, skip the
+ * conditional block.
+ */
+ if ((INIT_MODE_FLAGS(bp) &
+ op->if_mode.mode_bit_map) !=
+ op->if_mode.mode_bit_map)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ case OP_IF_MODE_OR:
+ /* if all the flags don't match, skip the conditional
+ * block.
+ */
+ if ((INIT_MODE_FLAGS(bp) &
+ op->if_mode.mode_bit_map) == 0)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ default:
+ /* Should never get here! */
+
+ break;
+ }
+ }
+}
+
+
+/****************************************************************************
+* PXP Arbiter
+****************************************************************************/
+/*
+ * This code configures the PCI read/write arbiter
+ * which implements a weighted round robin
+ * between the virtual queues in the chip.
+ *
+ * The values were derived for each PCI max payload and max request size.
+ * Since max payload and max request size are only known at run time,
+ * this is done as a separate init stage.
+ */
+
+#define NUM_WR_Q 13
+#define NUM_RD_Q 29
+#define MAX_RD_ORD 3
+#define MAX_WR_ORD 2
+
+/* configuration for one arbiter queue */
+struct arb_line {
+ int l;
+ int add;
+ int ubound;
+};
+
+/* derived configuration for each read queue for each max request size */
+static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
+/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
+ { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
+ { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
+};
+
+/* derived configuration for each write queue for each max request size */
+static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
+/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
+ { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
+ { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
+ { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
+};
+
+/* register addresses for read queues */
+static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
+ PXP2_REG_RQ_BW_RD_UBOUND0},
+ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
+ PXP2_REG_RQ_BW_RD_UBOUND4},
+ {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
+ PXP2_REG_RQ_BW_RD_UBOUND5},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+ {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
+ PXP2_REG_RQ_BW_RD_UBOUND12},
+ {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
+ PXP2_REG_RQ_BW_RD_UBOUND13},
+ {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
+ PXP2_REG_RQ_BW_RD_UBOUND14},
+ {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
+ PXP2_REG_RQ_BW_RD_UBOUND15},
+ {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
+ PXP2_REG_RQ_BW_RD_UBOUND16},
+ {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
+ PXP2_REG_RQ_BW_RD_UBOUND17},
+ {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
+ PXP2_REG_RQ_BW_RD_UBOUND18},
+/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
+ PXP2_REG_RQ_BW_RD_UBOUND19},
+ {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
+ PXP2_REG_RQ_BW_RD_UBOUND20},
+ {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
+ PXP2_REG_RQ_BW_RD_UBOUND22},
+ {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
+ PXP2_REG_RQ_BW_RD_UBOUND23},
+ {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
+ PXP2_REG_RQ_BW_RD_UBOUND24},
+ {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
+ PXP2_REG_RQ_BW_RD_UBOUND25},
+ {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
+ PXP2_REG_RQ_BW_RD_UBOUND26},
+ {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
+ PXP2_REG_RQ_BW_RD_UBOUND27},
+ {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28}
+};
+
+/* register addresses for write queues */
+static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+ {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28},
+ {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
+ PXP2_REG_RQ_BW_WR_UBOUND29},
+ {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
+ PXP2_REG_RQ_BW_WR_UBOUND30}
+};
+
+static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
+ int w_order)
+{
+ u32 val, i;
+
+ if (r_order > MAX_RD_ORD) {
+ DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
+ r_order, MAX_RD_ORD);
+ r_order = MAX_RD_ORD;
+ }
+ if (w_order > MAX_WR_ORD) {
+ DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
+ w_order, MAX_WR_ORD);
+ w_order = MAX_WR_ORD;
+ }
+ if (CHIP_REV_IS_FPGA(bp)) {
+ DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
+ w_order = 0;
+ }
+ DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
+
+ for (i = 0; i < NUM_RD_Q-1; i++) {
+ REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+ REG_WR(bp, read_arb_addr[i].add,
+ read_arb_data[i][r_order].add);
+ REG_WR(bp, read_arb_addr[i].ubound,
+ read_arb_data[i][r_order].ubound);
+ }
+
+ for (i = 0; i < NUM_WR_Q-1; i++) {
+ if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+ (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+
+ REG_WR(bp, write_arb_addr[i].l,
+ write_arb_data[i][w_order].l);
+
+ REG_WR(bp, write_arb_addr[i].add,
+ write_arb_data[i][w_order].add);
+
+ REG_WR(bp, write_arb_addr[i].ubound,
+ write_arb_data[i][w_order].ubound);
+ } else {
+
+ val = REG_RD(bp, write_arb_addr[i].l);
+ REG_WR(bp, write_arb_addr[i].l,
+ val | (write_arb_data[i][w_order].l << 10));
+
+ val = REG_RD(bp, write_arb_addr[i].add);
+ REG_WR(bp, write_arb_addr[i].add,
+ val | (write_arb_data[i][w_order].add << 10));
+
+ val = REG_RD(bp, write_arb_addr[i].ubound);
+ REG_WR(bp, write_arb_addr[i].ubound,
+ val | (write_arb_data[i][w_order].ubound << 7));
+ }
+ }
+
+ val = write_arb_data[NUM_WR_Q-1][w_order].add;
+ val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+ val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+ REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
+
+ val = read_arb_data[NUM_RD_Q-1][r_order].add;
+ val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+ val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+ REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
+
+ REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
+ REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
+ REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
+ REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
+
+ if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
+ REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+ else if (CHIP_IS_E2(bp))
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
+ else
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+
+ if (!CHIP_IS_E1(bp)) {
+ /* MPS w_order optimal TH presently TH
+ * 128 0 0 2
+ * 256 1 1 3
+ * >=512 2 2 3
+ */
+ /* DMAE is special */
+ if (!CHIP_IS_E1H(bp)) {
+ /* E2 can use optimal TH */
+ val = w_order;
+ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
+ } else {
+ val = ((w_order == 0) ? 2 : 3);
+ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
+ }
+
+ REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
+ }
+
+ /* Validate number of tags supported by device */
+#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
+ val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
+ val &= 0xFF;
+ if (val <= 0x20)
+ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
+}
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+/*
+ * This code hides the low level HW interaction for ILT management and
+ * configuration. The API consists of a shadow ILT table which is set by the
+ * driver and a set of routines to use it to configure the HW.
+ *
+ */
+
+/* ILT HW init operations */
+
+/* ILT memory management operations */
+#define ILT_MEMOP_ALLOC 0
+#define ILT_MEMOP_FREE 1
+
+/* The phys address is shifted right 12 bits and a 1 (valid bit) is
+ * added at the 53rd bit; then, since this is a wide register(TM),
+ * we split it into two 32 bit writes
+ */
+#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
+#define ILT_RANGE(f, l) (((l) << 10) | f)
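+
+/* Worked example (illustrative values): for a 4KB-aligned DMA address
+ * of 0x0000001234567000, ILT_ADDR1() yields 0x01234567 (the address
+ * shifted right by 12) and ILT_ADDR2() yields 0x00100000 (just the
+ * valid bit, since bits 63:44 of this address are zero), while
+ * ILT_RANGE(0x10, 0x1f) packs first/last as (0x1f << 10) | 0x10
+ * == 0x7c10.
+ */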
+
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
+ struct ilt_line *line, u32 size, u8 memop)
+{
+ if (memop == ILT_MEMOP_FREE) {
+ BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
+ return 0;
+ }
+ BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
+ if (!line->page)
+ return -1;
+ line->size = size;
+ return 0;
+}
+
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
+ u8 memop)
+{
+ int i, rc;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli;
+
+ if (!ilt || !ilt->lines)
+ return -1;
+
+ ilt_cli = &ilt->clients[cli_num];
+
+ if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
+ return 0;
+
+ for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+ rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
+ ilt_cli->page_size, memop);
+ }
+ return rc;
+}
+
+static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
+{
+ int rc = 0;
+
+ if (CONFIGURE_NIC_MODE(bp))
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+ return rc;
+}
+
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+{
+ int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
+ if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+
+ return rc;
+}
+
+static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
+ dma_addr_t page_mapping)
+{
+ u32 reg;
+
+ if (CHIP_IS_E1(bp))
+ reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+
+ bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
+}
+
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt, int idx, u8 initop)
+{
+ dma_addr_t null_mapping;
+ int abs_idx = ilt->start_line + idx;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
+ break;
+ case INITOP_CLEAR:
+ null_mapping = 0;
+ bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
+ break;
+ }
+}
+
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli,
+ u32 ilt_start, u8 initop)
+{
+ u32 start_reg = 0;
+ u32 end_reg = 0;
+
+ /* The boundary op is either SET or INIT
+ * (CLEAR => SET), and for now SET ~~ INIT */
+
+ /* find the appropriate regs */
+ if (CHIP_IS_E1(bp)) {
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_PSWRQ_QM0_L2P;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_PSWRQ_TM0_L2P;
+ break;
+ }
+ REG_WR(bp, start_reg + BP_FUNC(bp)*4,
+ ILT_RANGE((ilt_start + ilt_cli->start),
+ (ilt_start + ilt_cli->end)));
+ } else {
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_QM_LAST_ILT;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_TM_LAST_ILT;
+ break;
+ }
+ REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
+ REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
+ }
+}
+
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ u8 initop)
+{
+ int i;
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+ bnx2x_ilt_line_init_op(bp, ilt, i, initop);
+
+ /* init/clear the ILT boundaries */
+ bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
+}
+
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
+ int cli_num, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
+{
+ if (CONFIGURE_NIC_MODE(bp))
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+{
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
+ if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+}
+
+static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
+ u32 psz_reg, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/*
+ * Called during the init common stage; ILT clients should be initialized
+ * prior to calling this function.
+ */
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+{
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
+ PXP2_REG_RQ_CDU_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
+ PXP2_REG_RQ_QM_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
+ PXP2_REG_RQ_SRC_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
+ PXP2_REG_RQ_TM_P_SIZE, initop);
+}
+
+/****************************************************************************
+* QM initializations
+****************************************************************************/
+#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
+#define QM_INIT_MIN_CID_COUNT 31
+#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
+
+/* called during init port stage */
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
+{
+ int port = BP_PORT(bp);
+
+ if (QM_INIT(qm_cid_count)) {
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
+ qm_cid_count/16 - 1);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+ }
+}
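+
+/* Example: with qm_cid_count == 64 the write above programs
+ * QM_REG_CONNNUM_0 with 64/16 - 1 == 3, apparently the number of
+ * 16-connection blocks minus one.
+ */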
+
+static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u32 base_reg, u32 reg)
+{
+ int i;
+ u32 wb_data[2] = {0, 0};
+ for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
+ REG_WR(bp, base_reg + i*4,
+ qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+ bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2);
+ }
+}
+
+/* called during init common stage */
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
+{
+ if (!QM_INIT(qm_cid_count))
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+ QM_REG_BASEADDR, QM_REG_PTRTBL);
+ if (CHIP_IS_E1H(bp))
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+ QM_REG_BASEADDR_EXT_A,
+ QM_REG_PTRTBL_EXT_A);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/****************************************************************************
+* SRC initializations
+****************************************************************************/
+/* called during init func stage */
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+ dma_addr_t t2_mapping, int src_cid_count)
+{
+ int i;
+ int port = BP_PORT(bp);
+
+ /* Initialize T2 */
+ for (i = 0; i < src_cid_count-1; i++)
+ t2[i].next = (u64)(t2_mapping +
+ (i+1)*sizeof(struct src_ent));
+
+ /* tell the searcher where the T2 table is */
+ REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
+
+ bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
+ U64_LO(t2_mapping), U64_HI(t2_mapping));
+
+ bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
+ U64_LO((u64)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)),
+ U64_HI((u64)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)));
+}
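+
+/* The loop above links the T2 entries into a free list in bus-address
+ * space: entry i points at the DMA address of entry i+1, and the
+ * FIRSTFREE/LASTFREE registers bound the list for the searcher.  As a
+ * sketch with made-up values, for src_cid_count == 4 and t2_mapping
+ * == T: t2[0].next == T + sizeof(struct src_ent), t2[1].next == T +
+ * 2*sizeof(struct src_ent), t2[2].next == T + 3*sizeof(struct src_ent),
+ * and t2[3].next is left as allocated.
+ */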
+#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
new file mode 100644
index 0000000000..02808513ff
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -0,0 +1,14065 @@
+/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mutex.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8);
+/********************************************************/
+#define MDIO_ACCESS_TIMEOUT 1000
+#define WC_LANE_MAX 4
+#define I2C_SWITCH_WIDTH 2
+#define I2C_BSC0 0
+#define I2C_BSC1 1
+#define I2C_WA_RETRY_CNT 3
+#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
+#define MCPR_IMC_COMMAND_READ_OP 1
+#define MCPR_IMC_COMMAND_WRITE_OP 2
+
+/* LED Blink rate that will achieve ~15.9Hz */
+#define LED_BLINK_RATE_VAL_E3 354
+#define LED_BLINK_RATE_VAL_E1X_E2 480
+/***********************************************************/
+/* Shortcut definitions */
+/***********************************************************/
+
+#define NIG_LATCH_BC_ENABLE_MI_INT 0
+
+#define NIG_STATUS_EMAC0_MI_INT \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
+#define NIG_STATUS_XGXS0_LINK10G \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
+#define NIG_STATUS_XGXS0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
+#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
+#define NIG_STATUS_SERDES0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
+#define NIG_MASK_MI_INT \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
+#define NIG_MASK_XGXS0_LINK10G \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
+#define NIG_MASK_XGXS0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
+#define NIG_MASK_SERDES0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
+
+#define MDIO_AN_CL73_OR_37_COMPLETE \
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+
+#define XGXS_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+
+#define SERDES_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
+#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
+#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
+ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+#define AUTONEG_SGMII_FIBER_AUTODET \
+ SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+
+#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+#define GP_STATUS_SPEED_MASK \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+#define GP_STATUS_10G_HIG \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+#define GP_STATUS_10G_CX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+#define GP_STATUS_10G_KX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
+#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
+
+#define LINK_UPDATE_MASK \
+ (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
+ LINK_STATUS_LINK_UP | \
+ LINK_STATUS_PHYSICAL_LINK_FLAG | \
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
+ LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
+ LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
+ LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
+ LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
+
+#define SFP_EEPROM_CON_TYPE_ADDR 0x2
+ #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
+ #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
+
+#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3
+ #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4)
+ #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5)
+ #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6
+ #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0)
+ #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1)
+ #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2)
+ #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3)
+
+#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+
+#define SFP_EEPROM_OPTIONS_ADDR 0x40
+ #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
+#define SFP_EEPROM_OPTIONS_SIZE 2
+
+#define EDC_MODE_LINEAR 0x0022
+#define EDC_MODE_LIMITING 0x0044
+#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_ACTIVE_DAC 0x0066
+
+/* ETS defines*/
+#define DCBX_INVALID_COS (0xFF)
+
+#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
+#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
+#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
+#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
+#define ETS_E3B0_PBF_MIN_W_VAL (10000)
+
+#define MAX_PACKET_SIZE (9700)
+#define MAX_KR_LINK_RETRY 4
+#define DEFAULT_TX_DRV_BRDCT 2
+#define DEFAULT_TX_DRV_IFIR 0
+#define DEFAULT_TX_DRV_POST2 3
+#define DEFAULT_TX_DRV_IPRE_DRIVER 6
+
+/**********************************************************/
+/* INTERFACE */
+/**********************************************************/
+
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_write(_bp, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
+
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_read(_bp, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
+
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
+static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
+{
+ u32 val = REG_RD(bp, reg);
+
+ val |= bits;
+ REG_WR(bp, reg, val);
+ return val;
+}
+
+static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
+{
+ u32 val = REG_RD(bp, reg);
+
+ val &= ~bits;
+ REG_WR(bp, reg, val);
+ return val;
+}
+
+/*
+ * bnx2x_check_lfa - Check whether link reinitialization is required, or
+ * whether the link flap can be avoided.
+ *
+ * @params: link parameters
+ * Returns 0 if the Link Flap Avoidance conditions are met; otherwise, the
+ * failed condition code.
+ */
+static int bnx2x_check_lfa(struct link_params *params)
+{
+ u32 link_status, cfg_idx, lfa_mask, cfg_size;
+ u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
+ u32 saved_val, req_val, eee_status;
+ struct bnx2x *bp = params->bp;
+
+ additional_config =
+ REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+
+ /* NOTE: must be first condition checked -
+ * to verify DCC bit is cleared in any case!
+ */
+ if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
+ DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config),
+ additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
+ return LFA_DCC_LFA_DISABLED;
+ }
+
+ /* Verify that link is up */
+ link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status));
+ if (!(link_status & LINK_STATUS_LINK_UP))
+ return LFA_LINK_DOWN;
+
+ /* if loaded after BOOT from SAN, don't flap the link in any case and
+ * rely on link set by preboot driver
+ */
+ if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN)
+ return 0;
+
+ /* Verify that loopback mode is not set */
+ if (params->loopback_mode)
+ return LFA_LOOPBACK_ENABLED;
+
+ /* Verify that MFW supports LFA */
+ if (!params->lfa_base)
+ return LFA_MFW_IS_TOO_OLD;
+
+ if (params->num_phys == 3) {
+ cfg_size = 2;
+ lfa_mask = 0xffffffff;
+ } else {
+ cfg_size = 1;
+ lfa_mask = 0xffff;
+ }
+
+ /* Compare Duplex */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex));
+ req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_DUPLEX_MISMATCH;
+ }
+ /* Compare Flow Control */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl));
+ req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+ /* Compare Link Speed */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed));
+ req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_LINK_SPEED_MISMATCH;
+ }
+
+ for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
+ cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]));
+
+ if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
+ DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
+ cur_speed_cap_mask,
+ params->speed_cap_mask[cfg_idx]);
+ return LFA_SPEED_CAP_MISMATCH;
+ }
+ }
+
+ cur_req_fc_auto_adv =
+ REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config)) &
+ REQ_FC_AUTO_ADV_MASK;
+
+ if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+ DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
+ cur_req_fc_auto_adv, params->req_fc_auto_adv);
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+
+ eee_status = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
+
+ if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
+ (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
+ ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
+ (params->eee_mode & EEE_MODE_ADV_LPI))) {
+ DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
+ eee_status);
+ return LFA_EEE_MISMATCH;
+ }
+
+ /* LFA conditions are met */
+ return 0;
+}
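+
+/* A note on the packing compared above: the (x[0] | x[1] << 16)
+ * construction suggests each req_* shmem word carries the first
+ * configuration in bits 15:0 and the second in bits 31:16, so with
+ * cfg_size == 1 (lfa_mask == 0xffff) only the first configuration is
+ * compared, while dual-media setups (num_phys == 3) compare both.
+ */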
+/******************************************************************/
+/* EPIO/GPIO section */
+/******************************************************************/
+static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
+{
+ u32 epio_mask, gp_oenable;
+ *en = 0;
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
+ return;
+ }
+
+ epio_mask = 1 << epio_pin;
+ /* Set this EPIO to input so its value can be read */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+ *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
+{
+ u32 epio_mask, gp_output, gp_oenable;
+
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
+ epio_mask = 1 << epio_pin;
+ /* Set the output value for this EPIO */
+ gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
+ if (en)
+ gp_output |= epio_mask;
+ else
+ gp_output &= ~epio_mask;
+
+ REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+ /* Enable this EPIO as an output */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
+ }
+}
+
+static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return -EINVAL;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ *val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+ }
+ return 0;
+}
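+
+/* Decoding sketch for GPIO-type pin configs: assuming values are
+ * contiguous from PIN_CFG_GPIO0_P0 (port 0 pins 0-3, then port 1 pins
+ * 0-3), pin_cfg == PIN_CFG_GPIO0_P0 + 6 decodes to gpio_num == 2
+ * (6 & 0x3) and gpio_port == 1 (6 >> 2).
+ */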
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
+{
+ /* ETS disabled configuration*/
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
+
+ /* mapping between entry priority to client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+ /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+ /* For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /* If ETS mode is enabled (there is no strict priority) defines a WFQ
+ * weight for COS0/COS1.
+ */
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+/******************************************************************************
+* Description:
+* Getting min_w_val will be set according to line speed .
+*.
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+ u32 min_w_val = 0;
+ /* Calculate min_w_val.*/
+ if (vars->link_up) {
+ if (vars->line_speed == SPEED_20000)
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+ } else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ /* If the link isn't up (e.g. a static configuration), the
+ * 20GBPS value is used.
+ */
+ return min_w_val;
+}
+/******************************************************************************
+* Description:
+* Derive the credit upper bound from min_w_val.
+******************************************************************************/
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
+ const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+ MAX_PACKET_SIZE);
+ return credit_upper_bound;
+}
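+
+/* Example: at up to 10Gbps min_w_val is 1360, so the bound is
+ * max(150 * 1360, MAX_PACKET_SIZE) == max(204000, 9700) == 204000;
+ * the MAX_PACKET_SIZE floor only kicks in for very small min_w_val.
+ */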
+/******************************************************************************
+* Description:
+* Set credit upper bound for NIG.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+ if (!port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+ credit_upper_bound);
+ }
+}
+/******************************************************************************
+* Description:
+* Return the NIG ETS registers to their init values, except
+* credit_upper_bound, which isn't used in this configuration (no WFQ is
+* enabled) and will be configured according to the spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
+ /* Mapping between entry priority to client number (0,1,2 -debug and
+ * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
+ * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
+ * reset value or init tool
+ */
+ if (port) {
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+ } else {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+ }
+ /* For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+ NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* Mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ if (port) {
+ /*Port 1 has 6 COS*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+ } else {
+ /*Port 0 has 9 COS*/
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+ 0x43210876);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+ }
+
+ /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ if (port)
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+ else
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+ /* Please note that the register addresses are not contiguous, so a
+ * for loop here is not appropriate. In 2 port mode only COS0-5 of
+ * port0 can be used; DEBUG0, DEBUG1 and MGMT are never used for WFQ.
+ * In 4 port mode only COS0-2 of port1 can be used; DEBUG0, DEBUG1
+ * and MGMT are never used for WFQ.
+ */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+ if (!port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+ }
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for PBF.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+ const u8 port = params->port;
+ u32 base_upper_bound = 0;
+ u8 max_cos = 0;
+ u8 i = 0;
+ /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
+ * port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (!port) {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+* Return the PBF ETS registers to their init values, except
+* credit_upper_bound, which isn't used in this configuration (no WFQ is
+* enabled) and will be configured according to the spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 i = 0;
+ u32 base_weight = 0;
+ u8 max_cos = 0;
+
+ /* Mapping between entry priority to client number 0 - COS0
+ * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
+ else
+ /* (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
+
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000)*/
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+ else
+ /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+ PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
+ /* In 2 port mode port0 has COS0-5 that can be used for WFQ.
+ * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (!port) {
+ base_weight = PBF_REG_COS0_WEIGHT_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_weight = PBF_REG_COS0_WEIGHT_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_weight + (0x4 * i), 0);
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+* Description:
+* E3B0 disable returns the registers to their init values.
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+ bnx2x_ets_e3b0_pbf_disabled(params);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Disable returns the registers to their init values.
+*
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+
+ if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+ bnx2x_ets_e2e3a0_disabled(params);
+ else if (CHIP_IS_E3B0(bp))
+ bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+ return -EINVAL;
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+* Set the COS mapping to SP and BW; until this point none of the COS are
+* set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ const u8 cos_sp_bitmap,
+ const u8 cos_bw_bitmap)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+ const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+ const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+ const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ nig_cli_subject2wfq_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+ pbf_cli_subject2wfq_bitmap);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+ const u8 cos_entry,
+ const u32 min_w_val_nig,
+ const u32 min_w_val_pbf,
+ const u16 total_bw,
+ const u8 bw,
+ const u8 port)
+{
+ u32 nig_reg_adress_crd_weight = 0;
+ u32 pbf_reg_adress_crd_weight = 0;
+ /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_adress_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
+ case 1:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
+ case 2:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
+ }
+
+ REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+
+ REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
+
+ return 0;
+}
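+
+/* Example: a COS granted bw == 25 of total_bw == 100 at up to 10Gbps
+ * (min_w_val_nig == 1360, min_w_val_pbf == 10000) gets a NIG credit
+ * weight of (25 * 1360) / 100 == 340 and a PBF weight of
+ * (25 * 10000) / 100 == 2500.
+ */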
+/******************************************************************************
+* Description:
+* Calculate the total BW. A value of 0 isn't legal.
+*
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+ const struct link_params *params,
+ struct bnx2x_ets_params *ets_params,
+ u16 *total_bw)
+{
+ struct bnx2x *bp = params->bp;
+ u8 cos_idx = 0;
+ u8 is_bw_cos_exist = 0;
+
+ *total_bw = 0;
+ /* Calculate total BW requested */
+ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+ if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+ is_bw_cos_exist = 1;
+ if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
+ "was set to 0\n");
+ /* This is to prevent a state when ramrods
+ * can't be sent
+ */
+ ets_params->cos[cos_idx].params.bw_params.bw
+ = 1;
+ }
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
+ }
+ }
+
+ /* Check total BW is valid */
+ if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+ if (*total_bw == 0) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
+ return -EINVAL;
+ }
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config total BW should be 100\n");
+ /* We can handle a case where the BW isn't 100; this can happen
+ * if the TCs are joined.
+ */
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Invalidate all the sp_pri_to_cos.
+*
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+ u8 pri = 0;
+ for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+ sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos.
+*
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+ u8 *sp_pri_to_cos, const u8 pri,
+ const u8 cos_entry)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ if (pri >= max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter Illegal strict priority\n");
+ return -EINVAL;
+ }
+
+ if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter There can't be two COS's with "
+ "the same strict pri\n");
+ return -EINVAL;
+ }
+
+ sp_pri_to_cos[pri] = cos_entry;
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in
+* the sp_pri_cli register.
+*
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+ const u8 pri_set,
+ const u8 pri_offset,
+ const u8 entry_size)
+{
+ u64 pri_cli_nig = 0;
+ pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+ (pri_set + pri_offset));
+
+ return pri_cli_nig;
+}
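+
+/* Worked example: for the NIG wrapper below (cos_offset == 3,
+ * pri_offset == 3, entry_size == 4), cos == 0 at pri_set == 0 yields
+ * (0 + 3) << (4 * 3) == 0x3000, i.e. client 3 (COS0) is placed in the
+ * fourth 4-bit priority nibble, after the MCP/dbg0/dbg1 clients that
+ * the 0x210 seed value in bnx2x_ets_e3b0_sp_set_pri_cli_reg places in
+ * nibbles 0-2.
+ */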
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for NIG.
+*
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+ /* MCP, dbg0 and dbg1 always have higher strict priority */
+ const u8 nig_cos_offset = 3;
+ const u8 nig_pri_offset = 3;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+ nig_pri_offset, 4);
+}
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for PBF.
+*
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+ const u8 pbf_cos_offset = 0;
+ const u8 pbf_pri_offset = 0;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+ pbf_pri_offset, 3);
+}
+
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos.(which COS has higher priority)
+*
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+ u8 *sp_pri_to_cos)
+{
+ struct bnx2x *bp = params->bp;
+ u8 i = 0;
+ const u8 port = params->port;
+ /* MCP, dbg0 and dbg1 always have higher strict priority */
+ u64 pri_cli_nig = 0x210;
+ u32 pri_cli_pbf = 0x0;
+ u8 pri_set = 0;
+ u8 pri_bitmask = 0;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+ /* Set all the strict priority first */
+ for (i = 0; i < max_num_of_cos; i++) {
+ if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+ if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid cos entry\n");
+ return -EINVAL;
+ }
+
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ sp_pri_to_cos[i], pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ sp_pri_to_cos[i], pri_set);
+ pri_bitmask = 1 << sp_pri_to_cos[i];
+ /* COS is used; remove it from the bitmap. */
+ if (!(pri_bitmask & cos_bit_to_set)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid There can't be two COS's with"
+ " the same strict pri\n");
+ return -EINVAL;
+ }
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ /* Set all the non-strict priorities; here i == COS */
+ for (i = 0; i < max_num_of_cos; i++) {
+ pri_bitmask = 1 << i;
+ /* Check if COS was already used for SP */
+ if (pri_bitmask & cos_bit_to_set) {
+ /* COS wasn't used for SP */
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ i, pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ i, pri_set);
+ /* COS is now used; remove it from the bitmap. */
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ if (pri_set != max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+ "entries were set\n");
+ return -EINVAL;
+ }
+
+ if (port) {
+ /* Only 6 usable clients*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+ (u32)pri_cli_nig);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
+ } else {
+ /* Only 9 usable clients*/
+ const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+ const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+ pri_cli_nig_lsb);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+ pri_cli_nig_msb);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ struct bnx2x_ets_params *ets_params)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ const u8 port = params->port;
+ u16 total_bw = 0;
+ const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 cos_bw_bitmap = 0;
+ u8 cos_sp_bitmap = 0;
+ u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+ u8 cos_entry = 0;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ if (ets_params->num_of_cos > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+ "isn't supported\n");
+ return -EINVAL;
+ }
+
+ /* Prepare sp strict priority parameters*/
+ bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+ /* Prepare BW parameters*/
+ bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+ &total_bw);
+ if (bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config get_total_bw failed\n");
+ return -EINVAL;
+ }
+
+ /* Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
+ */
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+
+ for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+ cos_bw_bitmap |= (1 << cos_entry);
+ /* The function also sets the BW in HW (not the mapping
+  * yet)
+  */
+ bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+ bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+ total_bw,
+ ets_params->cos[cos_entry].params.bw_params.bw,
+ port);
+ } else if (bnx2x_cos_state_strict ==
+ ets_params->cos[cos_entry].state){
+ cos_sp_bitmap |= (1 << cos_entry);
+
+ bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+ params,
+ sp_pri_to_cos,
+ ets_params->cos[cos_entry].params.sp_params.pri,
+ cos_entry);
+
+ } else {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config cos state not valid\n");
+ return -EINVAL;
+ }
+ if (bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config set cos bw failed\n");
+ return bnx2x_status;
+ }
+ }
+
+ /* Set SP register (which COS has higher priority) */
+ bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+ sp_pri_to_cos);
+
+ if (bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
+ return bnx2x_status;
+ }
+
+ /* Set client mapping of BW and strict */
+ bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+ cos_sp_bitmap,
+ cos_bw_bitmap);
+
+ if (bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+ return bnx2x_status;
+ }
+ return 0;
+}
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
+{
+ /* Common part of the ETS BW-limit configuration */
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+ /* Defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+ /* Mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+ /* ETS mode enabled*/
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+ /* 5-bit bitmap. Each bit specifies whether the entry behaves
+  * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+  * entry, 4 - COS1 entry.
+  * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+  * bit4   bit3   bit2     bit1     bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+ /* Upper bound that COS0_WEIGHT/COS1_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw)
+{
+ /* ETS BW-limit configuration */
+ struct bnx2x *bp = params->bp;
+ const u32 total_bw = cos0_bw + cos1_bw;
+ u32 cos0_credit_weight = 0;
+ u32 cos1_credit_weight = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+
+ if ((!total_bw) ||
+ (!cos0_bw) ||
+ (!cos1_bw)) {
+ DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
+ return;
+ }
+
+ cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+ cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
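+ /* Illustrative arithmetic (hypothetical inputs): with cos0_bw = 30,
+  * cos1_bw = 70 and an ETS_BW_LIMIT_CREDIT_WEIGHT of, say, 5000,
+  * the weights come out proportional to the requested BW:
+  * cos0 = 30 * 5000 / 100 = 1500, cos1 = 70 * 5000 / 100 = 3500.
+  */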
+
+ bnx2x_ets_bw_limit_common(params);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
+
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+{
+ /* ETS strict-priority configuration */
+ struct bnx2x *bp = params->bp;
+ u32 val = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
+ /* 5-bit bitmap. Each bit specifies whether the entry behaves
+  * as strict. Bits 0,1,2 - debug and management entries,
+  * 3 - COS0 entry, 4 - COS1 entry.
+  * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+  * bit4   bit3   bit2     bit1     bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+ /* For strict priority entries defines the number of consecutive slots
+ * for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+ /* Mapping between entry priority and client number (0,1,2 - debug
+  * and management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+  * 3 bits per client num.
+  *   PRI4   |   PRI3   |   PRI2   |   PRI1   |   PRI0
+  * strict_cos == 0 (val 0x2318):
+  *   dbg0-010   dbg1-001   cos1-100   cos0-011   MCP-000
+  * strict_cos == 1 (val 0x22E0):
+  *   dbg0-010   dbg1-001   cos0-011   cos1-100   MCP-000
+  */
+ val = (!strict_cos) ? 0x2318 : 0x22E0;
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+ return 0;
+}
+
+/******************************************************************/
+/* PFC section */
+/******************************************************************/
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u32 xmac_base;
+ u32 pause_val, pfc0_val, pfc1_val;
+
+ /* XMAC base address */
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Initialize pause and pfc registers */
+ pause_val = 0x18000;
+ pfc0_val = 0xFFFF8000;
+ pfc1_val = 0x2;
+
+ /* No PFC support */
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+
+ /* RX flow control - Process pause frame in receive direction
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+ /* TX flow control - Send pause packet when buffer is full */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+ } else {/* PFC support */
+ pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+ XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+ XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+ /* Write pause and PFC registers */
+ REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+ pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
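+ /* Note: the registers above were written with FORCE_PFC_XON set
+  * to force an XON indication; they are written again below with
+  * the bit cleared for normal operation.
+  */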
+
+ }
+
+ /* Write pause and PFC registers */
+ REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+
+ /* Set MAC address for source TX Pause/PFC frames */
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
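+ /* For illustration, with a hypothetical MAC of 00:10:18:ab:cd:ef
+  * the packing above yields SA_HI = 0x0010 (bytes 0-1) and
+  * SA_LO = 0x18abcdef (bytes 2-5).
+  */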
+
+ udelay(30);
+}
+
+/******************************************************************/
+/* MAC/PBF section */
+/******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
+ u32 emac_base)
+{
+ u32 new_mode, cur_mode;
+ u32 clc_cnt;
+ /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ * (a value of 49==0x31) and make sure that the AUTO poll is off
+ */
+ cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+
+ if (USES_WARPCORE(bp))
+ clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
+ else
+ clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
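+ /* Presumably CLOCK_CNT acts as a divider on the core clock; e.g.
+  * a 125 MHz source divided by (49 + 1) would give the 2.5 MHz MDC
+  * mentioned above. The source frequency and divide-by-(N+1)
+  * behavior are assumptions, not taken from the datasheet.
+  */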
+
+ if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
+ (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
+ return;
+
+ new_mode = cur_mode &
+ ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+ new_mode |= clc_cnt;
+ new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+
+ DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
+ cur_mode, new_mode);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
+ udelay(40);
+}
+
+static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
+ struct link_params *params)
+{
+ u8 phy_index;
+ /* Set mdio clock per phy */
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++)
+ bnx2x_set_mdio_clk(bp, params->chip_id,
+ params->phy[phy_index].mdio_ctrl);
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
+
+static void bnx2x_emac_init(struct link_params *params,
+ struct link_vars *vars)
+{
+ /* reset and unreset the emac core */
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val;
+ u16 timeout;
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ udelay(5);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+
+ /* init emac - use read-modify-write */
+ /* self clear reset */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+
+ timeout = 200;
+ do {
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+ if (!timeout) {
+ DP(NETIF_MSG_LINK, "EMAC timeout!\n");
+ return;
+ }
+ timeout--;
+ } while (val & EMAC_MODE_RESET);
+
+ bnx2x_set_mdio_emac_per_phy(bp, params);
+ /* Set mac address */
+ val = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
+
+ val = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
+}
+
+static void bnx2x_set_xumac_nig(struct link_params *params,
+ u16 tx_pause_en,
+ u8 enable)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+ NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
+{
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ u32 val;
+ struct bnx2x *bp = params->bp;
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
+ return;
+ val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+ if (en)
+ val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
+ else
+ val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
+ /* Enable or disable RX and TX */
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+}
+
+static void bnx2x_umac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ struct bnx2x *bp = params->bp;
+ /* Reset UMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+ usleep_range(1000, 2000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+ DP(NETIF_MSG_LINK, "enabling UMAC\n");
+
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+ val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+ UMAC_COMMAND_CONFIG_REG_PAD_EN |
+ UMAC_COMMAND_CONFIG_REG_SW_RESET |
+ UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
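+ /* Per the cases below, bits [3:2] of COMMAND_CONFIG encode the
+  * core speed: 0 - 10M, 1 - 100M, 2 - 1G, 3 - 2.5G.
+  */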
+ switch (vars->line_speed) {
+ case SPEED_10:
+ val |= (0<<2);
+ break;
+ case SPEED_100:
+ val |= (1<<2);
+ break;
+ case SPEED_1000:
+ val |= (2<<2);
+ break;
+ case SPEED_2500:
+ val |= (3<<2);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
+ vars->line_speed);
+ break;
+ }
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
+ if (vars->duplex == DUPLEX_HALF)
+ val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;
+
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Configure UMAC for EEE */
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
+ REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
+ UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
+ REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
+ } else {
+ REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
+ }
+
+ /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+
+ /* Enable RX and TX */
+ val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+ val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Remove SW Reset */
+ val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+ /* Maximum Frame Length (RW). Defines a 14-bit maximum frame
+  * length used by the MAC receive logic to check frames.
+  */
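+ /* 0x2710 is 10000 decimal - enough room for jumbo frames */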
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+ vars->mac_type = MAC_TYPE_UMAC;
+
+}
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
+{
+ struct bnx2x *bp = params->bp;
+ u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+ /* In 4-port mode, need to set the mode only once, so if XMAC is
+  * already out of reset, it means the mode has already been set,
+  * and it must *not* reset the XMAC again, since it controls both
+  * ports of the path
+  */
+
+ if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) ||
+ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) ||
+ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) &&
+ is_port4mode &&
+ (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)) {
+ DP(NETIF_MSG_LINK,
+ "XMAC already out of reset in 4-port mode\n");
+ return;
+ }
+
+ /* Hard reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ usleep_range(1000, 2000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ if (is_port4mode) {
+ DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+ /* Set the number of ports on the system side to up to 2 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ /* Set the number of ports on the system side to 1 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+ if (max_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 10G x 1 port per path\n");
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 20G x 2 ports per path\n");
+ /* Set the number of ports on the Warp Core to 20G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+ }
+ }
+ /* Soft reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+ usleep_range(1000, 2000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+
+}
+
+static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ u32 val;
+
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC) {
+ /* Send an indication to change the state in the NIG back to XON
+ * Clearing this bit enables the next set of this bit to get
+ * rising edge
+ */
+ pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl & ~(1<<1)));
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl | (1<<1)));
+ DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+ val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+ if (en)
+ val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ else
+ val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+ }
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val, xmac_base;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ bnx2x_xmac_init(params, vars->line_speed);
+
+ /* This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /* This register tells the NIG whether to send traffic to UMAC
+ * or XMAC
+ */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+
+ /* When XMAC is in XLGMII mode, disable sending idles for fault
+ * detection.
+ */
+ if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) {
+ REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL,
+ (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
+ XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
+ REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ }
+ /* Set Max packet size */
+ REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+ /* CRC append for Tx packets */
+ REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+ /* update PFC */
+ bnx2x_update_pfc_xmac(params, vars, 0);
+
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+ } else {
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+ }
+
+ /* Enable TX and RX */
+ val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+ /* Set MAC in XLGMII mode for dual-mode */
+ if ((vars->line_speed == SPEED_20000) &&
+ (params->phy[INT_PHY].supported &
+ SUPPORTED_20000baseKR2_Full))
+ val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+
+ vars->mac_type = MAC_TYPE_XMAC;
+
+ return 0;
+}
+
+static int bnx2x_emac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "enabling EMAC\n");
+
+ /* Disable BMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* enable emac and not bmac */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
+
+ /* ASIC */
+ if (vars->phy_flags & PHY_XGXS_FLAG) {
+ u32 ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ DP(NETIF_MSG_LINK, "XGXS\n");
+ /* select the master lanes (out of 0-3) */
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
+ /* select XGXS */
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+ } else { /* SerDes */
+ DP(NETIF_MSG_LINK, "SerDes\n");
+ /* select SerDes */
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
+ }
+
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_RESET);
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_RESET);
+
+ /* pause enable/disable */
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
+
+ /* KEEP_VLAN_TAG, promiscuous */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
+ val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+ /* Setting this bit causes MAC control frames (except for pause
+  * frames) to be passed on for processing. This setting has no
+  * effect on the operation of the pause frames. This bit affects
+  * all packets regardless of RX Parser packet sorting logic.
+  * Turn the PFC off to make sure we are in Xon state before
+  * enabling it.
+  */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC again */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
+ EMAC_REG_RX_PFC_MODE_RX_EN |
+ EMAC_REG_RX_PFC_MODE_TX_EN |
+ EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+ EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
+ ((0x0101 <<
+ EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+ (0x00ff <<
+ EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+ val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+ }
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
+
+ /* Set Loopback */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ if (lb)
+ val |= 0x810;
+ else
+ val &= ~0x810;
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
+
+ /* Enable emac */
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
+
+ /* Enable emac for jumbo packets */
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
+ (EMAC_RX_MTU_SIZE_JUMBO_ENA |
+ (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD)));
+
+ /* Strip CRC */
+ REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
+
+ /* Disable the NIG in/out to the bmac */
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
+
+ /* Enable the NIG in/out to the emac */
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+
+ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
+
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+
+ vars->mac_type = MAC_TYPE_EMAC;
+ return 0;
+}
+
+static void bnx2x_update_pfc_bmac1(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ u32 val = 0x14;
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+ /* TX control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
+
+static void bnx2x_update_pfc_bmac2(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ /* Set rx control: Strip CRC and enable BigMAC to relay
+ * control packets to the system as well
+ */
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 val = 0x14;
+
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
+ udelay(30);
+
+ /* Tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x0;
+ wb_data[0] |= (1<<0); /* RX */
+ wb_data[0] |= (1<<1); /* TX */
+ wb_data[0] |= (1<<2); /* Force initial Xon */
+ wb_data[0] |= (1<<3); /* 8 cos */
+ wb_data[0] |= (1<<5); /* STATS */
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+ wb_data, 2);
+ /* Clear the force Xon */
+ wb_data[0] &= ~(1<<2);
+ } else {
+ DP(NETIF_MSG_LINK, "PFC is disabled\n");
+ /* Disable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x8;
+ wb_data[1] = 0;
+ }
+
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+
+ /* Set Time (base unit is 512 bit times) between automatic
+  * re-sending of PP packets and enable automatic re-send of
+  * Per-Priority Packets as long as pp_gen is asserted and
+  * pp_disable is low.
+  */
+ val = 0x8000;
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= (1<<16); /* enable automatic re-send */
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
+ wb_data, 2);
+
+ /* mac control */
+ val = 0x3; /* Enable RX and TX */
+ if (is_lb) {
+ val |= 0x4; /* Local loopback */
+ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+ }
+ /* When PFC enabled, Pass pause frames towards the NIG. */
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= ((1<<6)|(1<<5));
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+******************************************************************************/
+static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
+{
+ u32 nig_reg_rx_priority_mask_add = 0;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+ break;
+ case 1:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+ break;
+ case 2:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+ break;
+ }
+
+ REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+ return 0;
+}
+static void bnx2x_update_mng(struct link_params *params, u32 link_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
+}
+
+static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (SHMEM2_HAS(bp, link_attr_sync))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port]), link_attr);
+}
+
+static void bnx2x_update_pfc_nig(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *nig_params)
+{
+ u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
+ u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
+ u32 pkt_priority_to_cos = 0;
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+ DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
+
+ /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+ * MAC control frames (that are not pause packets)
+ * will be forwarded to the XCM.
+ */
+ xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
+ /* NIG params will override non PFC params, since it's possible to
+ * do transition from PFC to SAFC
+ */
+ if (set_pfc) {
+ pause_enable = 0;
+ llfc_out_en = 0;
+ llfc_enable = 0;
+ if (CHIP_IS_E3(bp))
+ ppp_enable = 0;
+ else
+ ppp_enable = 1;
+ xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm_out_en = 0;
+ hwpfc_enable = 1;
+ } else {
+ if (nig_params) {
+ llfc_out_en = nig_params->llfc_out_en;
+ llfc_enable = nig_params->llfc_enable;
+ pause_enable = nig_params->pause_enable;
+ } else /* Default non PFC mode - PAUSE */
+ pause_enable = 1;
+
+ xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm_out_en = 1;
+ }
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+ NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
+ REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+ NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+ REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+ NIG_REG_LLFC_ENABLE_0, llfc_enable);
+ REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+ NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+ REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+ NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+ REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+ NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+ /* Output enable for RX_XCM # IF */
+ REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+ NIG_REG_XCM0_OUT_EN, xcm_out_en);
+
+ /* HW PFC TX enable */
+ REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+ NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
+
+ if (nig_params) {
+ u8 i = 0;
+ pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+ for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+ bnx2x_pfc_nig_rx_priority_mask(bp, i,
+ nig_params->rx_cos_priority_mask[i], port);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+ nig_params->llfc_high_priority_classes);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+ nig_params->llfc_low_priority_classes);
+ }
+ REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+ NIG_REG_P0_PKT_PRIORITY_TO_COS,
+ pkt_priority_to_cos);
+}
+
+int bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+ /* The PFC and pause are orthogonal to one another, meaning when
+  * PFC is enabled, pause is disabled, and when PFC is
+  * disabled, pause is set according to the pause result.
+  */
+ u32 val;
+ struct bnx2x *bp = params->bp;
+ u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+ else
+ vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* Update NIG params */
+ bnx2x_update_pfc_nig(params, vars, pfc_params);
+
+ if (!vars->link_up)
+ return 0;
+
+ DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
+
+ if (CHIP_IS_E3(bp)) {
+ if (vars->mac_type == MAC_TYPE_XMAC)
+ bnx2x_update_pfc_xmac(params, vars, 0);
+ } else {
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+ bnx2x_emac_enable(params, vars, 0);
+ return 0;
+ }
+ if (CHIP_IS_E2(bp))
+ bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
+ else
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+ }
+ return 0;
+}
+
+static int bnx2x_bmac1_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
+
+ /* XGXS control */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ /* TX MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
+
+ /* MAC control */
+ val = 0x3;
+ if (is_lb) {
+ val |= 0x4;
+ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+ }
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
+
+ /* Set rx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
+
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ /* Set tx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
+
+ /* Set cnt max size */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+
+ /* Configure SAFC */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+
+ return 0;
+}
+
+static int bnx2x_bmac2_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+
+ DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
+
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+ udelay(30);
+
+ /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ udelay(30);
+
+ /* TX MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
+ wb_data, 2);
+
+ udelay(30);
+
+ /* Configure SAFC */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+ udelay(30);
+
+ /* Set RX MTU */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
+ udelay(30);
+
+ /* Set TX MTU */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
+ udelay(30);
+ /* Set cnt max size */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD - 2;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+ udelay(30);
+ bnx2x_update_pfc_bmac2(params, vars, is_lb);
+
+ return 0;
+}
+
+static int bnx2x_bmac_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb, u8 reset_bmac)
+{
+ int rc = 0;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 val;
+ /* Reset and unreset the BigMac */
+ if (reset_bmac) {
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ usleep_range(1000, 2000);
+ }
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* Enable access for bmac registers */
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+
+ /* Enable BMAC according to BMAC type*/
+ if (CHIP_IS_E2(bp))
+ rc = bnx2x_bmac2_enable(params, vars, is_lb);
+ else
+ rc = bnx2x_bmac1_enable(params, vars, is_lb);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
+
+ vars->mac_type = MAC_TYPE_BMAC;
+ return rc;
+}
+
+static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
+{
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+ u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+
+ if (CHIP_IS_E2(bp))
+ bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
+ else
+ bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
+ /* Only if the bmac is out of reset */
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
+ nig_bmac_enable) {
+ /* Set or clear the Rx Enable bit in the BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
+ if (en)
+ wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
+ else
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
+ usleep_range(1000, 2000);
+ }
+}
+
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+ u32 line_speed)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 init_crd, crd;
+ u32 count = 1000;
+
+ /* Disable port */
+ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+
+ /* Wait for init credit */
+ init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
+
+ while ((init_crd != crd) && count) {
+ usleep_range(5000, 10000);
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ count--;
+ }
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ if (init_crd != crd) {
+ DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
+ init_crd, crd);
+ return -EINVAL;
+ }
+
+ if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
+ line_speed == SPEED_10 ||
+ line_speed == SPEED_100 ||
+ line_speed == SPEED_1000 ||
+ line_speed == SPEED_2500) {
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
+ /* Update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
+ /* Update init credit */
+ init_crd = 778; /* (800-18-4) */
+
+ } else {
+ u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
+ ETH_OVERHEAD)/16;
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+ /* Update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
+ /* Update init credit */
+ switch (line_speed) {
+ case SPEED_10000:
+ init_crd = thresh + 553 - 22;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ line_speed);
+ return -EINVAL;
+ }
+ }
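+ /* Illustrative arithmetic, assuming ETH_MAX_JUMBO_PACKET_SIZE of
+  * 9600 bytes and 16-byte credit units: thresh would be on the
+  * order of (9600 + overhead) / 16 = ~600 units, and init_crd for
+  * 10G ends up around thresh + 553 - 22 = ~1130. The exact values
+  * depend on ETH_MAX_JUMBO_PACKET_SIZE and ETH_OVERHEAD.
+  */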
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
+ DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+ line_speed, init_crd);
+
+ /* Probe the credit changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
+ usleep_range(5000, 10000);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
+
+ /* Enable port */
+ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+ return 0;
+}
+
+/**
+ * bnx2x_get_emac_base - retrieve emac base address
+ *
+ * @bp: driver handle
+ * @mdc_mdio_access: access type
+ * @port: port id
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on mdc_mdio_access, the port, and whether the
+ * ports are swapped. Each phy has a default access mode, which
+ * could also be overridden by nvram configuration. This parameter,
+ * whether it is the default phy configuration or the nvram
+ * override configuration, is passed here as mdc_mdio_access and
+ * selects the emac_base for the CL45 read/write operations
+ */
+static u32 bnx2x_get_emac_base(struct bnx2x *bp,
+ u32 mdc_mdio_access, u8 port)
+{
+ u32 emac_base = 0;
+ switch (mdc_mdio_access) {
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC1;
+ else
+ emac_base = GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = GRCBASE_EMAC1;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
+ emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+ break;
+ default:
+ break;
+ }
+ return emac_base;
+
+}
+
+/******************************************************************/
+/* CL22 access functions */
+/******************************************************************/
+static int bnx2x_cl22_write(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 val)
+{
+ u32 tmp, mode;
+ u8 i;
+ int rc = 0;
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* Address */
+ tmp = ((phy->addr << 21) | (reg << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
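+ /* Poll for completion: up to 50 iterations x 10 us, i.e. roughly
+  * 500 us before declaring a timeout.
+  */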
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ rc = -EFAULT;
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
+
+static int bnx2x_cl22_read(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 *ret_val)
+{
+ u32 val, mode;
+ u16 i;
+ int rc = 0;
+
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* Address */
+ val = ((phy->addr << 21) | (reg << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_22 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+ udelay(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+
+ *ret_val = 0;
+ rc = -EFAULT;
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
+
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
+{
+ u32 val;
+ u16 i;
+ int rc = 0;
+ u32 chip_id;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+ bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+ }
+
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ /* Address */
+ val = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ *ret_val = 0;
+ rc = -EFAULT;
+ } else {
+ /* Data */
+ val = ((phy->addr << 21) | (devad << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ *ret_val = 0;
+ rc = -EFAULT;
+ }
+ }
+ /* Workaround for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
+{
+ u32 tmp;
+ u8 i;
+ int rc = 0;
+ u32 chip_id;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+ bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+ }
+
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+
+ /* Address */
+ tmp = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+ } else {
+ /* Data */
+ tmp = ((phy->addr << 21) | (devad << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+ }
+ }
+ /* Workaround for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
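+ /* The first dword of shmem2 is assumed to hold the region size;
+  * the eee_status array exists only if the region extends past
+  * its offset.
+  */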
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+static int bnx2x_eee_set_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle is in 1 us units --> eee_status is in 16 us units */
+ eee_idle >>= 4;
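+ /* e.g. a hypothetical 4096 us idle time becomes 256 once
+  * expressed in 16 us units.
+  */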
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_initial_config(struct link_params *params,
+ struct link_vars *vars, u8 mode)
+{
+ vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
+
+ /* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ return bnx2x_eee_set_timers(params, vars);
+}
+
+static int bnx2x_eee_disable(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ /* Make certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars, u8 modes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
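+ /* The bit values below appear to follow the IEEE 802.3 EEE
+  * advertisement register (7.60) layout: bit 3 (0x8) - 10GBASE-T,
+  * bit 2 (0x4) - 1000BASE-T. This mapping is an assumption based
+  * on the standard, not stated in this file.
+  */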
+ if (modes & SHMEM_EEE_10G_ADV) {
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+ val |= 0x8;
+ }
+ if (modes & SHMEM_EEE_1G_ADV) {
+ DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
+ val |= 0x4;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
+static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 adv = 0, lp = 0;
+ u32 lp_adv = 0;
+ u8 neg = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
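+ /* Assuming the IEEE 7.60/7.61 bit layout: 0x2 covers 100BASE-TX,
+  * 0x14 covers the 1G modes (1000BASE-T/KX) and 0x68 the 10G modes
+  * (10GBASE-T/KX4/KR).
+  */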
+
+ if (lp & 0x2) {
+ lp_adv |= SHMEM_EEE_100M_ADV;
+ if (adv & 0x2) {
+ if (vars->line_speed == SPEED_100)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
+ }
+ }
+ if (lp & 0x14) {
+ lp_adv |= SHMEM_EEE_1G_ADV;
+ if (adv & 0x14) {
+ if (vars->line_speed == SPEED_1000)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
+ }
+ }
+ if (lp & 0x68) {
+ lp_adv |= SHMEM_EEE_10G_ADV;
+ if (adv & 0x68) {
+ if (vars->line_speed == SPEED_10000)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
+ }
+ }
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ if (neg) {
+ DP(NETIF_MSG_LINK, "EEE is active\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+}
+
+/******************************************************************/
+/* BSC access functions from E3 */
+/******************************************************************/
+static void bnx2x_bsc_module_sel(struct link_params *params)
+{
+ int idx;
+ u32 board_cfg, sfp_ctrl;
+ u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ /* Read I2C output PINs */
+ board_cfg = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.shared_hw_config.board));
+ i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+ i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+ SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+ /* Read I2C output value */
+ sfp_ctrl = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg));
+ i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+ i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+ DP(NETIF_MSG_LINK, "Setting BSC switch\n");
+ for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+ bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
+}
+
+static int bnx2x_bsc_read(struct link_params *params,
+ struct bnx2x *bp,
+ u8 sl_devid,
+ u16 sl_addr,
+ u8 lc_addr,
+ u8 xfer_cnt,
+ u32 *data_array)
+{
+ u64 t0, delta;
+ u32 val, i;
+ int rc = 0;
+
+ if (xfer_cnt > 16) {
+ DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
+ xfer_cnt);
+ return -EINVAL;
+ }
+ bnx2x_bsc_module_sel(params);
+
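+	/* The transfer count is recomputed so the read covers the 16-byte
+	 * IMC data window from lc_addr onward; the bound check above only
+	 * validates the caller's request.
+	 */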
+ xfer_cnt = 16 - lc_addr;
+
+ /* Enable the engine */
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ val |= MCPR_IMC_COMMAND_ENABLE;
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* Program slave device ID */
+ val = (sl_devid << 16) | sl_addr;
+ REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+	/* Start transfer with a 0-byte write to update the address pointer */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_WRITE_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* Poll for completion */
+ t0 = ktime_get_ns();
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ delta = ktime_get_ns() - t0;
+ if (delta > 10 * NSEC_PER_MSEC) {
+ DP(NETIF_MSG_LINK, "wr 0 byte timed out after %Lu ns\n",
+ delta);
+ rc = -EFAULT;
+ break;
+ }
+ usleep_range(10, 20);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ }
+ if (rc == -EFAULT)
+ return rc;
+
+ /* Start xfer with read op */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_READ_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+ (xfer_cnt);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* Poll for completion */
+ t0 = ktime_get_ns();
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ delta = ktime_get_ns() - t0;
+ if (delta > 10 * NSEC_PER_MSEC) {
+ DP(NETIF_MSG_LINK, "rd op timed out after %Lu ns\n",
+ delta);
+ rc = -EFAULT;
+ break;
+ }
+ usleep_range(10, 20);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ }
+ if (rc == -EFAULT)
+ return rc;
+
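+	/* Copy the IMC data window (four 32-bit data registers), starting at
+	 * the dword that contains lc_addr; swap bytes on big-endian hosts.
+	 */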
+ for (i = (lc_addr >> 2); i < 4; i++) {
+ data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
+#ifdef __BIG_ENDIAN
+ data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+ ((data_array[i] & 0x0000ff00) << 8) |
+ ((data_array[i] & 0x00ff0000) >> 8) |
+ ((data_array[i] & 0xff000000) >> 24);
+#endif
+ }
+ return rc;
+}
+
+static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 or_val)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy, devad, reg, &val);
+ bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
+}
+
+static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 and_val)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy, devad, reg, &val);
+ bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
+}
+
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val)
+{
+ u8 phy_index;
+ /* Probe for the phy according to the given phy_addr, and execute
+ * the read request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_read(params->bp,
+ &params->phy[phy_index], devad,
+ reg, ret_val);
+ }
+ }
+ return -EINVAL;
+}
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val)
+{
+ u8 phy_index;
+ /* Probe for the phy according to the given phy_addr, and execute
+ * the write request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_write(params->bp,
+ &params->phy[phy_index], devad,
+ reg, val);
+ }
+ }
+ return -EINVAL;
+}
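+
+/* Usage sketch (hypothetical caller; the PHY address 0x10 is made up,
+ * the register names exist in this driver):
+ *
+ *	u16 adv;
+ *
+ *	if (!bnx2x_phy_read(params, 0x10, MDIO_AN_DEVAD,
+ *			    MDIO_AN_REG_ADV_PAUSE, &adv))
+ *		DP(NETIF_MSG_LINK, "pause adv 0x%x\n", adv);
+ */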
+static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 lane = 0;
+ struct bnx2x *bp = params->bp;
+ u32 path_swap, path_swap_ovr;
+ u8 path, port;
+
+ path = BP_PATH(bp);
+ port = params->port;
+
+ if (bnx2x_is_4_port_mode(bp)) {
+ u32 port_swap, port_swap_ovr;
+
+ /* Figure out path swap value */
+ path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1)
+ path_swap = (path_swap_ovr & 0x2);
+ else
+ path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+ if (path_swap)
+ path = path ^ 1;
+
+ /* Figure out port swap value */
+ port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+ if (port_swap_ovr & 0x1)
+ port_swap = (port_swap_ovr & 0x2);
+ else
+ port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
+
+ if (port_swap)
+ port = port ^ 1;
+
+ lane = (port<<1) + path;
+ } else { /* Two port mode - no port swap */
+
+ /* Figure out path swap value */
+ path_swap_ovr =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1) {
+ path_swap = (path_swap_ovr & 0x2);
+ } else {
+ path_swap =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
+ }
+ if (path_swap)
+ path = path ^ 1;
+
+		lane = path << 1;
+ }
+ return lane;
+}
+
+static void bnx2x_set_aer_mmd(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ u32 ser_lane;
+ u16 offset, aer_val;
+ struct bnx2x *bp = params->bp;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+ (phy->addr + ser_lane) : 0;
+
+ if (USES_WARPCORE(bp)) {
+ aer_val = bnx2x_get_warpcore_lane(phy, params);
+ /* In Dual-lane mode, two lanes are joined together,
+ * so in order to configure them, the AER broadcast method is
+ * used here.
+ * 0x200 is the broadcast address for lanes 0,1
+ * 0x201 is the broadcast address for lanes 2,3
+ */
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ aer_val = (aer_val >> 1) | 0x200;
+ } else if (CHIP_IS_E2(bp))
+ aer_val = 0x3800 + offset - 1;
+ else
+ aer_val = 0x3800 + offset;
+
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
+}
+
+/******************************************************************/
+/* Internal phy section */
+/******************************************************************/
+
+static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
+{
+ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ /* Set Clause 22 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+ udelay(500);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+ udelay(500);
+ /* Set Clause 45 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
+}
+
+static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
+{
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
+
+ val = SERDES_RESET_BITS << (port*16);
+
+ /* Reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ bnx2x_set_serdes_access(bp, port);
+
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
+}
+
+static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ /* Set correct devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
+ phy->def_md_devad);
+ break;
+ }
+}
+
+static void bnx2x_xgxs_deassert(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u32 val;
+ DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
+ port = params->port;
+
+ val = XGXS_RESET_BITS << (port*16);
+
+ /* Reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+ bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
+ PHY_INIT);
+}
+
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+ struct link_params *params, u16 *ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+	/* Resolve pause mode and advertisement. Please refer to Table
+	 * 28B-3 of the 802.3ab-1999 spec.
+	 */
+
+ switch (phy->req_flow_ctrl) {
+ case BNX2X_FLOW_CTRL_AUTO:
+ switch (params->req_fc_auto_adv) {
+ case BNX2X_FLOW_CTRL_BOTH:
+ case BNX2X_FLOW_CTRL_RX:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ break;
+ case BNX2X_FLOW_CTRL_TX:
+ *ieee_fc |=
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+ default:
+ break;
+ }
+ break;
+ case BNX2X_FLOW_CTRL_TX:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case BNX2X_FLOW_CTRL_RX:
+ case BNX2X_FLOW_CTRL_BOTH:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ break;
+
+ case BNX2X_FLOW_CTRL_NONE:
+ default:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+ break;
+ }
+ DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
+}
+
+static void set_phy_vars(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 actual_phy_idx, phy_index, link_cfg_idx;
+ u8 phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ params->phy[actual_phy_idx].req_flow_ctrl =
+ params->req_flow_ctrl[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_line_speed =
+ params->req_line_speed[link_cfg_idx];
+
+ params->phy[actual_phy_idx].speed_cap_mask =
+ params->speed_cap_mask[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_duplex =
+ params->req_duplex[link_cfg_idx];
+
+ if (params->req_line_speed[link_cfg_idx] ==
+ SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+ DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x\n",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
+ }
+}
+
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
+ struct bnx2x *bp = params->bp;
+	/* Read-modify-write the pause advertisement */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+ }
+ DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void bnx2x_pause_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 pause_result)
+{
+ struct bnx2x *bp = params->bp;
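+	/* pause_result nibble, as built by bnx2x_ext_phy_update_adv_fc():
+	 * bit 3 = LD asym, bit 2 = LD pause, bit 1 = LP asym, bit 0 = LP pause
+	 */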
+ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
+ DP(NETIF_MSG_LINK, "Flow Control: TX only\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+
+ case 0xe: /* 1 1 1 0 */
+ DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
+ /* If the user selected to advertise RX ONLY,
+ * although we advertised both, need to enable
+ * RX only.
+ */
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
+ DP(NETIF_MSG_LINK, "Flow Control: RX & TX\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ } else {
+ DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ }
+ break;
+
+ default:
+ DP(NETIF_MSG_LINK, "Flow Control: None\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ break;
+ }
+ if (pause_result & (1<<0))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+ if (pause_result & (1<<1))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+}
+
+static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ struct bnx2x *bp = params->bp;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+ bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
+ bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+ } else if (CHIP_IS_E3(bp) &&
+ SINGLE_MEDIA_DIRECT(params)) {
+ u8 lane = bnx2x_get_warpcore_lane(phy, params);
+ u16 gp_status, gp_mask;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
+ &gp_status);
+ gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
+ MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
+ lane;
+ if ((gp_status & gp_mask) == gp_mask) {
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ ld_pause = ((ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ << 3);
+ lp_pause = ((lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ << 3);
+ }
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ }
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
+ bnx2x_pause_resolve(phy, params, vars, pause_result);
+}
+
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 ret = 0;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+		/* Update the advertised flow-control of LD/LP in AN */
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_ext_phy_update_adv_fc(phy, params, vars);
+ /* But set the flow-control result as the requested one */
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ } else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ ret = 1;
+ bnx2x_ext_phy_update_adv_fc(phy, params, vars);
+ }
+ return ret;
+}
+/******************************************************************/
+/* Warpcore section */
+/******************************************************************/
+/* The internal Warpcore init should mirror the XGXS one,
+ * i.e. reset the lane (if needed), set the AER for the
+ * init configuration, and set/clear the SGMII flag. Internal
+ * phy init is done purely in the phy_init stage.
+ */
+#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \
+ (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
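+
+/* WC_TX_DRIVER packs the per-lane TX driver fields (post2 equalization
+ * coefficient, driver current, pre-driver current, FIR select) into the
+ * TX0_TX_DRIVER register layout; WC_TX_FIR packs the post/main/pre FIR
+ * tap weights for the TX_FIR_TAP register.
+ */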
+
+static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
+ /* Step 2 - Configure the NP registers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
+ };
+ DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n");
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+	/* Start KR2 work-around timer which handles BCM8073 link-partner */
+ params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+}
+
+static void bnx2x_disable_kr2(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ int i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+ };
+ DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+ params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+
+ vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
+static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+}
+
+static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ /* Restart autoneg on the leading lane only */
+ struct bnx2x *bp = params->bp;
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+ /* Restore AER */
+ bnx2x_set_aer_mmd(params, phy);
+}
+
+static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
+ struct link_params *params,
+					struct link_vars *vars)
+{
+ u16 lane, i, cl72_ctrl, an_adv = 0, val;
+ u32 wc_lane_config;
+ struct bnx2x *bp = params->bp;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+ /* Disable Autoneg: re-enable it after adv is done. */
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
+ };
+ DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+	/* Set to default registers that may be overridden by 10G force */
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+ cl72_ctrl &= 0x08ff;
+ cl72_ctrl |= 0x3800;
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+
+	/* Check whether to add 1G-KX advertisement */
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (vars->line_speed == SPEED_1000)) {
+ u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+ an_adv |= (1<<5);
+
+ /* Enable CL37 1G Parallel Detect */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
+ DP(NETIF_MSG_LINK, "Advertize 1G\n");
+ }
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (vars->line_speed == SPEED_10000)) {
+		/* Check whether to add 10G-KR advertisement */
+ an_adv |= (1<<7);
+ /* Enable 10G Parallel Detect */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+ bnx2x_set_aer_mmd(params, phy);
+ DP(NETIF_MSG_LINK, "Advertize 10G\n");
+ }
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
+ /* Configure the next lane if dual mode */
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
+ WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+ 0x03f0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+ 0x03f0);
+
+ /* Advertised speeds */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
+
+	/* Advertise and set FEC (Forward Error Correction) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
+ /* Enable CL37 BAM */
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+ 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
+
+ /* Advertise pause */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
+
+ /* Over 1G - AN local device user page 1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+ (phy->req_line_speed == SPEED_20000)) {
+
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane),
+ (1<<11));
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
+ bnx2x_set_aer_mmd(params, phy);
+
+ bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ } else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+ wc_lane_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ shared_hw_config.wc_lane_config));
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ val |= 1 << 11;
+
+		/* Restore polarity settings in case they were overwritten by
+		 * a previous link owner.
+		 */
+ if (wc_lane_config &
+ (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+ val |= 3 << 2;
+ else
+ val &= ~(3 << 2);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+ val);
+
+ bnx2x_disable_kr2(params, vars, phy);
+ }
+
+ /* Enable Autoneg: only on the main lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16, i, lane;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Disable Autoneg */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3f00},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+ /* Leave cl72 training enable, needed for KR */
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Global registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Disable CL36 PCS Tx */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+ val16 &= ~(0x0011 << lane);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+ val16 |= (0x0303 << (lane << 1));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+ /* Restore AER */
+ bnx2x_set_aer_mmd(params, phy);
+ /* Set speed via PMA/PMD register */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+ /* Enable encoded forced speed */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+	/* Scramble the TX payload only (64/66 scrambler) */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+	/* Scramble the RX payload only (64/66 scrambler) */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+ /* Set and clear loopback to cause a reset to 64/66 decoder */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+}
+
+static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 is_xfi)
+{
+ struct bnx2x *bp = params->bp;
+ u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+ u32 ifir_val, ipost2_val, ipre_driver_val;
+
+ /* Hold rxSeqStart */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
+
+ /* Hold tx_fifo_reset */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
+
+ /* Disable CL73 AN */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+ /* Disable 100FX Enable and Auto-Detect */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
+
+ /* Disable 100FX Idle detect */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0080);
+
+ /* Set Block address to Remote PHY & Clear forced_speed[5] */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
+
+ /* Turn off auto-detect & fiber mode */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0xFFEE);
+
+ /* Set filter_force_link, disable_false_link and parallel_detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ ((val | 0x0006) & 0xFFFE));
+
+ /* Set XFI / SFI */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+ misc1_val &= ~(0x1f);
+
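+	/* The low 5 bits of SERDESDIGITAL_MISC1 select the interface/rate;
+	 * judging from the values written below, 0x5 selects XFI and 0x9 SFI
+	 * (an assumption, not documented here). XFI uses fixed FIR taps and
+	 * driver settings, while the SFI taps may come from NVRAM.
+	 */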
+ if (is_xfi) {
+ misc1_val |= 0x5;
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0);
+ } else {
+ cfg_tap_val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ misc1_val |= 0x9;
+
+		/* TAP values are controlled by NVRAM; use them if non-zero */
+ if (tx_equal)
+ tap_val = (u16)tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ ifir_val = DEFAULT_TX_DRV_IFIR;
+ ipost2_val = DEFAULT_TX_DRV_POST2;
+ ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER;
+ tx_drv_brdct = DEFAULT_TX_DRV_BRDCT;
+
+		/* If any of the IFIR/IPRE_DRIVER/POST2 fields is set, apply
+		 * the whole configuration.
+		 */
+ if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK |
+ PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK |
+ PORT_HW_CFG_TX_DRV_POST2_MASK)) {
+ ifir_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_IFIR_MASK) >>
+ PORT_HW_CFG_TX_DRV_IFIR_SHIFT;
+ ipre_driver_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK)
+ >> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT;
+ ipost2_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_POST2_MASK) >>
+ PORT_HW_CFG_TX_DRV_POST2_SHIFT;
+ }
+
+ if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) {
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+ }
+
+ tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct,
+ ipre_driver_val, ifir_val);
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ tx_driver_val);
+
+ /* Enable fiber mode, enable and invert sig_det */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
+
+ /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+ bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
+ /* 10G XFI Full Duplex */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+ /* Release tx_fifo_reset */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0xFFFE);
+ /* Release rxSeqStart */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
+}
+
+static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 val;
+ struct bnx2x *bp = params->bp;
+ /* Set global registers, so set AER lane to 0 */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ /* Disable sequencer */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13));
+
+ bnx2x_set_aer_mmd(params, phy);
+
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1));
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CTRL, 0);
+ /* Turn off CL73 */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL73_USERB0_CTRL, &val);
+ val &= ~(1<<5);
+ val |= (1<<6);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL73_USERB0_CTRL, val);
+
+ /* Set 20G KR2 force speed */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, (1<<7));
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
+ val &= ~(3<<14);
+ val |= (1<<15);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
+
+ /* Enable sequencer (over lane 0) */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13));
+
+ bnx2x_set_aer_mmd(params, phy);
+}
+
+static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 lane)
+{
+ /* Rx0 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+ /* Rx2 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
+
+ /* Serdes Digital Misc1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+ /* Serdes Digital4 Misc3 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+ /* Set Transmit PMD settings */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02, 0));
+}
+
+static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 fiber_mode,
+ u8 always_autoneg)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16, digctrl_kx1, digctrl_kx2;
+
+ /* Clear XFI clock comp in non-10G single lane mode. */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, ~(3<<13));
+
+ bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
+ if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* SGMII Autoneg */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x1000);
+ DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ val16 &= 0xcebf;
+ switch (phy->req_line_speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ val16 |= 0x2000;
+ break;
+ case SPEED_1000:
+ val16 |= 0x0040;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "Speed not supported: 0x%x\n", phy->req_line_speed);
+ return;
+ }
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ val16 |= 0x0100;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+ DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
+ phy->req_line_speed);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ DP(NETIF_MSG_LINK, " (readback) %x\n", val16);
+ }
+
+ /* SGMII Slave mode and disable signal detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+ if (fiber_mode)
+ digctrl_kx1 = 1;
+ else
+ digctrl_kx1 &= 0xff4a;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ digctrl_kx1);
+
+ /* Turn off parallel detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 & ~(1<<2)));
+
+ /* Re-enable parallel detect */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 | (1<<2)));
+
+ /* Enable autodet */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (digctrl_kx1 | 0x10));
+}
+
+static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 reset)
+{
+ u16 val;
+	/* Assert or release the per-lane reset, according to 'reset' */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+ if (reset)
+ val |= 0xC000;
+ else
+ val &= 0x3FFF;
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, val);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
+/* Clear SFI/XFI link settings registers */
+static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 lane)
+{
+ struct bnx2x *bp = params->bp;
+ u16 i;
+ static struct bnx2x_reg_set wc_regs[] = {
+ {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0x0195},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ 0x0007},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+ };
+ /* Set XFI clock comp as default. */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, (3<<13));
+
+ for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
+ bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
+ wc_regs[i].val);
+
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
+}
+
+static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
+ u32 chip_id,
+ u32 shmem_base, u8 port,
+ u8 *gpio_num, u8 *gpio_port)
+{
+ u32 cfg_pin;
+ *gpio_num = 0;
+ *gpio_port = 0;
+ if (CHIP_IS_E3(bp)) {
+ cfg_pin = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+ PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+		/* Should not happen. This function is called upon an
+		 * interrupt triggered by a GPIO (since EPIO can only generate
+		 * interrupts to the MCP).
+		 * So if this function was called and none of the GPIOs was
+		 * set, something has gone seriously wrong.
+		 */
+ if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+ (cfg_pin > PIN_CFG_GPIO3_P1)) {
+ DP(NETIF_MSG_LINK,
+ "No cfg pin %x for module detect indication\n",
+ cfg_pin);
+ return -EINVAL;
+ }
+
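+		/* The PIN_CFG_GPIOx_Py values are assumed to be consecutive,
+		 * so the low two bits select the GPIO and the next bit the
+		 * port.
+		 */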
+ *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+ *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+ } else {
+ *gpio_num = MISC_REGISTERS_GPIO_3;
+ *gpio_port = port;
+ }
+
+ return 0;
+}
+
+static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_num, gpio_port;
+ u32 gpio_val;
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
+ params->shmem_base, params->port,
+ &gpio_num, &gpio_port) != 0)
+ return 0;
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* The MOD_ABS pin reads low when a module is plugged in; the caller
+	 * then runs the module handling.
+	 */
+ if (gpio_val == 0)
+ return 1;
+ else
+ return 0;
+}
+
+static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 gp2_status_reg0, lane;
+ struct bnx2x *bp = params->bp;
+
+ lane = bnx2x_get_warpcore_lane(phy, params);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
+ &gp2_status_reg0);
+
+ return (gp2_status_reg0 >> (8+lane)) & 0x1;
+}
+
+static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 serdes_net_if;
+ u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
+
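+	/* Service this runtime work-around only on every other invocation */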
+ vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
+
+ if (!vars->turn_to_run_wc_rt)
+ return;
+
+ if (vars->rx_tx_asic_rst) {
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ serdes_net_if = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ /* Do we get link yet? */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
+ &gp_status1);
+			lnkup = (gp_status1 >> (8+lane)) & 0x1; /* 1G */
+			/* 10G KR */
+			lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
+
+ if (lnkup_kr || lnkup) {
+ vars->rx_tx_asic_rst = 0;
+ } else {
+ /* Reset the lane to see if link comes up.*/
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+
+ /* Restart Autoneg */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+ vars->rx_tx_asic_rst--;
+ DP(NETIF_MSG_LINK, "0x%x retry left\n",
+ vars->rx_tx_asic_rst);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+	} /* vars->rx_tx_asic_rst */
+}
+
+static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ struct bnx2x *bp = params->bp;
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
+ SPEED_10000) &&
+ (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
+ }
+}
+
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+
+ cfg_pin = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_TX_LASER_MASK;
+ /* Set the !tx_en since this pin is DISABLE_TX_LASER */
+ DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+
+	/* For 20G, the expected pin to be used is 3 pins after the current one */
+ bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+ bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
+static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 serdes_net_if;
+ u8 fiber_mode;
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ serdes_net_if = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
+ "serdes_net_if = 0x%x\n",
+ vars->line_speed, serdes_net_if);
+ bnx2x_set_aer_mmd(params, phy);
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+ (phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10)))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
+ } else {
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ /* Enable KR Auto Neg */
+ if (params->loopback_mode != LOOPBACK_EXT)
+ bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
+ bnx2x_warpcore_set_10G_KR(phy, params, vars);
+ }
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 1);
+ } else {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ DP(NETIF_MSG_LINK, "1G Fiber\n");
+ fiber_mode = 1;
+ } else {
+ DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
+ fiber_mode = 0;
+ }
+ bnx2x_warpcore_set_sgmii_speed(phy,
+ params,
+ fiber_mode,
+ 0);
+ }
+
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+			/* Issue module detection if a module is plugged in;
+			 * otherwise enable the transmitter to avoid current
+			 * leakage when no module is connected.
+			 */
+ if ((params->loopback_mode == LOOPBACK_NONE) ||
+ (params->loopback_mode == LOOPBACK_EXT)) {
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ bnx2x_sfp_e3_set_transmitter(params,
+ phy, 1);
+ }
+
+ bnx2x_warpcore_config_sfi(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ if (vars->line_speed != SPEED_20000) {
+ DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
+ bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
+ /* Issue Module detection */
+
+ bnx2x_sfp_module_detection(phy, params);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ if (!params->loopback_mode) {
+ bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n");
+ bnx2x_warpcore_set_20G_force_KR2(phy, params);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "Unsupported Serdes Net Interface 0x%x\n",
+ serdes_net_if);
+ return;
+ }
+ }
+
+ /* Take lane out of reset after configuration is finished */
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ DP(NETIF_MSG_LINK, "Exit config init\n");
+}
+
+static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16, lane;
+ bnx2x_sfp_e3_set_transmitter(params, phy, 0);
+ bnx2x_set_mdio_emac_per_phy(bp, params);
+ bnx2x_set_aer_mmd(params, phy);
+ /* Global register */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+
+ /* Clear loopback settings (if any) */
+ /* 10G & 20G */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
+
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ ~0x10);
+
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Disable CL36 PCS Tx */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+ val16 |= (0x11 << lane);
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ val16 |= (0x22 << lane);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+ val16 &= ~(0x0303 << (lane << 1));
+ val16 |= (0x0101 << (lane << 1));
+ if (phy->flags & FLAGS_WC_DUAL_MODE) {
+ val16 &= ~(0x0c0c << (lane << 1));
+ val16 |= (0x0404 << (lane << 1));
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+ /* Restore AER */
+ bnx2x_set_aer_mmd(params, phy);
+}
+
+static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+ u32 lane;
+ DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
+ params->loopback_mode, phy->req_line_speed);
+
+ if (phy->req_line_speed < SPEED_10000 ||
+ phy->supported & SUPPORTED_20000baseKR2_Full) {
+ /* 10/100/1000/20G-KR2 */
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ 0x10);
+ /* Set 1G loopback based on lane (1-copy) */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ val16 |= (1<<lane);
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ val16 |= (2<<lane);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16);
+
+ /* Switch back to 4-copy registers */
+ bnx2x_set_aer_mmd(params, phy);
+ } else {
+ /* 10G / 20G-DXGXS */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x4000);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
+ }
+}
+
+static void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_10g_plus;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+ if (vars->link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->duplex = DUPLEX_FULL;
+ switch (vars->link_status &
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ case LINK_10THD:
+ vars->duplex = DUPLEX_HALF;
+ fallthrough;
+ case LINK_10TFD:
+ vars->line_speed = SPEED_10;
+ break;
+
+ case LINK_100TXHD:
+ vars->duplex = DUPLEX_HALF;
+ fallthrough;
+ case LINK_100T4:
+ case LINK_100TXFD:
+ vars->line_speed = SPEED_100;
+ break;
+
+ case LINK_1000THD:
+ vars->duplex = DUPLEX_HALF;
+ fallthrough;
+ case LINK_1000TFD:
+ vars->line_speed = SPEED_1000;
+ break;
+
+ case LINK_2500THD:
+ vars->duplex = DUPLEX_HALF;
+ fallthrough;
+ case LINK_2500TFD:
+ vars->line_speed = SPEED_2500;
+ break;
+
+ case LINK_10GTFD:
+ vars->line_speed = SPEED_10000;
+ break;
+ case LINK_20GTFD:
+ vars->line_speed = SPEED_20000;
+ break;
+ default:
+ break;
+ }
+ vars->flow_ctrl = 0;
+ if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+ if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+ if (!vars->flow_ctrl)
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (vars->line_speed &&
+ ((vars->line_speed == SPEED_10) ||
+ (vars->line_speed == SPEED_100))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ } else {
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ }
+ if (vars->line_speed &&
+ USES_WARPCORE(bp) &&
+ (vars->line_speed == SPEED_1000))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+		/* Anything 10G and over uses the big MAC (BMAC/XMAC) */
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+ if (link_10g_plus) {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_XMAC;
+ else
+ vars->mac_type = MAC_TYPE_BMAC;
+ } else {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_UMAC;
+ else
+ vars->mac_type = MAC_TYPE_EMAC;
+ }
+ } else { /* Link down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* Indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+ vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
+ }
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ /* Force link UP in non LOOPBACK_EXT loopback mode(s) */
+ if (params->loopback_mode != LOOPBACK_NONE &&
+ params->loopback_mode != LOOPBACK_EXT)
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ if (bnx2x_eee_has_cap(params))
+ vars->eee_status = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_sync_link(params, vars);
+ /* Sync media type */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+
+ params->phy[INT_PHY].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+ params->phy[EXT_PHY1].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+ params->phy[EXT_PHY2].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+ DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
+
+ /* Sync AEU offset */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+
+ vars->aeu_int_mask = REG_RD(bp, sync_offset);
+
+ /* Sync PFC status */
+ if (vars->link_status & LINK_STATUS_PFC_ENABLED)
+ params->feature_config_flags |=
+ FEATURE_CONFIG_PFC_ENABLED;
+ else
+ params->feature_config_flags &=
+ ~FEATURE_CONFIG_PFC_ENABLED;
+
+ if (SHMEM2_HAS(bp, link_attr_sync))
+ params->link_attr_sync = SHMEM2_RD(bp,
+ link_attr_sync[params->port]);
+
+ DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
+ vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
+ DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
+ vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+static void bnx2x_set_master_ln(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ u16 new_master_ln, ser_lane;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ /* Set the master_ln for AN */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
+
+ CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
+}
+
+static int bnx2x_reset_unicore(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 set_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mii_control;
+ u16 i;
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+
+ /* Reset the unicore */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ if (set_serdes)
+ bnx2x_set_serdes_access(bp, params->port);
+
+ /* Wait for the reset to self clear */
+ for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
+ udelay(5);
+
+ /* The reset erased the previous bank value */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+
+ if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+ udelay(5);
+ return 0;
+ }
+ }
+
+	netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+		   params->port);
+ DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
+ return -EINVAL;
+}
+
+static void bnx2x_set_swap_lanes(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+	/* Each two bits represent a lane number:
+	 * no swap is 0123 => 0x1b, so there is no need to enable the swap.
+	 */
+ u16 rx_lane_swap, tx_lane_swap;
+
+ rx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ tx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+
+ if (rx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ }
+
+ if (tx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ }
+}
+
+static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 control2;
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ else
+ control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+ phy->speed_cap_mask, control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "XGXS\n");
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
+
+ control2 |=
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
+
+ /* Disable parallel detection of HiG */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ }
+}
+
+static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u8 enable_cl73)
+{
+ struct bnx2x *bp = params->bp;
+ u16 reg_val;
+
+ /* CL37 Autoneg */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+
+ /* CL37 Autoneg Enabled */
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+ else /* CL37 Autoneg Disabled */
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /* Enable/Disable Autodetection */
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+ else
+ reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+
+ /* Enable TetonII and BAM autoneg */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ &reg_val);
+ if (vars->line_speed == SPEED_AUTO_NEG) {
+ /* Enable BAM aneg Mode and TetonII aneg Mode */
+ reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ } else {
+ /* TetonII and BAM Autoneg Disabled */
+ reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ }
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
+
+ if (enable_cl73) {
+ /* Enable Cl73 FSM status bits */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
+
+ /* Enable BAM Station Manager*/
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
+
+ /* Advertise CL73 link speeds */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
+
+ /* CL73 Autoneg Enabled */
+ reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+
+ } else /* CL73 Autoneg Disabled */
+ reg_val = 0;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+}
+
+/* Program SerDes, forced speed */
+static void bnx2x_program_serdes(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 reg_val;
+
+ /* Program duplex, disable autoneg and sgmii*/
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
+ if (phy->req_duplex == DUPLEX_FULL)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /* Program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ /* Clearing the speed value before setting the right speed */
+ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+ reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+ if (!((vars->line_speed == SPEED_1000) ||
+ (vars->line_speed == SPEED_100) ||
+ (vars->line_speed == SPEED_10))) {
+
+ reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+ if (vars->line_speed == SPEED_10000)
+ reg_val |=
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+ }
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
+}
+
+static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+
+ /* Set extended capabilities */
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+ val |= MDIO_OVER_1G_UP1_2_5G;
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= MDIO_OVER_1G_UP1_10G;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
+}
+
+static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ /* For AN, we are always publishing full duplex */
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
+ val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
+}
+
+static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 enable_cl73)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mii_control;
+
+ DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
+ /* Enable and restart BAM/CL37 aneg */
+
+ if (enable_cl73) {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ } else {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+ DP(NETIF_MSG_LINK,
+ "bnx2x_restart_autoneg mii_control before = 0x%x\n",
+ mii_control);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ }
+}
+
+static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 control1;
+
+ /* In SGMII mode, the unicore is always slave */
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
+ control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+ /* Set sgmii mode (and not fiber) */
+ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
+
+ /* If forced speed */
+ if (!(vars->line_speed == SPEED_AUTO_NEG)) {
+ /* Set speed, disable autoneg */
+ u16 mii_control;
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+ mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+
+ switch (vars->line_speed) {
+ case SPEED_100:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+ break;
+ case SPEED_1000:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+ break;
+ case SPEED_10:
+ /* There is nothing to set for 10M */
+ break;
+ default:
+ /* Invalid speed for SGMII */
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
+ break;
+ }
+
+ /* Setting the full duplex */
+ if (phy->req_duplex == DUPLEX_FULL)
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
+
+ } else { /* AN mode */
+ /* Enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, 0);
+ }
+}
+
+/* Link management
+ */
+static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 pd_10g, status2_1000x;
+ if (phy->req_line_speed != SPEED_AUTO_NEG)
+ return 0;
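+	/* The 1000X_STATUS2 register is read twice on purpose: status bits
+	 * of this kind are typically latched, so the first read presumably
+	 * returns (and clears) the sticky value while the second reflects
+	 * the current state.
+	 */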
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+ DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
+ params->port);
+ return 1;
+ }
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
+
+ if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+ DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
+ params->port);
+ return 1;
+ }
+ return 0;
+}
+
+static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ u16 ld_pause; /* local driver */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ struct bnx2x *bp = params->bp;
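+	/* pause_result packs the local-device pause ability in its upper
+	 * two bits and the link partner's in the lower two;
+	 * bnx2x_pause_resolve() maps that combination to the final
+	 * flow-control mode.
+	 */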
+ if ((gp_status &
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result);
+ } else {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
+ }
+ bnx2x_pause_resolve(phy, params, vars, pause_result);
+}
+
+static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ struct bnx2x *bp = params->bp;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* Resolve from gp_status in case of AN complete and not sgmii */
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+		/* Update the advertised flow-control of LD/LP in AN */
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_update_adv_fc(phy, params, vars, gp_status);
+ /* But set the flow-control result as the requested one */
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ } else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+ (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+ if (bnx2x_direct_parallel_detect_used(phy, params)) {
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ return;
+ }
+ bnx2x_update_adv_fc(phy, params, vars, gp_status);
+ }
+ DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
+}
+
+static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 rx_status, ustat_val, cl37_fsm_received;
+ DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
+ /* Step 1: Make sure signal is detected */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
+ if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
+ (MDIO_RX0_RX_STATUS_SIGDET)) {
+		DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. "
+			     "rx_status(0x80b0) = 0x%x\n", rx_status);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ return;
+ }
+ /* Step 2: Check CL73 state machine */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
+ if ((ustat_val &
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
+ DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
+ "ustat_val(0x8371) = 0x%x\n", ustat_val);
+ return;
+ }
+ /* Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_received);
+ if ((cl37_fsm_received &
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
+		DP(NETIF_MSG_LINK, "No CL37 FSM pages were received. "
+			     "misc_rx_status(0x8330) = 0x%x\n",
+			     cl37_fsm_received);
+ return;
+ }
+	/* The combined cl37/cl73 fsm state information indicates that we
+	 * are connected to a device which does not support cl73, but does
+	 * support cl37 BAM. In this case we disable cl73 and restart cl37
+	 * auto-neg
+	 */
+
+ /* Disable CL73 */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
+ /* Restart CL37 autoneg */
+ bnx2x_restart_autoneg(phy, params, 0);
+ DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
+}
+
+static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ if (bnx2x_direct_parallel_detect_used(phy, params))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u16 is_link_up,
+ u16 speed_mask,
+ u16 is_duplex)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+ if (is_link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ switch (speed_mask) {
+ case GP_STATUS_10M:
+ vars->line_speed = SPEED_10;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_10TFD;
+ else
+ vars->link_status |= LINK_10THD;
+ break;
+
+ case GP_STATUS_100M:
+ vars->line_speed = SPEED_100;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_100TXFD;
+ else
+ vars->link_status |= LINK_100TXHD;
+ break;
+
+ case GP_STATUS_1G:
+ case GP_STATUS_1G_KX:
+ vars->line_speed = SPEED_1000;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_1000TFD;
+ else
+ vars->link_status |= LINK_1000THD;
+ break;
+
+ case GP_STATUS_2_5G:
+ vars->line_speed = SPEED_2500;
+ if (is_duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_2500TFD;
+ else
+ vars->link_status |= LINK_2500THD;
+ break;
+
+ case GP_STATUS_5G:
+ case GP_STATUS_6G:
+ DP(NETIF_MSG_LINK,
+ "link speed unsupported gp_status 0x%x\n",
+ speed_mask);
+ return -EINVAL;
+
+ case GP_STATUS_10G_KX4:
+ case GP_STATUS_10G_HIG:
+ case GP_STATUS_10G_CX4:
+ case GP_STATUS_10G_KR:
+ case GP_STATUS_10G_SFI:
+ case GP_STATUS_10G_XFI:
+ vars->line_speed = SPEED_10000;
+ vars->link_status |= LINK_10GTFD;
+ break;
+ case GP_STATUS_20G_DXGXS:
+ case GP_STATUS_20G_KR2:
+ vars->line_speed = SPEED_20000;
+ vars->link_status |= LINK_20GTFD;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "link speed unsupported gp_status 0x%x\n",
+ speed_mask);
+ return -EINVAL;
+ }
+ } else { /* link_down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+ }
+ DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
+ vars->phy_link_up, vars->line_speed);
+ return 0;
+}
+
+static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+ int rc = 0;
+
+ /* Read gp_status */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+ duplex = DUPLEX_FULL;
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+ link_up = 1;
+ speed_mask = gp_status & GP_STATUS_SPEED_MASK;
+ DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
+ gp_status, link_up, speed_mask);
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+ duplex);
+ if (rc == -EINVAL)
+ return rc;
+
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ vars->duplex = duplex;
+ bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_xgxs_an_resolve(phy, params, vars,
+ gp_status);
+ }
+ } else { /* Link_down */
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ SINGLE_MEDIA_DIRECT(params)) {
+ /* Check signal is detected */
+ bnx2x_check_fallback_to_cl37(phy, params);
+ }
+ }
+
+ /* Read LP advertised speeds*/
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) {
+ u16 val;
+
+ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
+
+static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 lane;
+ u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+ int rc = 0;
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Read gp_status */
+ if ((params->loopback_mode) &&
+ (phy->flags & FLAGS_WC_DUAL_MODE)) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+ link_up &= 0x1;
+ } else if ((phy->req_line_speed > SPEED_10000) &&
+ (phy->supported & SUPPORTED_20000baseMLD2_Full)) {
+ u16 temp_link_up;
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &temp_link_up);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
+ temp_link_up, link_up);
+ link_up &= (1<<2);
+ if (link_up)
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status1);
+ DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
+ /* Check for either KR, 1G, or AN up. */
+ link_up = ((gp_status1 >> 8) |
+ (gp_status1 >> 12) |
+ (gp_status1)) &
+ (1 << lane);
+ if (phy->supported & SUPPORTED_20000baseKR2_Full) {
+ u16 an_link;
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &an_link);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &an_link);
+ link_up |= (an_link & (1<<2));
+ }
+ if (link_up && SINGLE_MEDIA_DIRECT(params)) {
+ u16 pd, gp_status4;
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* Check Autoneg complete */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_4,
+ &gp_status4);
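+				/* AN-complete is presumably reported per
+				 * lane, starting at bit 12 for lane 0
+				 */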
+ if (gp_status4 & ((1<<12)<<lane))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ /* Check parallel detect used */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_STATUS,
+ &pd);
+ if (pd & (1<<15))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = duplex;
+ }
+ }
+
+ if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
+ SINGLE_MEDIA_DIRECT(params)) {
+ u16 val;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ }
+
+ if (lane < 2) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+ }
+ DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
+
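+	/* Each GP2 speed register presumably carries two lanes' speed
+	 * fields, one per byte; even lanes sit in the low byte, so shift
+	 * them up before masking the 0x3f00 speed field.
+	 */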
+ if ((lane & 1) == 0)
+ gp_speed <<= 8;
+ gp_speed &= 0x3f00;
+ link_up = !!link_up;
+
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+ duplex);
+
+	/* In case of KR link down, start up the recovery procedure */
+ if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+ (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
+
+static void bnx2x_set_gmii_tx_driver(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ u16 lp_up2;
+ u16 tx_driver;
+ u16 bank;
+
+ /* Read precomp */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
+
+ /* Bits [10:7] at lp_up2, positioned at [15:12] */
+ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+ MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+ MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+
+ if (lp_up2 == 0)
+ return;
+
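+	/* Walk the TX0..TX3 banks; consecutive TX banks are evenly spaced,
+	 * so (TX1 - TX0) advances exactly one bank per iteration.
+	 */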
+ for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
+ CL22_RD_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
+
+ /* Replace tx_driver bits [15:12] */
+ if (lp_up2 !=
+ (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
+ tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+ tx_driver |= lp_up2;
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
+ }
+ }
+}
+
+static int bnx2x_emac_program(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u16 mode = 0;
+
+ DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
+ bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
+ switch (vars->line_speed) {
+ case SPEED_10:
+ mode |= EMAC_MODE_PORT_MII_10M;
+ break;
+
+ case SPEED_100:
+ mode |= EMAC_MODE_PORT_MII;
+ break;
+
+ case SPEED_1000:
+ mode |= EMAC_MODE_PORT_GMII;
+ break;
+
+ case SPEED_2500:
+ mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+ break;
+
+ default:
+ /* 10G not valid for EMAC */
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
+ return -EINVAL;
+ }
+
+ if (vars->duplex == DUPLEX_HALF)
+ mode |= EMAC_MODE_HALF_DUPLEX;
+ bnx2x_bits_en(bp,
+ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+ mode);
+
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+ return 0;
+}
+
+static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 bank, i = 0;
+ struct bnx2x *bp = params->bp;
+
+ for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_RX0_RX_EQ_BOOST,
+ phy->rx_preemphasis[i]);
+ }
+
+ for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER,
+ phy->tx_preemphasis[i]);
+ }
+}
+
+static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+ bnx2x_set_preemphasis(phy, params);
+
+ /* Forced speed requested? */
+ if (vars->line_speed != SPEED_AUTO_NEG ||
+ (SINGLE_MEDIA_DIRECT(params) &&
+ params->loopback_mode == LOOPBACK_EXT)) {
+ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+ /* Disable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, 0);
+
+ /* Program speed and duplex */
+ bnx2x_program_serdes(phy, params, vars);
+
+ } else { /* AN_mode */
+ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+ /* AN enabled */
+ bnx2x_set_brcm_cl37_advertisement(phy, params);
+
+ /* Program duplex & pause advertisement (for aneg) */
+ bnx2x_set_ieee_aneg_advertisement(phy, params,
+ vars->ieee_fc);
+
+ /* Enable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, enable_cl73);
+
+ /* Enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, enable_cl73);
+ }
+
+ } else { /* SGMII mode */
+ DP(NETIF_MSG_LINK, "SGMII\n");
+
+ bnx2x_initialize_sgmii_process(phy, params, vars);
+ }
+}
+
+static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ vars->phy_flags |= PHY_XGXS_FLAG;
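+	/* Select SGMII when a 10/100 speed is forced, when only sub-1G
+	 * speeds are advertised, or for a direct SD SerDes; otherwise
+	 * stay in XGXS (fiber) mode.
+	 */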
+ if ((phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10))) ||
+ (!phy->req_line_speed &&
+ (phy->speed_cap_mask >=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->speed_cap_mask <
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ bnx2x_set_aer_mmd(params, phy);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bnx2x_set_master_ln(params, phy);
+
+	/* Reset the SerDes and wait for the reset bit to return low */
+	rc = bnx2x_reset_unicore(params, phy, 0);
+ if (rc)
+ return rc;
+
+ bnx2x_set_aer_mmd(params, phy);
+ /* Setting the masterLn_def again after the reset */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+ bnx2x_set_master_ln(params, phy);
+ bnx2x_set_swap_lanes(params, phy);
+ }
+
+ return rc;
+}
+
+static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 cnt, ctrl;
+ /* Wait for soft reset to get cleared up to 1 sec */
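+	/* Bit 15 of the control register is the IEEE soft-reset bit,
+	 * which self-clears once the reset has completed.
+	 */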
+ for (cnt = 0; cnt < 1000; cnt++) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ else
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ if (!(ctrl & (1<<15)))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cnt == 1000)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
+ DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+ return cnt;
+}
+
+static void bnx2x_link_int_enable(struct link_params *params)
+{
+ u8 port = params->port;
+ u32 mask;
+ struct bnx2x *bp = params->bp;
+
+ /* Setting the status to report on link up for either XGXS or SerDes */
+ if (CHIP_IS_E3(bp)) {
+ mask = NIG_MASK_XGXS0_LINK_STATUS;
+ if (!(SINGLE_MEDIA_DIRECT(params)))
+ mask |= NIG_MASK_MI_INT;
+ } else if (params->switch_cfg == SWITCH_CFG_10G) {
+ mask = (NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_XGXS0_LINK_STATUS);
+ DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+
+ } else { /* SerDes */
+ mask = NIG_MASK_SERDES0_LINK_STATUS;
+ DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+ }
+ bnx2x_bits_en(bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ mask);
+
+ DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
+ (params->switch_cfg == SWITCH_CFG_10G),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+ DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+}
+
+static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
+ u8 exp_mi_int)
+{
+ u32 latch_status = 0;
+
+ /* Disable the MI INT ( external phy int ) by writing 1 to the
+ * status register. Link down indication is high-active-signal,
+ * so in this case we need to write the status to clear the XOR
+ */
+ /* Read Latched signals */
+ latch_status = REG_RD(bp,
+ NIG_REG_LATCH_STATUS_0 + port*8);
+ DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
+ /* Handle only those with latched-signal=up.*/
+ if (exp_mi_int)
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+ else
+ bnx2x_bits_dis(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+
+ if (latch_status & 1) {
+
+ /* For all latched-signal=up : Re-Arm Latch signals */
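+		/* Writing the (unchanged) value back presumably acts as a
+		 * write-1-to-clear on bit 0, re-arming the latch.
+		 */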
+ REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
+ (latch_status & 0xfffe) | (latch_status & 1));
+ }
+	/* For all latched-signal=up, write original_signal to status */
+}
+
+static void bnx2x_link_int_ack(struct link_params *params,
+ struct link_vars *vars, u8 is_10g_plus)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 mask;
+	/* First reset all statuses; we assume only one line will
+	 * change at a time
+	 */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
+ if (vars->phy_link_up) {
+ if (USES_WARPCORE(bp))
+ mask = NIG_STATUS_XGXS0_LINK_STATUS;
+ else {
+ if (is_10g_plus)
+ mask = NIG_STATUS_XGXS0_LINK10G;
+ else if (params->switch_cfg == SWITCH_CFG_10G) {
+ /* Disable the link interrupt by writing 1 to
+ * the relevant lane in the status register
+ */
+ u32 ser_lane =
+ ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ mask = ((1 << ser_lane) <<
+ NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+ } else
+ mask = NIG_STATUS_SERDES0_LINK_STATUS;
+ }
+ DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
+ mask);
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ mask);
+ }
+}
+
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
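+	/* No readable version: emit an empty string. The length is
+	 * decremented to account for the NUL terminator, presumably to
+	 * keep the caller's remaining-length bookkeeping consistent.
+	 */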
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
+static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+{
+ u16 ret;
+
+ if (*len < 10) {
+		/* Need more than 10 chars for this format */
+ bnx2x_null_format_ver(num, str, len);
+ return -EINVAL;
+ }
+
+ ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
+ num & 0xFFFF);
+ *len -= ret;
+ return 0;
+}
+
+static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
+{
+ u16 ret;
+
+ if (*len < 10) {
+		/* Need more than 10 chars for this format */
+ bnx2x_null_format_ver(num, str, len);
+ return -EINVAL;
+ }
+
+ ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
+ (num >> 8) & 0xFF, num & 0xFF);
+ *len -= ret;
+ return 0;
+}
+
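+/* Compose the external PHY FW version string. When two external PHYs are
+ * present, the two formatted versions are joined with '/' (e.g. something
+ * like "a.b/c.d.e", depending on each phy's format_fw_ver callback).
+ */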
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ u16 len)
+{
+ struct bnx2x *bp;
+ u32 spirom_ver = 0;
+ int status = 0;
+ u8 *ver_p = version;
+ u16 remain_len = len;
+ if (version == NULL || params == NULL)
+ return -EINVAL;
+ bp = params->bp;
+
+ /* Extract first external phy*/
+ version[0] = '\0';
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
+
+ if (params->phy[EXT_PHY1].format_fw_ver) {
+ status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p += (len - remain_len);
+ }
+ if ((params->num_phys == MAX_PHYS) &&
+ (params->phy[EXT_PHY2].ver_addr != 0)) {
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
+ if (params->phy[EXT_PHY2].format_fw_ver) {
+ *ver_p = '/';
+ ver_p++;
+ remain_len--;
+ status |= params->phy[EXT_PHY2].format_fw_ver(
+ spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p = version + (len - remain_len);
+ }
+ }
+ *ver_p = '\0';
+ return status;
+}
+
+static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+
+ if (phy->req_line_speed != SPEED_1000) {
+ u32 md_devad = 0;
+
+ DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+ if (!CHIP_IS_E3(bp)) {
+ /* Change the uni_phy_addr in the nig */
+ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+ port*0x18));
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ 0x5);
+ }
+
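+		/* The devad-5 writes below go through the AER/CL73 banks;
+		 * 0x2800 and 0x6041 are presumably vendor-prescribed values
+		 * that force loopback with autoneg off.
+		 */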
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
+ msleep(200);
+ /* Set aer mmd back */
+ bnx2x_set_aer_mmd(params, phy);
+
+ if (!CHIP_IS_E3(bp)) {
+ /* And md_devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ md_devad);
+ }
+ } else {
+ u16 mii_ctrl;
+ DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+ bnx2x_cl45_read(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ &mii_ctrl);
+ bnx2x_cl45_write(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ mii_ctrl |
+ MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+ }
+}
+
+int bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed)
+{
+ u8 port = params->port;
+ u16 hw_led_mode = params->hw_led_mode;
+ int rc = 0;
+ u8 phy_idx;
+ u32 tmp;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
+ DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
+ speed, hw_led_mode);
+	/* In case an external PHY provides its own LED control, invoke it */
+ for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].set_link_led) {
+ params->phy[phy_idx].set_link_led(
+ &params->phy[phy_idx], params, mode);
+ }
+ }
+
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ SHARED_HW_CFG_LED_MAC1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ if (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
+ EMAC_LED_100MB_OVERRIDE |
+ EMAC_LED_10MB_OVERRIDE);
+ else
+ tmp |= EMAC_LED_OVERRIDE;
+
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp);
+ break;
+
+ case LED_MODE_OPER:
+		/* For all other phys, OPER mode is the same as ON, so in
+		 * case link is down, do nothing
+		 */
+ if (!vars->link_up)
+ break;
+ fallthrough;
+ case LED_MODE_ON:
+ if (((params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+ (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
+ CHIP_IS_E2(bp) && params->num_phys == 2) {
+ /* This is a work-around for E2+8727 Configurations */
+ if (mode == LED_MODE_ON ||
+ speed == SPEED_10000){
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp | EMAC_LED_OVERRIDE));
+ /* Return here without enabling traffic
+ * LED blink and setting rate in ON mode.
+ * In oper mode, enabling LED blink
+ * and setting rate is needed.
+ */
+ if (mode == LED_MODE_ON)
+ return rc;
+ }
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
+ /* This is a work-around for HW issue found when link
+ * is up in CL73
+ */
+ if ((!CHIP_IS_E3(bp)) ||
+ (CHIP_IS_E3(bp) &&
+ mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp) ||
+ (mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ } else if ((params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
+ (mode == LED_MODE_ON)) {
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp |
+ EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE);
+ /* Break here; otherwise, it'll disable the
+ * intended override.
+ */
+ break;
+ } else {
+ u32 nig_led_mode = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ?
+ (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ nig_led_mode);
+ }
+
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
+ /* Set blinking rate to ~15.9Hz */
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+ LED_BLINK_RATE_VAL_E3);
+ else
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+ LED_BLINK_RATE_VAL_E1X_E2);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+ port*4, 1);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp & (~EMAC_LED_OVERRIDE)));
+
+ if (CHIP_IS_E1(bp) &&
+ ((speed == SPEED_2500) ||
+ (speed == SPEED_1000) ||
+ (speed == SPEED_100) ||
+ (speed == SPEED_10))) {
+ /* For speeds less than 10G LED scheme is different */
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port*4, 1);
+ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+ port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+ port*4, 1);
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
+ mode);
+ break;
+ }
+ return rc;
+}
+
+/* This function reflects the actual link state, as read DIRECTLY from the
+ * HW
+ */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 gp_status = 0, phy_index = 0;
+ u8 ext_phy_link_up = 0, serdes_phy_type;
+ struct link_vars temp_vars;
+ struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+
+ if (CHIP_IS_E3(bp)) {
+ u16 link_up;
+ if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
+ > SPEED_10000) {
+ /* Check 20G link */
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ link_up &= (1<<2);
+ } else {
+			/* Check 10G link and below */
+ u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status);
+ gp_status = ((gp_status >> 8) & 0xf) |
+ ((gp_status >> 12) & 0xf);
+ link_up = gp_status & (1 << lane);
+ }
+ if (!link_up)
+ return -ESRCH;
+ } else {
+ CL22_RD_OVER_CL45(bp, int_phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ /* Link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
+ }
+ /* In XGXS loopback mode, do not check external PHY */
+ if (params->loopback_mode == LOOPBACK_XGXS)
+ return 0;
+
+ switch (params->num_phys) {
+ case 1:
+ /* No external PHY */
+ return 0;
+ case 2:
+ ext_phy_link_up = params->phy[EXT_PHY1].read_status(
+ &params->phy[EXT_PHY1],
+ params, &temp_vars);
+ break;
+ case 3: /* Dual Media */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ serdes_phy_type = ((params->phy[phy_index].media_type ==
+ ETH_PHY_SFPP_10G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_1G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_XFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_DA_TWINAX));
+
+ if (is_serdes != serdes_phy_type)
+ continue;
+ if (params->phy[phy_index].read_status) {
+ ext_phy_link_up |=
+ params->phy[phy_index].read_status(
+ &params->phy[phy_index],
+ params, &temp_vars);
+ }
+ }
+ break;
+ }
+ if (ext_phy_link_up)
+ return 0;
+ return -ESRCH;
+}
+
+static int bnx2x_link_initialize(struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 phy_index, non_ext_phy;
+ struct bnx2x *bp = params->bp;
+	/* If an external phy exists, the line speed is the one linked up
+	 * by the external phy. If the board is direct-media only, the
+	 * line_speed during initialization will be equal to the
+	 * req_line_speed
+	 */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+	/* Initialize the internal phy in case this is a direct board
+	 * (no external phys), or this board has an external phy which
+	 * requires the internal phy to be initialized first.
+	 */
+ if (!USES_WARPCORE(bp))
+ bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
+ /* init ext phy and enable link state int */
+ non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+
+ if (non_ext_phy ||
+ (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
+ (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ if (vars->line_speed == SPEED_AUTO_NEG &&
+ (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp)))
+ bnx2x_set_parallel_detection(phy, params);
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy, params, vars);
+ }
+
+ /* Re-read this value in case it was changed inside config_init due to
+ * limitations of optic module
+ */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+ /* Init external phy*/
+ if (non_ext_phy) {
+ if (params->phy[INT_PHY].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ } else {
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ /* No need to initialize second phy in case of first
+ * phy only selection. In case of second phy, we do
+ * need to initialize the first phy, since they are
+ * connected.
+ */
+ if (params->phy[phy_index].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+
+ if (phy_index == EXT_PHY2 &&
+ (bnx2x_phy_selection(params) ==
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+ DP(NETIF_MSG_LINK,
+ "Not initializing second phy\n");
+ continue;
+ }
+ params->phy[phy_index].config_init(
+ &params->phy[phy_index],
+ params, vars);
+ }
+ }
+ /* Reset the interrupt indication after phy was initialized */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
+ params->port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+ return 0;
+}
+
+static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ /* Reset the SerDes/XGXS */
+ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
+}
+
+static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ /* HW reset */
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ DP(NETIF_MSG_LINK, "reset external PHY\n");
+}
+
+static int bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+ vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
+ /* Indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+
+ /* Update shared memory */
+ vars->link_status &= ~LINK_UPDATE_MASK;
+ vars->line_speed = 0;
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* Activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* Disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ usleep_range(10000, 20000);
+ /* Reset BigMac/Xmac */
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+ if (CHIP_IS_E3(bp)) {
+ /* Prevent LPI Generation by chip */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+ 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+ 0);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
+
+ return 0;
+}
+
+static int bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g)
+{
+ struct bnx2x *bp = params->bp;
+ u8 phy_idx, port = params->port;
+ int rc = 0;
+
+ vars->link_status |= (LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG);
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+ if (USES_WARPCORE(bp)) {
+ if (link_10g) {
+ if (bnx2x_xmac_enable(params, vars, 0) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+ } else
+ bnx2x_umac_enable(params, vars, 0);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, vars->line_speed);
+
+ if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+ (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+ DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+ (params->port << 2), 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+ (params->port << 2), 0xfc20);
+ }
+ }
+ if ((CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp))) {
+ if (link_10g) {
+ if (bnx2x_bmac_enable(params, vars, 0, 1) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, SPEED_10000);
+ } else {
+ rc = bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
+
+ /* AN complete? */
+ if ((vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+ SINGLE_MEDIA_DIRECT(params))
+ bnx2x_set_gmii_tx_driver(params);
+ }
+ }
+
+ /* PBF - link up */
+ if (CHIP_IS_E1x(bp))
+ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* Disable drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+ /* Update shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ bnx2x_update_mng_eee(params, vars->eee_status);
+ /* Check remote fault */
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+ bnx2x_check_half_open_conn(params, vars, 0);
+ break;
+ }
+ }
+ msleep(20);
+ return rc;
+}
+
+static void bnx2x_chng_link_count(struct link_params *params, bool clear)
+{
+ struct bnx2x *bp = params->bp;
+ u32 addr, val;
+
+ /* Verify the link_change_count is supported by the MFW */
+ if (!(SHMEM2_HAS(bp, link_change_count)))
+ return;
+
+ addr = params->shmem2_base +
+ offsetof(struct shmem2_region, link_change_count[params->port]);
+ if (clear)
+ val = 0;
+ else
+ val = REG_RD(bp, addr) + 1;
+ REG_WR(bp, addr, val);
+}
+
+/* The bnx2x_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ * to be up
+ * - SINGLE_MEDIA - The link between the 577xx and the external
+ *   phy (XGXS) needs to be up, as well as the external link of
+ *   the phy (PHY_EXT1)
+ * - DUAL_MEDIA - The link between the 577xx and the first
+ *   external phy needs to be up, and at least one of the 2
+ *   external phy links must be up.
+ */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ struct link_vars phy_vars[MAX_PHYS];
+ u8 port = params->port;
+ u8 link_10g_plus, phy_index;
+ u32 prev_link_status = vars->link_status;
+ u8 ext_phy_link_up = 0, cur_link_up;
+ int rc = 0;
+ u8 is_mi_int = 0;
+ u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+ u8 active_external_phy = INT_PHY;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_UPDATE_MASK;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ phy_vars[phy_index].flow_ctrl = 0;
+ phy_vars[phy_index].link_status = 0;
+ phy_vars[phy_index].line_speed = 0;
+ phy_vars[phy_index].duplex = DUPLEX_FULL;
+ phy_vars[phy_index].phy_link_up = 0;
+ phy_vars[phy_index].link_up = 0;
+ phy_vars[phy_index].fault_detected = 0;
+ /* different consideration, since vars holds inner state */
+ phy_vars[phy_index].eee_status = vars->eee_status;
+ }
+
+ if (USES_WARPCORE(bp))
+ bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
+
+ DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
+ port, (vars->phy_flags & PHY_XGXS_FLAG),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+
+ is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+ port*0x18) > 0);
+ DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ is_mi_int,
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+
+ /* Disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ /* Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ struct bnx2x_phy *phy = &params->phy[phy_index];
+ if (!phy->read_status)
+ continue;
+ /* Read link status and params of this ext phy */
+ cur_link_up = phy->read_status(phy, params,
+ &phy_vars[phy_index]);
+ if (cur_link_up) {
+ DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
+ phy_index);
+ } else {
+ DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
+ phy_index);
+ continue;
+ }
+
+ if (!ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ active_external_phy = phy_index;
+ } else {
+ switch (bnx2x_phy_selection(params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ /* In this option, the first PHY makes sure to pass the
+ * traffic through itself only.
+ * It's not clear how to reset the link on the second
+ * phy.
+ */
+ active_external_phy = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ /* In this option, the first PHY makes sure to pass the
+ * traffic through the second PHY.
+ */
+ active_external_phy = EXT_PHY2;
+ break;
+ default:
+ /* Link indication on both PHYs with the following cases
+ * is invalid:
+ * - FIRST_PHY means that second phy wasn't initialized,
+ * hence its link is expected to be down
+ * - SECOND_PHY means that first phy should not be able
+ * to link up by itself (using configuration)
+ * - DEFAULT should be overridden during initialization
+ */
+				DP(NETIF_MSG_LINK, "Invalid link indication "
+					   "mpc=0x%x. DISABLING LINK !!!\n",
+					   params->multi_phy_config);
+ ext_phy_link_up = 0;
+ break;
+ }
+ }
+ }
+ prev_line_speed = vars->line_speed;
+ /* Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
+ if (params->phy[INT_PHY].read_status)
+ params->phy[INT_PHY].read_status(
+ &params->phy[INT_PHY],
+ params, vars);
+	/* The INT_PHY flow control resides in the vars. This includes the
+	 * case where the speed or flow control are not set to AUTO.
+	 * Otherwise, the active external phy flow control result is set
+	 * to the vars. The ext_phy_line_speed is needed to check if the
+	 * speed is different between the internal phy and external phy.
+	 * This case may be the result of an intermediate link speed change.
+	 */
+ if (active_external_phy > INT_PHY) {
+ vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+ /* Link speed is taken from the XGXS. AN and FC result from
+ * the external phy.
+ */
+ vars->link_status |= phy_vars[active_external_phy].link_status;
+
+		/* If active_external_phy is the first PHY and link is up,
+		 * disable TX on the second external PHY
+		 */
+ if (active_external_phy == EXT_PHY1) {
+ if (params->phy[EXT_PHY2].phy_specific_func) {
+ DP(NETIF_MSG_LINK,
+ "Disabling TX on EXT_PHY2\n");
+ params->phy[EXT_PHY2].phy_specific_func(
+ &params->phy[EXT_PHY2],
+ params, DISABLE_TX);
+ }
+ }
+
+ ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+ vars->duplex = phy_vars[active_external_phy].duplex;
+ if (params->phy[active_external_phy].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ else
+ vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+ vars->eee_status = phy_vars[active_external_phy].eee_status;
+
+ DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
+ active_external_phy);
+ }
+
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL) {
+ bnx2x_rearm_latch_signal(bp, port,
+ phy_index ==
+ active_external_phy);
+ break;
+ }
+ }
+ DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ " ext_phy_line_speed = %d\n", vars->flow_ctrl,
+ vars->link_status, ext_phy_line_speed);
+	/* Upon link speed change, set the NIG into drain mode. This deals
+	 * with a possible FIFO glitch due to the clock change when the speed
+	 * is decreased without a link-down indication
+ */
+
+ if (vars->phy_link_up) {
+ if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+ (ext_phy_line_speed != vars->line_speed)) {
+ DP(NETIF_MSG_LINK, "Internal link speed %d is"
+ " different than the external"
+ " link speed %d\n", vars->line_speed,
+ ext_phy_line_speed);
+ vars->phy_link_up = 0;
+ } else if (prev_line_speed != vars->line_speed) {
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+ 0);
+ usleep_range(1000, 2000);
+ }
+ }
+
+	/* Anything 10G and over uses the BMAC */
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+ bnx2x_link_int_ack(params, vars, link_10g_plus);
+
+	/* In case the external phy link is up while the internal link is
+	 * down (e.g. not yet initialized after link initialization), it
+	 * needs to be initialized.
+	 * Note that after a link down-up as a result of a cable plug, the
+	 * xgxs link would probably come up again without the need to
+	 * initialize it
+ */
+ if (!(SINGLE_MEDIA_DIRECT(params))) {
+ DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
+ " init_preceding = %d\n", ext_phy_link_up,
+ vars->phy_link_up,
+ params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST);
+ if (!(params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST)
+ && ext_phy_link_up && !vars->phy_link_up) {
+ vars->line_speed = ext_phy_line_speed;
+ if (vars->line_speed < SPEED_1000)
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(
+ &params->phy[INT_PHY], params,
+ vars);
+ }
+ }
+ /* Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up and no fault detected on active PHY.
+ */
+ vars->link_up = (vars->phy_link_up &&
+ (ext_phy_link_up ||
+ SINGLE_MEDIA_DIRECT(params)) &&
+ (phy_vars[active_external_phy].fault_detected == 0));
+
+ /* Update the PFC configuration in case it was changed */
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+ else
+ vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+ if (vars->link_up)
+ rc = bnx2x_update_link_up(params, vars, link_10g_plus);
+ else
+ rc = bnx2x_update_link_down(params, vars);
+
+ if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP)
+ bnx2x_chng_link_count(params, false);
+
+	/* Notify the MCP that the link status has changed */
+ if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
+
+ return rc;
+}
+
+/*****************************************************************************/
+/* External Phy section */
+/*****************************************************************************/
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+{
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ usleep_range(1000, 2000);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+}
+
+static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
+ u32 spirom_ver, u32 ver_addr)
+{
+ DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
+ (u16)(spirom_ver>>16), (u16)spirom_ver, port);
+
+ if (ver_addr)
+ REG_WR(bp, ver_addr, spirom_ver);
+}
+
+static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
+{
+ u16 fw_ver1, fw_ver2;
+
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
+ phy->ver_addr);
+}
+
+static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ if (val & (1<<5))
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ if ((val & (1<<0)) == 0)
+ vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
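+
+/* Note on the double read above: the CL45 AN status register is
+ * latched, so the first read returns the stale value and the second
+ * read reflects the current state. Bit 5 indicates autoneg complete;
+ * a cleared bit 0 (link-partner autoneg ability not seen) is taken
+ * to mean parallel detection was used instead.
+ */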
+
+/******************************************************************/
+/* common BCM8073/BCM8727 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_10 ||
+ phy->req_line_speed == SPEED_100) {
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ return;
+ }
+
+ if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
+ (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
+ u16 pause_result;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+ bnx2x_pause_resolve(phy, params, vars, pause_result);
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
+ pause_result);
+ }
+}
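+
+/* Worked example of the pause_result encoding in bnx2x_8073_resolve_fc()
+ * above: a sketch assuming the CL37 advertisement layout in which
+ * MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH covers bits 7-8
+ * (SYMMETRIC = bit 7, ASYMMETRIC = bit 8), i.e. PAUSE_BOTH == 0x0180:
+ *   ld_pause = 0x0180 -> (0x0180 & 0x0180) >> 5 = 0x000c (local, bits [3:2])
+ *   lp_pause = 0x0080 -> (0x0080 & 0x0180) >> 7 = 0x0001 (partner, bits [1:0])
+ *   pause_result = 0x000d
+ * bnx2x_pause_resolve() then maps this 4-bit code to the final
+ * flow-control resolution.
+ */
+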
+static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
+{
+ u32 count = 0;
+ u16 fw_ver1, fw_msgout;
+ int rc = 0;
+
+ /* Boot port from external ROM */
+ /* EDC grst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
+
+	/* uCode reboot and reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ /* Reset internal microprocessor */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ /* Release srst bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* Delay 100ms per the PHY specifications */
+ msleep(100);
+
+	/* The 8073 sometimes takes longer to download */
+ do {
+ count++;
+ if (count > 300) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+				   " Download failed. fw version = 0x%x\n",
+ port, fw_ver1);
+ rc = -EINVAL;
+ break;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+ usleep_range(1000, 2000);
+ } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
+
+ /* Clear ser_boot_ctl bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_save_bcm_spirom_ver(bp, phy, port);
+
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+	   " Download complete. fw version = 0x%x\n",
+ port, fw_ver1);
+
+ return rc;
+}
+
+/******************************************************************/
+/* BCM8073 PHY SECTION */
+/******************************************************************/
+static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* This is required only for 8073 A1, firmware version 0x102 */
+ u16 val;
+
+ /* Read 8073 HW revision*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val != 1) {
+		/* The workaround applies to 8073 A1 only */
+ return 0;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
+
+ /* SNR should be applied only for version 0x102 */
+ if (val != 0x102)
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt, cnt1;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val > 0) {
+		/* No need for the workaround on 8073 A1 and newer */
+ return 0;
+ }
+ /* XAUI workaround in 8073 A0: */
+
+ /* After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
+
+ for (cnt = 0; cnt < 1000; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /* If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
+ if (!(val & (1<<14)) || !(val & (1<<13))) {
+ DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
+ return 0;
+ } else if (!(val & (1<<15))) {
+ DP(NETIF_MSG_LINK, "bit 15 went off\n");
+			/* If bit 15 is 0, then poll Dev1, Reg $C841 until its
+			 * MSB (bit 15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
+ for (cnt1 = 0; cnt1 < 1000; cnt1++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_XAUI_WA, &val);
+ if (val & (1<<15)) {
+ DP(NETIF_MSG_LINK,
+ "XAUI workaround has completed\n");
+ return 0;
+ }
+ usleep_range(3000, 6000);
+ }
+ break;
+ }
+ usleep_range(3000, 6000);
+ }
+ DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
+ return -EINVAL;
+}
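+
+/* Rough timing budget of the workaround above: each polling loop runs
+ * up to 1000 iterations with a 3-6ms sleep per iteration, so each loop
+ * waits roughly 3-6 seconds at most before the -EINVAL timeout path is
+ * taken.
+ */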
+
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ /* Force KR or KX */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 cl37_val;
+ struct bnx2x *bp = params->bp;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ DP(NETIF_MSG_LINK,
+	   "Ext phy AN advertise cl37 0x%x\n", cl37_val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ msleep(500);
+}
+
+static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ /* Enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
+ break;
+ }
+}
+
+static void bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0, tmp1;
+ u8 gpio_port;
+ DP(NETIF_MSG_LINK, "Init 8073\n");
+
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ bnx2x_8073_specific_func(phy, params, PHY_INIT);
+ bnx2x_8073_set_pause_cl37(params, phy, vars);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+
+ /* Swap polarity if required - Must be done only in non-1G mode */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap _P and _N of the KR lines */
+ DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+ /* 10G Rx/Tx and 1G Tx signal polarity swap */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+ (val | (3<<9)));
+ }
+
+
+ /* Enable CL37 BAM */
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
+ if (params->loopback_mode == LOOPBACK_EXT) {
+ bnx2x_807x_force_10G(bp, phy);
+ DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+ return;
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+ }
+ if (phy->req_line_speed != SPEED_AUTO_NEG) {
+ if (phy->req_line_speed == SPEED_10000) {
+ val = (1<<7);
+ } else if (phy->req_line_speed == SPEED_2500) {
+ val = (1<<5);
+ /* Note that 2.5G works only when used with 1G
+ * advertisement
+ */
+ } else
+ val = (1<<5);
+ } else {
+ val = 0;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= (1<<7);
+
+ /* Note that 2.5G works only when used with 1G advertisement */
+ if (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ val |= (1<<5);
+ DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+ if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+ (phy->req_line_speed == SPEED_2500)) {
+ u16 phy_ver;
+ /* Allow 2.5G for A1 and above */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+ &phy_ver);
+ DP(NETIF_MSG_LINK, "Add 2.5G\n");
+ if (phy_ver > 0)
+ tmp1 |= 1;
+ else
+ tmp1 &= 0xfffe;
+ } else {
+ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+ tmp1 &= 0xfffe;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+ /* Add support for CL37 (passive mode) II */
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+ (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
+
+ /* Add support for CL37 (passive mode) III */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+
+	/* The SNR will improve by about 2dB by changing the BW and FFE main
+	 * tap. The remaining commands are executed after the link is up.
+	 * Change the FFE main cursor to 5 in the EDC register
+ */
+ if (bnx2x_8073_is_snr_needed(bp, phy))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+ 0xFB0C);
+
+	/* Enable FEC (Forward Error Correction) Request in the AN */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+ tmp1 |= (1<<15);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Restart autoneg */
+ msleep(500);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+ ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+}
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0;
+ u16 val1, val2;
+ u16 link_status = 0;
+ u16 an1000_status = 0;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+	DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1);
+
+ /* Clear the interrupt LASI status register */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* Check the LASI */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+ /* Check the link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ link_up = ((val1 & 4) == 4);
+ DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+ if (link_up &&
+ ((phy->req_line_speed != SPEED_10000))) {
+ if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+ return 0;
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+ /* Check the link status on 1.1.2 */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+		   " an_link_status=0x%x\n", val2, val1, an1000_status);
+
+ link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+ if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+		/* The SNR will improve by about 2dB by changing the BW and
+		 * FFE main tap. The first write to change the FFE main tap is
+		 * done before restarting AN. Change PLL Bandwidth in EDC register
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+ 0x26BC);
+
+ /* Change CDR Bandwidth in EDC register */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+ 0x0333);
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &link_status);
+
+	/* Bits 0..2 --> speed detected, bits 13..15 --> link is down */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_2500;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ if (link_up) {
+ /* Swap polarity if required */
+ if (params->lane_config &
+ PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap P and N of the KR lines */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+ /* Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it's in 10G mode.
+ */
+ if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+					      " the 8073\n");
+ val1 |= (1<<3);
+ } else
+ val1 &= ~(1<<3);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE,
+ val1);
+ }
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_8073_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val1);
+
+ if (val1 & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val1 & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
+ return link_up;
+}
+
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+}
+
+/******************************************************************/
+/* BCM8705 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "init 8705\n");
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+ /* BCM8705 doesn't have microcode, hence the 0 */
+ bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, rx_sd;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "read status 8705\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+
+ DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+ link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+ if (link_up) {
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+/******************************************************************/
+/* SFP+ module Section */
+/******************************************************************/
+static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 pmd_dis)
+{
+ struct bnx2x *bp = params->bp;
+ /* Disable transmitter only for bootcodes which can enable it afterwards
+ * (for D3 link)
+ */
+ if (pmd_dis) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
+ DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
+ else {
+ DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
+ return;
+ }
+ } else
+ DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+}
+
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+ u8 gpio_port;
+ u32 swap_val, swap_override;
+ struct bnx2x *bp = params->bp;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ return gpio_port ^ (swap_val && swap_override);
+}
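+
+/* Note on the return value above: "swap_val && swap_override" is a
+ * logical AND, so it evaluates to 0 or 1. The XOR therefore flips the
+ * GPIO port (0 <-> 1) only when both the port-swap strap and its
+ * override are non-zero, e.g. gpio_port = 0 with both straps set
+ * returns 1.
+ */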
+
+static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ u16 val;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 tx_en_mode;
+
+	/* Disable/Enable the transmitter (TX laser of the SFP+ module) */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x\n", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
+
+ if (tx_en)
+ val &= ~(1<<15);
+ else
+ val |= (1<<15);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ u16 gpio_pin;
+ u8 gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = bnx2x_get_gpio_port(params);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+ break;
+ }
+}
+
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
+ if (CHIP_IS_E3(bp))
+ bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+ else
+ bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+ u16 i;
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
+ DP(NETIF_MSG_LINK,
+		   "Reading from the eeprom is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+ /* Set the read command byte count */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ (byte_cnt | (dev_addr << 8)));
+
+ /* Set the read command address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
+
+ /* Activate read command */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x2c0f);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ udelay(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ DP(NETIF_MSG_LINK,
+ "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return -EINVAL;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+ }
+
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return 0;
+ usleep_range(1000, 2000);
+ }
+ return -EINVAL;
+}
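+
+/* Timing of the polling loops above: the completion poll matches its
+ * stated budget (100 iterations * udelay(5) = 500us), while the final
+ * idle poll is more relaxed (100 iterations * 1-2ms, i.e. up to about
+ * 200ms) since it only waits for the two-wire engine to return to
+ * idle.
+ */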
+
+static void bnx2x_warpcore_power_module(struct link_params *params,
+ u8 power)
+{
+ u32 pin_cfg;
+ struct bnx2x *bp = params->bp;
+
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+ power, pin_cfg);
+ /* Low ==> corresponding SFP+ module is powered
+ * high ==> the SFP+ module is powered down
+ */
+ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr,
+ u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
+{
+ int rc = 0;
+ u8 i, j = 0, cnt = 0;
+ u32 data_array[4];
+ u16 addr32;
+ struct bnx2x *bp = params->bp;
+
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+
+	/* 4-byte aligned address */
+ addr32 = addr & (~0x3);
+ do {
+ if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
+ bnx2x_warpcore_power_module(params, 0);
+ /* Note that 100us are not enough here */
+ usleep_range(1000, 2000);
+ bnx2x_warpcore_power_module(params, 1);
+ }
+ rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
+ data_array);
+ } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
+ if (rc == 0) {
+ for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+ o_buf[j] = *((u8 *)data_array + i);
+ j++;
+ }
+ }
+
+ return rc;
+}
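+
+/* Worked example of the address alignment above: for addr = 0x5 and
+ * byte_cnt = 3, addr32 = 0x4, so bnx2x_bsc_read() fetches 32-bit words
+ * starting at 0x4 into data_array, and the copy loop takes bytes
+ * (addr - addr32) = 1 through 3 of the byte view of data_array, i.e.
+ * EEPROM bytes 0x5..0x7.
+ */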
+
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val, i;
+
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
+ DP(NETIF_MSG_LINK,
+		   "Reading from the eeprom is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+
+	/* Set the 2-wire transfer rate of the SFP+ module EEPROM
+	 * to 100kHz, since some DACs (direct attach cables) do
+	 * not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ ((dev_addr << 8) | 1));
+
+ /* Need to read from 1.8000 to clear it */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
+
+ /* Set the read command byte count */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
+
+ /* Set the read command address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
+ /* Set the destination address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+
+ /* Activate read command */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
+ /* Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
+ usleep_range(1000, 2000);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ udelay(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ DP(NETIF_MSG_LINK,
+ "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return -EFAULT;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
+ }
+
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -EINVAL;
+}
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf)
+{
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+ u8 xfer_size;
+ u8 *user_data = o_buf;
+ read_sfp_module_eeprom_func_p read_func;
+
+ if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+ return -EINVAL;
+ }
+
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ read_func = bnx2x_8726_read_sfp_module_eeprom;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ read_func = bnx2x_8727_read_sfp_module_eeprom;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ while (!rc && (byte_cnt > 0)) {
+ xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ rc = read_func(phy, params, dev_addr, addr, xfer_size,
+ user_data, 0);
+ byte_cnt -= xfer_size;
+ user_data += xfer_size;
+ addr += xfer_size;
+ }
+ return rc;
+}
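+
+/* Chunking example for the loop above, assuming SFP_EEPROM_PAGE_SIZE
+ * is 16: a request for byte_cnt = 40 at addr = 0 is split into three
+ * reads of 16, 16 and 8 bytes at addresses 0, 16 and 32, landing
+ * contiguously in o_buf.
+ */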
+
+static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 *edc_mode)
+{
+ struct bnx2x *bp = params->bp;
+ u32 sync_offset = 0, phy_idx, media_types;
+ u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
+ *edc_mode = EDC_MODE_LIMITING;
+ phy->media_type = ETH_PHY_UNSPECIFIED;
+ /* First check for copper cable */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ I2C_DEV_ADDR_A0,
+ 0,
+ SFP_EEPROM_FC_TX_TECH_ADDR + 1,
+ (u8 *)val) != 0) {
+ DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
+ return -EINVAL;
+ }
+ params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+ params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+ LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
+ case SFP_EEPROM_CON_TYPE_VAL_COPPER:
+ {
+ u8 copper_module_type;
+ phy->media_type = ETH_PHY_DA_TWINAX;
+		/* Check if it is an active cable (which includes the
+		 * SFP+ module) or a passive cable
+ */
+ copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
+
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
+ DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ *edc_mode = EDC_MODE_ACTIVE_DAC;
+ else
+ check_limiting_mode = 1;
+ } else {
+ *edc_mode = EDC_MODE_PASSIVE_DAC;
+ /* Even in case PASSIVE_DAC indication is not set,
+ * treat it as a passive DAC cable, since some cables
+ * don't have this indication.
+ */
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ DP(NETIF_MSG_LINK,
+ "Passive Copper cable detected\n");
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type\n");
+ }
+ }
+ break;
+ }
+ case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
+ case SFP_EEPROM_CON_TYPE_VAL_LC:
+ case SFP_EEPROM_CON_TYPE_VAL_RJ45:
+ check_limiting_mode = 1;
+ if (((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+ (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) &&
+ (val[SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) {
+ DP(NETIF_MSG_LINK, "1G SFP module detected\n");
+ phy->media_type = ETH_PHY_SFP_1G_FIBER;
+ if (phy->req_line_speed != SPEED_1000) {
+ u8 gport = params->port;
+ phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp)) {
+ gport = BP_PATH(bp) +
+ (params->port << 1);
+ }
+ netdev_err(bp->dev,
+ "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+ gport);
+ }
+ if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+ SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ msleep(40);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ }
+ } else {
+ int idx, cfg_idx = 0;
+ DP(NETIF_MSG_LINK, "10G Optic module detected\n");
+ for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
+ if (params->phy[idx].type == phy->type) {
+ cfg_idx = LINK_CONFIG_IDX(idx);
+ break;
+ }
+ }
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+ phy->req_line_speed = params->req_line_speed[cfg_idx];
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
+ val[SFP_EEPROM_CON_TYPE_ADDR]);
+ return -EINVAL;
+ }
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+ /* Update media type for non-PMF sync */
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (&(params->phy[phy_idx]) == phy) {
+ media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ break;
+ }
+ }
+ REG_WR(bp, sync_offset, media_types);
+ if (check_limiting_mode) {
+ u8 options[SFP_EEPROM_OPTIONS_SIZE];
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_OPTIONS_ADDR,
+ SFP_EEPROM_OPTIONS_SIZE,
+ options) != 0) {
+ DP(NETIF_MSG_LINK,
+ "Failed to read Option field from module EEPROM\n");
+ return -EINVAL;
+ }
+ if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
+ *edc_mode = EDC_MODE_LINEAR;
+ else
+ *edc_mode = EDC_MODE_LIMITING;
+ }
+ DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
+ return 0;
+}
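+
+/* Summary of the EDC mode decision above (a sketch of the logic, not
+ * an exhaustive table):
+ *   copper, active cable, direct (warpcore)  -> EDC_MODE_ACTIVE_DAC
+ *   copper, passive (or unmarked) cable      -> EDC_MODE_PASSIVE_DAC
+ *   optical (or active cable on 8726/8727),
+ *     linear RX output option set            -> EDC_MODE_LINEAR
+ *   optical, otherwise                       -> EDC_MODE_LIMITING (default)
+ */
+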
+/* This function reads the relevant fields from the module (SFP+) and
+ * verifies it is compliant with this board
+ */
+static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u32 val, cmd;
+ u32 fw_resp, fw_cmd_param;
+ char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
+ char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
+ phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
+ val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
+ DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
+ return 0;
+ }
+
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+ /* Use specific phy request */
+ cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+ } else if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+ /* Use first phy request only in case of non-dual media*/
+ if (DUAL_MEDIA(params)) {
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
+ return -EINVAL;
+ }
+ cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+ } else {
+ /* No support in OPT MDL detection */
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
+ return -EINVAL;
+ }
+
+ fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+ fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
+ if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
+ DP(NETIF_MSG_LINK, "Approved module\n");
+ return 0;
+ }
+
+ /* Format the warning message */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_VENDOR_NAME_ADDR,
+ SFP_EEPROM_VENDOR_NAME_SIZE,
+ (u8 *)vendor_name))
+ vendor_name[0] = '\0';
+ else
+ vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_PART_NO_ADDR,
+ SFP_EEPROM_PART_NO_SIZE,
+ (u8 *)vendor_pn))
+ vendor_pn[0] = '\0';
+ else
+ vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
+
+ netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
+ phy->flags |= FLAGS_SFP_NOT_APPROVED;
+ return -EINVAL;
+}
+
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+ struct link_params *params)
+
+{
+ u8 val;
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 timeout;
+ /* Initialization time after hot-plug may take up to 300ms for
+	 * some phy types (e.g. JDSU)
+ */
+
+ for (timeout = 0; timeout < 60; timeout++) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(
+ phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+ 1);
+ else
+ rc = bnx2x_read_sfp_module_eeprom(phy, params,
+ I2C_DEV_ADDR_A0,
+ 1, 1, &val);
+ if (rc == 0) {
+ DP(NETIF_MSG_LINK,
+ "SFP+ module initialization took %d ms\n",
+ timeout * 5);
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ }
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+ 1, 1, &val);
+ return rc;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 is_power_up) {
+	/* Make sure the GPIOs are not being used for LED mode */
+	u16 val;
+	/* In the GPIO register, bit 4 is used to determine if the GPIOs are
+	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
+	 * output.
+	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
+	 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1,
+	 * where the 1st bit is the over-current (input only), and the 2nd bit
+	 * is for power (output only).
+	 *
+	 * In case the NOC feature is disabled and power is up, set the GPIO
+	 * control as input to enable listening for the over-current indication
+	 */
+ if (phy->flags & FLAGS_NOC)
+ return;
+ if (is_power_up)
+ val = (1<<4);
+ else
+		/* Set GPIO control to OUTPUT, and set the power bit
+		 * according to is_power_up
+ */
+ val = (1<<1);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 cur_limiting_mode;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
+ DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
+ cur_limiting_mode);
+
+ if (edc_mode == EDC_MODE_LIMITING) {
+ DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ EDC_MODE_LIMITING);
+	} else { /* LRM mode (default) */
+
+ DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
+
+		/* Changing to LRM mode takes quite a few seconds. So do it
+		 * only if the current mode is limiting (the default is LRM)
+ */
+ if (cur_limiting_mode != EDC_MODE_LIMITING)
+ return 0;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
+ }
+ return 0;
+}
+
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 phy_identifier;
+ u16 rom_ver2_val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1<<9)));
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
+	/* Keep the upper 8 bits, and set the lower 8 bits from edc_mode */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1<<9)));
+
+ return 0;
+}
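+
+/* Worked example of the ROM_VER2 update above, using hypothetical
+ * values: with rom_ver2_val = 0xA55A and edc_mode = 0x0055 the
+ * register is written as (0xA55A & 0xff00) | (0x0055 & 0x00ff) =
+ * 0xA555, i.e. only the low byte is replaced while the high byte is
+ * preserved.
+ */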
+
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ switch (action) {
+ case DISABLE_TX:
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ break;
+ case ENABLE_TX:
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ break;
+ case PHY_INIT:
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ (1<<2) | (1<<5));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
+ /* Make MOD_ABS give interrupt on change */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val |= (1<<12);
+ if (phy->flags & FLAGS_NOC)
+ val |= (3<<5);
+		/* Set the 8727 GPIOs to input to allow reading the 8727 GPIO0
+		 * status, which reflects the SFP+ module over-current
+ */
+ if (!(phy->flags & FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+ action);
+ return;
+ }
+}
+
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+
+ u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ u8 gpio_port = bnx2x_get_gpio_port(params);
+ u16 gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ DP(NETIF_MSG_LINK, "Set fault module-detected led "
+ "pin %x port %x mode %x\n",
+ gpio_pin, gpio_port, gpio_mode);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+ fault_led_gpio);
+ }
+}
+
+static void bnx2x_set_e3_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ u32 pin_cfg;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+ PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+ DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
+ gpio_mode, pin_cfg);
+ bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
+}
+
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
+ if (CHIP_IS_E3(bp)) {
+		/* Low ==> the SFP+ module is supported;
+		 * High ==> the SFP+ module is not on the approved vendor list
+ */
+ bnx2x_set_e3_module_fault_led(params, gpio_mode);
+ } else
+ bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ bnx2x_warpcore_power_module(params, 0);
+ /* Put Warpcore in low power mode */
+ REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+ /* Put LCPLL in low power mode */
+ REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
+}
+
+static void bnx2x_power_sfp_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
+
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_power_module(params->bp, phy, power);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_power_module(params, power);
+ break;
+ default:
+ break;
+ }
+}
+static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 val = 0;
+ u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ struct bnx2x *bp = params->bp;
+
+ u8 lane = bnx2x_get_warpcore_lane(phy, params);
+ /* This is a global register which controls all lanes */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+ val &= ~(0xf << (lane << 2));
+
+ switch (edc_mode) {
+ case EDC_MODE_LINEAR:
+ case EDC_MODE_LIMITING:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ break;
+ case EDC_MODE_PASSIVE_DAC:
+ case EDC_MODE_ACTIVE_DAC:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+ break;
+ default:
+ break;
+ }
+
+ val |= (mode << (lane << 2));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+	/* A read-back here is required */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+
+ /* Restart microcode to re-read the new mode */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+
+}
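+
+/* Lane masking example for the register update above: each lane owns
+ * a 4-bit field at bit offset (lane << 2). For lane 2, ~(0xf << 8)
+ * clears bits 11:8 and (mode << 8) writes the new firmware mode into
+ * that nibble, leaving the other lanes' fields untouched.
+ */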
+
+static void bnx2x_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
+ break;
+ }
+}
+
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 edc_mode;
+ int rc = 0;
+
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
+	/* Enable the transmitter by default */
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
+ params->port);
+ /* Power up module */
+ bnx2x_power_sfp_module(params, phy, 1);
+ if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
+ DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
+ return -EINVAL;
+ } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
+ /* Check SFP+ module compatibility */
+ DP(NETIF_MSG_LINK, "Module verification failed!!\n");
+ rc = -EINVAL;
+ /* Turn on fault module-detected led */
+ bnx2x_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
+ /* Check if need to power down the SFP+ module */
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
+ DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
+ bnx2x_power_sfp_module(params, phy, 0);
+ return rc;
+ }
+ } else {
+ /* Turn off fault module-detected led */
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
+ }
+
+ /* Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
+ bnx2x_set_limiting_mode(params, phy, edc_mode);
+
+	/* Disable transmit for this module if the module is not approved
+	 * and the laser needs to be disabled.
+ */
+ if ((rc) &&
+ ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER))
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+
+ return rc;
+}
+
+void bnx2x_handle_module_detect_int(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ u32 gpio_val;
+ u8 gpio_num, gpio_port;
+ if (CHIP_IS_E3(bp)) {
+ phy = &params->phy[INT_PHY];
+		/* Always enable the TX laser; it will be disabled in case of fault */
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ } else {
+ phy = &params->phy[EXT_PHY1];
+ }
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+ params->port, &gpio_num, &gpio_port) ==
+ -EINVAL) {
+ DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+ return;
+ }
+
+ /* Set valid module led off */
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
+
+ /* Get current gpio val reflecting module plugged in / out*/
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+ /* Call the handling function in case module is detected */
+ if (gpio_val == 0) {
+ bnx2x_set_mdio_emac_per_phy(bp, params);
+ bnx2x_set_aer_mmd(params, phy);
+
+ bnx2x_power_sfp_module(params, phy, 1);
+ bnx2x_set_gpio_int(bp, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+ gpio_port);
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
+ bnx2x_sfp_module_detection(phy, params);
+ if (CHIP_IS_E3(bp)) {
+ u16 rx_tx_in_reset;
+ /* In case WC is out of reset, reconfigure the
+ * link speed while taking into account 1G
+ * module limitation.
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6,
+ &rx_tx_in_reset);
+ if ((!rx_tx_in_reset) &&
+ (params->link_flags &
+ PHY_INITIALIZED)) {
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_config_sfi(phy, params);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ }
+ }
+ } else {
+ DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
+ } else {
+ bnx2x_set_gpio_int(bp, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+ gpio_port);
+ /* Module was plugged out.
+ * Disable transmit for this module
+ */
+ phy->media_type = ETH_PHY_NOT_PRESENT;
+ }
+}
+
+/******************************************************************/
+/* Used by 8706 and 8727 */
+/******************************************************************/
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 alarm_status_offset,
+ u16 alarm_ctrl_offset)
+{
+ u16 alarm_status, val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
+ /* Mask or enable the fault event. */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+ if (alarm_status & (1<<0))
+ val &= ~(1<<0);
+ else
+ val |= (1<<0);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
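+
+/* Note on the double read above: the LASI alarm status register is
+ * latched, so the first read returns the stale value and the second
+ * the current state. The control bit is then toggled so the fault
+ * event is masked while a fault is asserted and re-armed once it
+ * clears.
+ */
+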
+/******************************************************************/
+/* common BCM8706/BCM8726 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, val2, rx_sd, pcs_status;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+ /* Clear RX Alarm*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ /* Clear LASI indication*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+ DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ " link_status 0x%x\n", rx_sd, pcs_status, val2);
+ /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
+ */
+ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+ if (link_up) {
+ if (val2 & (1<<1))
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+
+ /* Capture 10G link fault. Read twice to clear stale value. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ if (val1 & (1<<0))
+ vars->fault_detected = 1;
+ }
+
+ return link_up;
+}
+
+/******************************************************************/
+/* BCM8706 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 tx_en_mode;
+ u16 cnt, val, tmp1;
+ struct bnx2x *bp = params->bp;
+
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait until fw is loaded */
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+ if (val)
+ break;
+ usleep_range(10000, 20000);
+ }
+ DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ u8 i;
+ u16 reg;
+ for (i = 0; i < 4; i++) {
+ reg = MDIO_XS_8706_REG_BANK_RX0 +
+ i*(MDIO_XS_8706_REG_BANK_RX1 -
+ MDIO_XS_8706_REG_BANK_RX0);
+ bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+ /* Clear first 3 bits of the control */
+ val &= ~0x7;
+ /* Set control bits according to configuration */
+ val |= (phy->rx_preemphasis[i] & 0x7);
+ DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+ " reg 0x%x <-- val 0x%x\n", reg, val);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+ }
+ }
+ /* Force speed */
+ if (phy->req_line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ /* Arm LASI for link and Tx fault. */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
+ } else {
+ /* Force 1Gbps using autoneg with 1G advertisement */
+
+ /* Allow CL37 through CL73 */
+ DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+ /* Enable Full-Duplex advertisement on CL37 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+ /* Enable CL37 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ /* 1G support */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+ /* Enable clause 73 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x0400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ 0x0004);
+ }
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+ /* If the TX laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode while the TX laser is disabled
+ */
+
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+ tmp1 |= 0x1;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+ }
+}
+
+static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ return bnx2x_8706_8726_read_status(phy, params, vars);
+}
+
+/******************************************************************/
+/* BCM8726 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
+}
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* Need to wait 100ms after reset */
+ msleep(100);
+
+ /* Micro controller re-boot */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+
+ /* Set soft reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* Wait for 150ms for microcode load */
+ msleep(150);
+
+ /* Disable serial boot control, tri-stating pins SS_N, SCK, MOSI, MISO */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+
+ msleep(200);
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+}
+
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val1;
+ u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ link_up = 0;
+ vars->line_speed = 0;
+ }
+ }
+ return link_up;
+}
+
+static void bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_8726_external_rom_boot(phy, params);
+
+ /* Module detection must be called at initialization time, since the
+ * detection triggered by actual module insertion might occur before the
+ * driver is loaded; when the driver loads, it resets all registers,
+ * including the transmitter
+ */
+ bnx2x_sfp_module_detection(phy, params);
+
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ /* Set Flow control */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ /* Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
+
+ } else { /* Default 10G. Set only LASI control */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
+ }
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK,
+ "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+}
+
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+ /* Set serial boot control for external load */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
+
+/******************************************************************/
+/* BCM8727 PHY SECTION */
+/******************************************************************/
+
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 led_mode_bitmask = 0;
+ u16 gpio_pins_bitmask = 0;
+ u16 val;
+ /* Only the NOC flavor requires setting the LED specifically */
+ if (!(phy->flags & FLAGS_NOC))
+ return;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x03;
+ break;
+ case LED_MODE_ON:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x02;
+ break;
+ case LED_MODE_OPER:
+ led_mode_bitmask = 0x60;
+ gpio_pins_bitmask = 0x11;
+ break;
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
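+ /* Clear the LED mode field (bits 6:4) and apply the new mode */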
+ val &= 0xff8f;
+ val |= led_mode_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val);
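+ /* Clear the GPIO pin field (bits 4:0) and apply the new pin state */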
+ val &= 0xffe0;
+ val |= gpio_pins_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u32 swap_val, swap_override;
+ u8 port;
+ /* The PHY reset is controlled by GPIO 1. Fake the port number
+ * to cancel the swap done in set_gpio()
+ */
+ struct bnx2x *bp = params->bp;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port = (swap_val && swap_override) ^ 1;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+}
+
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 tmp1, val;
+ /* Set option 1G speed */
+ if ((phy->req_line_speed == SPEED_1000) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /* Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /* Since the 8727 has only a single reset pin, the 10G registers
+ * must be set explicitly even though they are the defaults
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+}
+
+static void bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 tx_en_mode;
+ u16 tmp1, mod_abs, tmp2;
+ struct bnx2x *bp = params->bp;
+ /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+
+ bnx2x_8727_specific_func(phy, params, PHY_INIT);
+ /* Initially configure MOD_ABS to interrupt when the module is
+ * present (bit 8)
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 0);
+
+ bnx2x_8727_power_module(bp, phy, 1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ bnx2x_8727_config_speed(phy, params);
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+
+ /* If the TX laser is controlled by GPIO_0, do not let the PHY go into
+ * low power mode while the TX laser is disabled
+ */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
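+ /* Set bit 12 (TXONOFF_PWRDN_DIS) and clear bit 4 in the option config */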
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &tmp2);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ (tmp2 & 0x7fff));
+ }
+}
+
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mod_abs, rx_alarm_status;
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ if (mod_abs & (1<<8)) {
+
+ /* Module is absent */
+ DP(NETIF_MSG_LINK,
+ "MOD_ABS indication show module is absent\n");
+ phy->media_type = ETH_PHY_NOT_PRESENT;
+ /* 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+ } else {
+ /* Module is present */
+ DP(NETIF_MSG_LINK,
+ "MOD_ABS indication show module is present\n");
+ /* First disable transmitter, and if the module is ok, the
+ * module_detection will enable it
+ * 1. Set mod_abs to detect next module absent event (bit 8)
+ * 2. Restore the default polarity of the OPRXLOS signal and
+ * this signal will then correctly indicate the presence or
+ * absence of the Rx signal. (bit 9)
+ */
+ mod_abs |= (1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs |= (1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Clear RX alarm since it stays up as long as the mod_abs
+ * wasn't changed. This needs to be done before calling the
+ * module detection, otherwise it will clear the link update
+ * alarm
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+
+ /* Reconfigure link speed based on module type limitations */
+ bnx2x_8727_config_speed(phy, params);
+ }
+
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
+ rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
+}
+
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0, oc_port = params->port;
+ u16 link_status = 0;
+ u16 rx_alarm_status, lasi_ctrl, val1;
+
+ /* If PHY is not initialized, do not check link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (!lasi_ctrl)
+ return 0;
+
+ /* Check the LASI on Rx */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
+ &rx_alarm_status);
+ vars->line_speed = 0;
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
+
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+ DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
+
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* If a module is present (and this is not the NOC flavor),
+ * check for over-current
+ */
+ if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+ /* Check over-current using 8727 GPIO0 input*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val1);
+
+ if ((val1 & (1<<8)) == 0) {
+ if (!CHIP_IS_E1x(bp))
+ oc_port = BP_PATH(bp) + (params->port << 1);
+ DP(NETIF_MSG_LINK,
+ "8727 Power fault has been detected on port %d\n",
+ oc_port);
+ netdev_err(bp->dev, "Error: Power fault on Port %d has "
+ "been detected and the power to "
+ "that SFP+ module has been removed "
+ "to prevent failure of the card. "
+ "Please remove the SFP+ module and "
+ "restart the system to clear this "
+ "error.\n",
+ oc_port);
+ /* Disable all RX_ALARMs except for mod_abs */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXCTRL, (1<<5));
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ /* Wait for module_absent_event */
+ val1 |= (1<<8);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+ /* Clear RX alarm */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ bnx2x_8727_power_module(params->bp, phy, 0);
+ return 0;
+ }
+ } /* Over current check */
+
+ /* When module absent bit is set, check module */
+ if (rx_alarm_status & (1<<5)) {
+ bnx2x_8727_handle_mod_abs(phy, params);
+ /* Enable all mod_abs and link detection bits */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ ((1<<5) | (1<<2)));
+ }
+
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+ DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n");
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ } else {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ return 0;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
+
+ /* Bits 0..2 --> speed detected,
+ * Bits 13..15 --> link is down
+ */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ /* Capture 10G link fault. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ if (val1 & (1<<0)) {
+ vars->fault_detected = 1;
+ }
+ }
+
+ if (link_up) {
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+ }
+
+ if ((DUAL_MEDIA(params)) &&
+ (phy->req_line_speed == SPEED_1000)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val1);
+ /* In case of dual-media board and 1G, power up the XAUI side,
+ * otherwise power it down. For 10G it is done automatically
+ */
+ if (link_up)
+ val1 &= ~(3<<10);
+ else
+ val1 |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val1);
+ }
+ return link_up;
+}
+
+static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 1);
+
+ /* Disable Transmitter */
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ /* Clear LASI */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
+
+}
+
+/******************************************************************/
+/* BCM8481/BCM84823/BCM84833 PHY SECTION */
+/******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+ return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+ struct bnx2x *bp,
+ u8 port)
+{
+ u16 val, fw_ver2, cnt, i;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_PMA_DEVAD, 0xA819, 0x0014},
+ {MDIO_PMA_DEVAD, 0xA81A, 0xc200},
+ {MDIO_PMA_DEVAD, 0xA81B, 0x0000},
+ {MDIO_PMA_DEVAD, 0xA81C, 0x0300},
+ {MDIO_PMA_DEVAD, 0xA817, 0x0009}
+ };
+ u16 fw_ver1;
+
+ if (bnx2x_is_8483x_8485x(phy)) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
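+ /* Only the low 12 bits hold the version on parts other than 84858 */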
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
+ fw_ver1 &= 0xfff;
+ bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr);
+ } else {
+ /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+ /* (1) set reg 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad,
+ reg_set[i].reg, reg_set[i].val);
+
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx "
+ "phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+
+ /* (2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+ "version(2)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+ bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
+ phy->ver_addr);
+ }
+}
+
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val, led3_blink_rate, offset, i;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
+ {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
+ };
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* Set LED5 source */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x90);
+ led3_blink_rate = 0x000f;
+ } else {
+ led3_blink_rate = 0x0000;
+ }
+ /* Set LED3 BLINK */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_BLINK,
+ led3_blink_rate);
+
+ /* PHYC_CTL_LED_CTL */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= 0xFE00;
+ val |= 0x0092;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
+ val |= 2 << 12; /* LED5 ON based on source */
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ if (bnx2x_is_8483x_8485x(phy))
+ offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+ else
+ offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
+ val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT |
+ MDIO_PMA_REG_84823_LED3_STRETCH_EN;
+ else
+ val = MDIO_PMA_REG_84823_LED3_STRETCH_EN;
+
+ /* stretch_en for LEDs */
+ bnx2x_cl45_read_or_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ offset,
+ val);
+}
+
+static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ if (bnx2x_is_8483x_8485x(phy)) {
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ }
+ /* This PHY uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of clear-on-read
+ */
+ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+ bnx2x_848xx_set_led(bp, phy);
+ break;
+ }
+}
+
+static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 autoneg_val, an_1000_val, an_10_100_val;
+
+ bnx2x_848xx_specific_func(phy, params, PHY_INIT);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+ /* set 1000 speed advertisement */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ &an_1000_val);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ &an_10_100_val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ &autoneg_val);
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ an_1000_val);
+
+ /* Set 10/100 speed advertisement */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
+
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
+ an_10_100_val |= (1<<7);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->supported & SUPPORTED_10baseT_Full)) {
+ an_10_100_val |= (1<<6);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+ (phy->supported & SUPPORTED_10baseT_Half)) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if ((phy->req_line_speed == SPEED_100) &&
+ (phy->supported &
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full))) {
+ autoneg_val |= (1<<13);
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ /* The PHY needs this set even for forced link. */
+ an_10_100_val |= (1<<8) | (1<<7);
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if ((phy->req_line_speed == SPEED_10) &&
+ (phy->supported &
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full))) {
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ an_10_100_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ /* Always write this if this is not 84833/4.
+ * For 84833/4, write it only when it's a forced speed.
+ */
+ if (!bnx2x_is_8483x_8485x(phy) ||
+ ((autoneg_val & (1<<12)) == 0))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (phy->req_line_speed == SPEED_10000)) {
+ DP(NETIF_MSG_LINK, "Advertising 10G\n");
+ /* Restart autoneg for 10G*/
+
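+ /* Advertise 10GBASE-T ability (bit 12 of the 10GBASE-T AN control) */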
+ bnx2x_cl45_read_or_write(
+ bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 0x1000);
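+ /* Enable autoneg (bit 12) and restart it (bit 9) */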
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
+ } else
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 1);
+
+ return 0;
+}
+
+static void bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
+
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc)
+{
+ int idx;
+ u16 val;
+ struct bnx2x *bp = params->bp;
+
+ /* Step 1: Poll the STATUS register to see whether the previous command
+ * is in progress or the system is busy (CMD_IN_PROGRESS or
+ * SYSTEM_BUSY). If the previous command is in progress or the system
+ * is busy, keep checking until the previous command finishes execution
+ * and the system is available to take a new command
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+ (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ /* Step 2: If any parameters are required for the function, write them
+ * to the required DATA registers
+ */
+
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
+
+ /* Step 3: When the firmware is ready for commands, write the 'Command
+ * code' to the CMD register
+ */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+ /* Step 4: Once the command has been written, poll the STATUS register
+ * to check whether the command has completed (CMD_COMPLETED_PASS or
+ * CMD_COMPLETED_ERROR).
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+ return -EINVAL;
+ }
+ /* Step 5: Once the command has completed, read the specified DATA
+ * registers for any saved results for the command, if applicable
+ */
+
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
+
+ return 0;
+}
+
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params, u16 fw_cmd,
+ u16 cmd_args[], int argc, int process)
+{
+ int idx;
+ u16 val;
+ struct bnx2x *bp = params->bp;
+ int rc = 0;
+
+ if (process == PHY84833_MB_PROCESS2) {
+ /* Write CMD_OPEN_OVERRIDE to STATUS reg */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ }
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+ /* If the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR,
+ * clear the status to CMD_CLEAR_COMPLETE
+ */
+ if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
+ val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+ }
+ return -EINVAL;
+ }
+ if (process == PHY84833_MB_PROCESS1 ||
+ process == PHY84833_MB_PROCESS2) {
+ /* Prepare argument(s) */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+ rc = -EINVAL;
+ }
+ if (process == PHY84833_MB_PROCESS3 && rc == 0) {
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
+ }
+ if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
+ val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+ }
+ return rc;
+}
+
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc,
+ int process)
+{
+ struct bnx2x *bp = params->bp;
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+ (REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port])) &
+ LINK_ATTR_84858)) {
+ return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ } else {
+ return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc, process);
+ }
+}
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 pair_swap;
+ u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
+ int status;
+ struct bnx2x *bp = params->bp;
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ /* Only the second argument is used for this command */
+ data[1] = (u16)pair_swap;
+
+ status = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_PAIR_SWAP, data,
+ 2, PHY84833_MB_PROCESS2);
+ if (status == 0)
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+ return status;
+}
+
+static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 chip_id)
+{
+ u32 reset_pin[2];
+ u32 idx;
+ u8 reset_gpios;
+ if (CHIP_IS_E3(bp)) {
+ /* Assume that these will be GPIOs, not EPIOs. */
+ for (idx = 0; idx < 2; idx++) {
+ /* Map config param to register bit. */
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+ reset_pin[idx] = (reset_pin[idx] &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ reset_pin[idx] -= PIN_CFG_GPIO0_P0;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ } else {
+ /* On E2, read the reset pin config from a different place in shmem. */
+ for (idx = 0; idx < 2; idx++) {
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].default_cfg));
+ reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+ reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+ reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ }
+
+ return reset_gpios;
+}
+
+static void bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 reset_gpios;
+ u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ other_shmem_base_addr));
+
+ u32 shmem_base_path[2];
+
+ /* Workaround for the 84833 LED failure while in RESET state */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+ MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+
+ shmem_base_path[0] = params->shmem_base;
+ shmem_base_path[1] = other_shmem_base_addr;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
+ params->chip_id);
+
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
+ reset_gpios);
+}
+
+static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 0;
+
+ DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
+
+ /* Prevent Phy from working in EEE and advertising it */
+ rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
+ &cmd_args, 1, PHY84833_MB_PROCESS1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE disable failed.\n");
+ return rc;
+ }
+
+ return bnx2x_eee_disable(phy, params, vars);
+}
+
+static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 1;
+
+ rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
+ &cmd_args, 1, PHY84833_MB_PROCESS1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE enable failed.\n");
+ return rc;
+ }
+
+ return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
+}
+
+#define PHY84833_CONSTANT_LATENCY 1193
+static void bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port, initialize = 1;
+ u16 val;
+ u32 actual_phy_selection;
+ u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
+ int rc = 0;
+
+ usleep_range(1000, 2000);
+
+ if (!(CHIP_IS_E1x(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+ } else {
+ /* MDIO reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ }
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
+ if (!bnx2x_is_8483x_8485x(phy)) {
+ /* The BCM84823 requires the XGXS link to come up first at 10G
+ * for normal behavior.
+ */
+ u16 temp;
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+ }
+ /* Check if this is actually BCM84858 */
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ u16 hw_rev;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+ if (hw_rev == BCM84858_PHY_ID) {
+ params->link_attr_sync |= LINK_ATTR_84858;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ }
+ }
+
+ /* Set the dual-media configuration according to the multi-PHY configuration */
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, &val);
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+ MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+ MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+ MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+
+ if (CHIP_IS_E3(bp)) {
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+ } else {
+ val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+ }
+
+ actual_phy_selection = bnx2x_phy_selection(params);
+
+ switch (actual_phy_selection) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ /* Do nothing. Essentially this is like the priority copper */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ /* Do nothing here. The first PHY won't be initialized at all */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+ initialize = 0;
+ break;
+ }
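+ /* Select 1G fiber media when the second PHY is forced to 1G */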
+ if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+ val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, val);
+ DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+ params->multi_phy_config, val);
+
+ if (bnx2x_is_8483x_8485x(phy)) {
+ bnx2x_848xx_pair_swap_cfg(phy, params, vars);
+
+ /* Keep AutogrEEEn disabled. */
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+ 4, PHY84833_MB_PROCESS1);
+ if (rc)
+ DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+ }
+ if (initialize)
+ rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+ else
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ /* The 84833 PHY has a better mechanism and doesn't need the CMS setting. */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ u32 cms_enable = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+ }
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_FW_REV, &val);
+
+ /* Configure EEE support */
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
+ (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
+ bnx2x_eee_has_cap(params)) {
+ rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_8483x_disable_eee(phy, params, vars);
+ return;
+ }
+
+ if ((phy->req_duplex == DUPLEX_FULL) &&
+ (params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
+ rc = bnx2x_8483x_enable_eee(phy, params, vars);
+ else
+ rc = bnx2x_8483x_disable_eee(phy, params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
+ return;
+ }
+ } else {
+ vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+ }
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /* Additional settings for jumbo packets in 1000BASE-T mode */
+ /* Allow rx extended length */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_AUX_CTRL, &val);
+ val |= 0x4000;
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_AUX_CTRL, val);
+ /* TX FIFO Elasticity LSB */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val);
+ val |= 0x1;
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val);
+ /* TX FIFO Elasticity MSB */
+ /* Enable expansion register 0x46 (Pattern Generator status) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46);
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val);
+ val |= 0x4000;
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val);
+ }
+
+ if (bnx2x_is_8483x_8485x(phy)) {
+ /* Bring PHY out of super isolate mode as the final step. */
+ bnx2x_cl45_read_and_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1,
+ (u16)~MDIO_84833_SUPER_ISOLATE);
+ }
+}
+
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val, val1, val2;
+ u8 link_up = 0;
+
+ /* Check 10G-BaseT link status */
+ /* Check PMD signal ok */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, 0xFFFA, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ &val2);
+ DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+ /* Check link 10G */
+ if (val2 & (1<<11)) {
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ link_up = 1;
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ } else { /* Check Legacy speed link */
+ u16 legacy_status, legacy_speed;
+
+ /* Enable expansion register 0x42 (Operation mode status) */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+ /* Get legacy speed operation status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+ &legacy_status);
+
+ DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
+ legacy_status);
+ link_up = ((legacy_status & (1<<11)) == (1<<11));
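+ /* Bits 10:9 of the status encode the speed: 0 = 10M, 1 = 100M, 2 = 1G */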
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else { /* Should not happen: Treat as link down */
+ vars->line_speed = 0;
+ link_up = 0;
+ }
+
+ if (link_up) {
+ if (legacy_status & (1<<8))
+ vars->duplex = DUPLEX_FULL;
+ else
+ vars->duplex = DUPLEX_HALF;
+
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+ /* Check legacy speed AN resolution */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ }
+ if (link_up) {
+ DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
+ vars->line_speed);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ /* Read LP advertised speeds */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1<<6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1<<8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1<<9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_1000T_STATUS, &val);
+
+ if (val & (1<<10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_MASTER_STATUS, &val);
+
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ /* Determine if EEE was negotiated */
+ if (bnx2x_is_8483x_8485x(phy))
+ bnx2x_eee_an_resolve(phy, params, vars);
+ }
+
+ return link_up;
+}
+
+static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+ u32 num;
+
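+ /* Rearrange the raw SPIROM fields into a three-part version number */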
+ num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
+ ((raw_ver & 0xF000) >> 12);
+ return bnx2x_3_seq_format_ver(num, str, len);
+}
+
+static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+ u32 spirom_ver;
+
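+ /* Pack the raw fields into the common SPIROM version layout */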
+ spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+ return bnx2x_format_ver(spirom_ver, str, len);
+}
+
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
+
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
+
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u16 val16;
+
+ if (!(CHIP_IS_E1x(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+ val16 |= MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
+ }
+}
+
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u8 port;
+
+ if (!(CHIP_IS_E1x(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ switch (mode) {
+ case LED_MODE_OFF:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+
+ } else {
+ /* LED 1 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* LED 2 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+ /* LED 3 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+ }
+ }
+ break;
+ case LED_MODE_FRONT_PANEL_OFF:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+ port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant off.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x0);
+ }
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* LED 2 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+ /* LED 3 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+ }
+ }
+ break;
+ case LED_MODE_ON:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
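+ /* Keep only bit 15 and program the remaining LED control bits */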
+ val &= 0x8000;
+ val |= 0x2492;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant on.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ }
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* Tell LED3 to constant on */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (2<<6); /* A83B[8:6]= 2 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x20);
+ }
+ }
+ break;
+
+ case LED_MODE_OPER:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+
+ if (!((val &
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ 0xa492);
+ }
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
+
+ } else {
+ /* EXTPHY2 LED mode indicates that the 100M/1G/10G LED
+ * sources are all wired through LED1, rather than only
+ * 10G in other modes.
+ */
+ val = ((params->hw_led_mode <<
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ val);
+
+ /* Tell LED3 to blink on source */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (1<<6); /* A83B[8:6]= 1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x18);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x06);
+ }
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Restore LED4 source to external link,
+ * and re-enable interrupts.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x40);
+ if (params->link_flags &
+ LINK_FLAGS_INT_DISABLED) {
+ bnx2x_link_int_enable(params);
+ params->link_flags &=
+ ~LINK_FLAGS_INT_DISABLED;
+ }
+ }
+ }
+ break;
+ }
+
+ /* This is a workaround for E3+84833 until autoneg
+ * restart is fixed in f/w
+ */
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+ }
+}
+
+/******************************************************************/
+/* 54618SE PHY SECTION */
+/******************************************************************/
+static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+ switch (action) {
+ case PHY_INIT:
+ /* Configure LED4: set to INTR (0x6). */
+ /* Accessing shadow register 0xe. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL2);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= ~(0xf << 4);
+ temp |= (0x6 << 4);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ /* Configure INTR based on link status change. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_INTR_MASK,
+ ~MDIO_REG_INTR_MASK_LINK_STATUS);
+ break;
+ }
+}
+
+static void bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+ u32 cfg_pin;
+
+ DP(NETIF_MSG_LINK, "54618SE cfg init\n");
+ usleep_range(1000, 2000);
+
+ /* This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
+ port = params->port;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin high to bring the GPHY out of reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 1);
+
+	/* Wait for the GPHY to come out of reset */
+ msleep(50);
+
+	/* Issue a GPHY soft reset */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait for GPHY to reset */
+ msleep(50);
+
+ bnx2x_54618se_specific_func(phy, params, PHY_INIT);
+ /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+ /* Set up fc */
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ fc_val = 0;
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+ /* Read all advertisement */
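+	/* Clause 22 registers: 0x09 = 1000BASE-T control, 0x04 = autoneg
+	 * advertisement; MDIO_PMA_REG_CTRL acts here as the MII control
+	 * register (0x00).
+	 */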
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ bnx2x_cl22_read(bp, phy,
+ 0x04,
+ &an_10_100_val);
+
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL,
+ &autoneg_val);
+
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
+ (1<<11));
+
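+	/* In the MII control word, bit 12 enables autoneg and bit 9
+	 * restarts it; in reg 0x09, bits 9/8 advertise 1000BASE-T FD/HD.
+	 */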
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl22_write(bp, phy,
+ 0x09,
+ an_1000_val);
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
+ an_10_100_val |= (1<<6);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1<<7);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if (phy->req_line_speed == SPEED_100) {
+ autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
+
+ if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
+ int rc;
+
+ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
+ MDIO_REG_GPHY_EXP_ACCESS_TOP |
+ MDIO_REG_GPHY_EXP_TOP_2K_BUF);
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
+ temp &= 0xfffe;
+ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
+
+ rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_eee_disable(phy, params, vars);
+ } else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (phy->req_duplex == DUPLEX_FULL) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
+ /* Need to advertise EEE only when requested,
+ * and either no LPI assertion was requested,
+ * or it was requested and a valid timer was set.
+ * Also notice full duplex is required for EEE.
+ */
+ bnx2x_eee_advertise(phy, params, vars,
+ SHMEM_EEE_1G_ADV);
+ } else {
+ DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
+ bnx2x_eee_disable(phy, params, vars);
+ }
+ } else {
+ vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+
+ if (phy->flags & FLAGS_EEE) {
+ /* Handle legacy auto-grEEEn */
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ temp = 6;
+ DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ } else {
+ temp = 0;
+ DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, temp);
+ }
+ }
+
+ bnx2x_cl22_write(bp, phy,
+ 0x04,
+ an_10_100_val | fc_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, autoneg_val);
+}
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= 0xff00;
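+	/* The low byte holds two 4-bit LED source selectors; 0xe/0xf
+	 * appear to force a LED off/on (inferred from the modes below).
+	 */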
+
+ DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+}
+
+static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port;
+
+ /* In case of no EPIO routed to reset the GPHY, put it
+ * in low power mode.
+ */
+ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+ /* This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
+ port = params->port;
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin low to put GPHY in reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 0);
+}
+
+static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u8 link_up = 0;
+ u16 legacy_status, legacy_speed;
+
+ /* Get speed operation status */
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_AUX_STATUS,
+ &legacy_status);
+ DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
+
+ /* Read status to clear the PHY interrupt. */
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_INTR_STATUS,
+ &val);
+
+ link_up = ((legacy_status & (1<<2)) == (1<<2));
+
+ if (link_up) {
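+		/* AUX status bits [10:8] encode the resolved speed/duplex */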
+ legacy_speed = (legacy_status & (7<<8));
+ if (legacy_speed == (7<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (6<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (5<<8)) {
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_FULL;
+		}
+		/* legacy_speed == (4<<8), i.e. 100Base-T4, is omitted for now */
+		else if (legacy_speed == (3<<8)) {
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (2<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (1<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_HALF;
+ } else /* Should not happen */
+ vars->line_speed = 0;
+
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+
+ /* Check legacy speed AN resolution */
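+		/* MII status register (0x01), bit 5 = autoneg complete */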
+ bnx2x_cl22_read(bp, phy,
+ 0x01,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
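+		/* AN expansion register (0x06): bit 0 set means the link
+		 * partner is autoneg-capable; clear implies parallel detect.
+		 */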
+ bnx2x_cl22_read(bp, phy,
+ 0x06,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+
+ DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
+ vars->line_speed);
+
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ /* Report LP advertised speeds */
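+			/* Reg 0x05 = link partner base-page ability */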
+ bnx2x_cl22_read(bp, phy, 0x5, &val);
+
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1<<6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1<<8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1<<9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
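+			/* Reg 0x0a = 1000BASE-T status; bits 10/11 report
+			 * LP 1000HD/1000FD capability.
+			 */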
+ bnx2x_cl22_read(bp, phy, 0xa, &val);
+ if (val & (1<<10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ if ((phy->flags & FLAGS_EEE) &&
+ bnx2x_eee_has_cap(params))
+ bnx2x_eee_an_resolve(phy, params, vars);
+ }
+ }
+ return link_up;
+}
+
+static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+	DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 54618se\n");
+
+	/* Enable master/slave manual mode and set to master */
+ /* mii write 9 [bits set 11 12] */
+ bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+	/* Force 1G and disable autoneg */
+ /* set val [mii read 0] */
+ /* set val [expr $val & [bits clear 6 12 13]] */
+ /* set val [expr $val | [bits set 6 8]] */
+ /* mii write 0 $val */
+ bnx2x_cl22_read(bp, phy, 0x00, &val);
+ val &= ~((1<<6) | (1<<12) | (1<<13));
+ val |= (1<<6) | (1<<8);
+ bnx2x_cl22_write(bp, phy, 0x00, val);
+
+ /* Set external loopback and Tx using 6dB coding */
+ /* mii write 0x18 7 */
+ /* set val [mii read 0x18] */
+ /* mii write 0x18 [expr $val | [bits set 10 15]] */
+ bnx2x_cl22_write(bp, phy, 0x18, 7);
+ bnx2x_cl22_read(bp, phy, 0x18, &val);
+ bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	/* Maximum Frame Length (RW). Defines a 14-bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
+/******************************************************************/
+/* SFX7101 PHY SECTION */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* SFX7101_XGXS_TEST1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
+
+static void bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u16 fw_ver1, fw_ver2, val;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
+
+	/* Restore normal power mode */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ /* Restart autoneg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+ val |= 0x200;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+ /* Save spirom version */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, params->port,
+ (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+}
+
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up;
+ u16 val1, val2;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+ val2, val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+ val2, val1);
+ link_up = ((val1 & 4) == 4);
+ /* If link is up print the AN outcome of the SFX7101 PHY */
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+ &val2);
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+ val2, (val2 & (1<<14)));
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ /* Read LP advertised speeds */
+ if (val2 & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+ return link_up;
+}
+
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ if (*len < 5)
+ return -EINVAL;
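+	/* The SPI-ROM version appears to be four raw version bytes */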
+ str[0] = (spirom_ver & 0xFF);
+ str[1] = (spirom_ver & 0xFF00) >> 8;
+ str[2] = (spirom_ver & 0xFF0000) >> 16;
+ str[3] = (spirom_ver & 0xFF000000) >> 24;
+ str[4] = '\0';
+ *len -= 5;
+ return 0;
+}
+
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ for (cnt = 0; cnt < 10; cnt++) {
+ msleep(50);
+ /* Writes a self-clearing reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
+ /* Wait for clear */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ if ((val & (1<<15)) == 0)
+ break;
+ }
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params) {
+ /* Low power mode is controlled by GPIO 2 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ /* The PHY reset is controlled by GPIO 1 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ u16 val = 0;
+ struct bnx2x *bp = params->bp;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ val = 2;
+ break;
+ case LED_MODE_ON:
+ val = 1;
+ break;
+ case LED_MODE_OPER:
+ val = 0;
+ break;
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7107_LINK_LED_CNTL,
+ val);
+}
+
+/******************************************************************/
+/* STATIC PHY DECLARATION */
+/******************************************************************/
+
+static const struct bnx2x_phy phy_null = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+ .addr = 0,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = 0,
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = NULL,
+ .read_status = NULL,
+ .link_reset = NULL,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_serdes = {
+ .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_xgxs = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_CX4,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_xgxs_config_init,
+ .read_status = bnx2x_link_settings_status,
+ .link_reset = bnx2x_int_link_reset,
+ .config_loopback = bnx2x_set_xgxs_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_xgxs_specific_func
+};
+
+static const struct bnx2x_phy phy_warpcore = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_TX_ERROR_CHECK,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+ .config_init = bnx2x_warpcore_config_init,
+ .read_status = bnx2x_warpcore_read_status,
+ .link_reset = bnx2x_warpcore_link_reset,
+ .config_loopback = bnx2x_set_warpcore_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = bnx2x_warpcore_hw_reset,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_7101 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_7101_config_init,
+ .read_status = bnx2x_7101_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = bnx2x_7101_config_loopback,
+ .format_fw_ver = bnx2x_7101_format_ver,
+ .hw_reset = bnx2x_7101_hw_reset,
+ .set_link_led = bnx2x_7101_set_link_led,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_8073 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_KR,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_8073_config_init,
+ .read_status = bnx2x_8073_read_status,
+ .link_reset = bnx2x_8073_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = bnx2x_8073_specific_func
+};
+
+static const struct bnx2x_phy phy_8705 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_XFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_8705_config_init,
+ .read_status = bnx2x_8705_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_null_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_8706 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFPP_10G_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_8706_config_init,
+ .read_status = bnx2x_8706_read_status,
+ .link_reset = bnx2x_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_8726 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (FLAGS_INIT_XGXS_FIRST |
+ FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_8726_config_init,
+ .read_status = bnx2x_8726_read_status,
+ .link_reset = bnx2x_8726_link_reset,
+ .config_loopback = bnx2x_8726_config_loopback,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_8727 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_8727_config_init,
+ .read_status = bnx2x_8727_read_status,
+ .link_reset = bnx2x_8727_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_format_ver,
+ .hw_reset = bnx2x_8727_hw_reset,
+ .set_link_led = bnx2x_8727_set_link_led,
+ .phy_specific_func = bnx2x_8727_specific_func
+};
+
+static const struct bnx2x_phy phy_8481 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_8481_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_8481_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_8481_hw_reset,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = NULL
+};
+
+static const struct bnx2x_phy phy_84823 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL |
+ FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL |
+ FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_84834 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_848xx_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_84858 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = bnx2x_848x3_config_init,
+ .read_status = bnx2x_848xx_read_status,
+ .link_reset = bnx2x_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = bnx2x_8485x_format_ver,
+ .hw_reset = bnx2x_84833_hw_reset_phy,
+ .set_link_led = bnx2x_848xx_set_link_led,
+ .phy_specific_func = bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_54618se = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+ .config_init = bnx2x_54618se_config_init,
+ .read_status = bnx2x_54618se_read_status,
+ .link_reset = bnx2x_54618se_link_reset,
+ .config_loopback = bnx2x_54618se_config_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = bnx2x_5461x_set_link_led,
+ .phy_specific_func = bnx2x_54618se_specific_func
+};
+
+/*****************************************************************/
+/* */
+/* Populate the phy accordingly. Main function: bnx2x_populate_phy */
+/* */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+ struct bnx2x_phy *phy, u8 port,
+ u8 phy_index)
+{
+ /* Get the 4 lanes xgxs config rx and tx */
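+	/* Each 32-bit shmem read below covers two consecutive 16-bit
+	 * lane entries, which are unpacked high half first.
+	 */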
+ u32 rx = 0, tx = 0, i;
+ for (i = 0; i < 2; i++) {
+		/* INT_PHY and EXT_PHY1 share the same value location in
+		 * the shmem. When num_phys is greater than 1, this value
+		 * applies only to EXT_PHY1.
+		 */
+ if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+ tx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+ } else {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+ tx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
+ }
+
+ phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+ phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+ phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+ phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+ }
+}
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+ u8 phy_index, u8 port)
+{
+ u32 ext_phy_config = 0;
+ switch (phy_index) {
+ case EXT_PHY1:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
+ break;
+ case EXT_PHY2:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config2));
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+ return -EINVAL;
+ }
+
+ return ext_phy_config;
+}
+
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 phy_addr;
+ u32 chip_id;
+ u32 switch_cfg = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[port].link_config)) &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
+	DP(NETIF_MSG_LINK, "chip_id = 0x%x\n", chip_id);
+ if (USES_WARPCORE(bp)) {
+ u32 serdes_net_if;
+ phy_addr = REG_RD(bp,
+ MISC_REG_WC0_CTRL_PHY_ADDR);
+ *phy = phy_warpcore;
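+		/* A value of 0x3 in the port-mode override register
+		 * apparently selects 4-port mode (inferred from the flag
+		 * handling below).
+		 */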
+ if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+ phy->flags |= FLAGS_4_PORT_MODE;
+ else
+ phy->flags &= ~FLAGS_4_PORT_MODE;
+ /* Check Dual mode */
+ serdes_net_if = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ /* Set the appropriate supported and flags indications per
+ * interface type of the chip
+ */
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+ phy->supported &= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_BASE_T;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_XFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ phy->media_type = ETH_PHY_KR;
+ phy->supported &= (SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
+ serdes_net_if);
+ break;
+ }
+
+ /* Enable MDC/MDIO work-around for E3 A0 since free running MDC
+ * was not set as expected. For B0, ECO will be enabled so there
+ * won't be an issue there
+ */
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ phy->flags |= FLAGS_MDC_MDIO_WA;
+ else
+ phy->flags |= FLAGS_MDC_MDIO_WA_B0;
+ } else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR +
+ port * 0x10);
+ *phy = phy_serdes;
+ break;
+ case SWITCH_CFG_10G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR +
+ port * 0x18);
+ *phy = phy_xgxs;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+ return -EINVAL;
+ }
+ }
+ phy->addr = (u8)phy_addr;
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+ port);
+ if (CHIP_IS_E2(bp))
+ phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
+ else
+ phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+
+ DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+ port, phy->addr, phy->mdio_ctrl);
+
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
+ return 0;
+}
+
+static int bnx2x_populate_ext_phy(struct bnx2x *bp,
+ u8 phy_index,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 ext_phy_config, phy_type, config2;
+ u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+ ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+ phy_index, port);
+ phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ /* Select the phy type */
+ switch (phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+ *phy = phy_8073;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+ *phy = phy_8705;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+ *phy = phy_8706;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8726;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		/* BCM8727_NOC => BCM8727 without over-current support */
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ phy->flags |= FLAGS_NOC;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+ *phy = phy_8481;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ *phy = phy_84823;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ *phy = phy_84833;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+ *phy = phy_84834;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+ *phy = phy_84858;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
+ *phy = phy_54618se;
+ if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ phy->flags |= FLAGS_EEE;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+ *phy = phy_7101;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ *phy = phy_null;
+ return -EINVAL;
+ default:
+ *phy = phy_null;
+ /* In case external PHY wasn't found */
+ if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ return -EINVAL;
+ return 0;
+ }
+
+ phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
+
+ /* The shmem address of the phy version is located on different
+ * structures. In case this structure is too old, do not set
+ * the address
+ */
+ config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+ dev_info.shared_hw_config.config2));
+ if (phy_index == EXT_PHY1) {
+ phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+ port_mb[port].ext_phy_fw_version);
+
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ } else {
+ u32 size = REG_RD(bp, shmem2_base);
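+		/* The first word of the shmem2 region holds its size */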
+
+ if (size >
+ offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+ phy->ver_addr = shmem2_base +
+ offsetof(struct shmem2_region,
+ ext_phy_fw_version2[port]);
+ }
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+ mdc_mdio_access = (config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+ (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+ }
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+
+ if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
+ /* Remove 100Mb link supported for BCM84833/4 when phy fw
+ * version lower than or equal to 1.39
+ */
+ u32 raw_ver = REG_RD(bp, phy->ver_addr);
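+		/* The version appears encoded as major in bits [11:7] and
+		 * minor in bits [6:0], so 1.39 checks against 1 and 39.
+		 */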
+ if (((raw_ver & 0x7F) <= 39) &&
+ (((raw_ver & 0xF80) >> 7) <= 1))
+ phy->supported &= ~(SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+ }
+
+ DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+ phy_type, port, phy_index);
+ DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
+ phy->addr, phy->mdio_ctrl);
+ return 0;
+}
+
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+ u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+{
+ phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+ if (phy_index == INT_PHY)
+ return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+
+ return bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, phy);
+}
+
+static void bnx2x_phy_def_cfg(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 phy_index)
+{
+ struct bnx2x *bp = params->bp;
+ u32 link_config;
+ /* Populate the default phy configuration for MF mode */
+ if (phy_index == EXT_PHY2) {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config2));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask2));
+ } else {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
+ }
+ DP(NETIF_MSG_LINK,
+ "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n",
+ phy_index, link_config, phy->speed_cap_mask);
+
+ phy->req_duplex = DUPLEX_FULL;
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
+ fallthrough;
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ phy->req_line_speed = SPEED_10;
+ break;
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
+ fallthrough;
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ phy->req_line_speed = SPEED_100;
+ break;
+ case PORT_FEATURE_LINK_SPEED_1G:
+ phy->req_line_speed = SPEED_1000;
+ break;
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ phy->req_line_speed = SPEED_2500;
+ break;
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ phy->req_line_speed = SPEED_10000;
+ break;
+ default:
+ phy->req_line_speed = SPEED_AUTO_NEG;
+ break;
+ }
+
+ switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+ case PORT_FEATURE_FLOW_CONTROL_AUTO:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_TX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_RX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_BOTH:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+ default:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ break;
+ }
+}
+
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+ u32 phy_config_swapped, prio_cfg;
+ u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ prio_cfg = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SELECTION_MASK;
+
+ if (phy_config_swapped) {
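+		/* With swapped PHYs, mirror the configured selection so it
+		 * refers to the actual PHY positions.
+		 */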
+ switch (prio_cfg) {
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ }
+ } else
+ return_cfg = prio_cfg;
+
+ return return_cfg;
+}
+
+int bnx2x_phy_probe(struct link_params *params)
+{
+ u8 phy_index, actual_phy_idx;
+ u32 phy_config_swapped, sync_offset, media_types;
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "Begin phy probe\n");
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+ " actual_phy_idx %x\n", phy_config_swapped,
+ phy_index, actual_phy_idx);
+ phy = &params->phy[actual_phy_idx];
+ if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+ params->shmem2_base, params->port,
+ phy) != 0) {
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+ phy_index);
+			for (phy_index = INT_PHY;
+			     phy_index < MAX_PHYS;
+			     phy_index++)
+				params->phy[phy_index] = phy_null;
+ return -EINVAL;
+ }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+ break;
+
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
+ phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_MT_SUPPORT))
+ phy->flags |= FLAGS_MDC_MDIO_WA_G;
+
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+
+		/* Update media type for non-PMF sync only the first time.
+		 * In case the media type changes afterwards, it will be
+		 * updated using the update_status function.
+		 */
+ if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx))) == 0) {
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx));
+ }
+ REG_WR(bp, sync_offset, media_types);
+
+ bnx2x_phy_def_cfg(params, phy, phy_index);
+ params->num_phys++;
+ }
+
+ DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+ return 0;
+}
+
+static void bnx2x_init_bmac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_BMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ bnx2x_xgxs_deassert(params);
+
+ /* Set bmac loopback */
+ bnx2x_bmac_enable(params, vars, 1, 1);
+
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+}
+
+static void bnx2x_init_emac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_EMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ bnx2x_xgxs_deassert(params);
+	/* Set emac loopback */
+ bnx2x_emac_enable(params, vars, 1);
+ bnx2x_emac_program(params, vars);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+}
+
+static void bnx2x_init_xmac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ if (!params->req_line_speed[0])
+ vars->line_speed = SPEED_10000;
+ else
+ vars->line_speed = params->req_line_speed[0];
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_XMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ /* Set WC to loopback mode since link is required to provide clock
+ * to the XMAC in 20G mode
+ */
+ bnx2x_set_aer_mmd(params, &params->phy[0]);
+ bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+ params->phy[INT_PHY].config_loopback(
+ &params->phy[INT_PHY],
+ params);
+
+ bnx2x_xmac_enable(params, vars, 1);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_umac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_UMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_umac_enable(params, vars, 1);
+
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_xgxs_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+ vars->link_up = 1;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->duplex = DUPLEX_FULL;
+ if (params->req_line_speed[0] == SPEED_1000)
+ vars->line_speed = SPEED_1000;
+ else if ((params->req_line_speed[0] == SPEED_20000) ||
+ (int_phy->flags & FLAGS_WC_DUAL_MODE))
+ vars->line_speed = SPEED_20000;
+ else
+ vars->line_speed = SPEED_10000;
+
+ if (!USES_WARPCORE(bp))
+ bnx2x_xgxs_deassert(params);
+ bnx2x_link_initialize(params, vars);
+
+ if (params->req_line_speed[0] == SPEED_1000) {
+ if (USES_WARPCORE(bp))
+ bnx2x_umac_enable(params, vars, 0);
+ else {
+ bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
+ }
+ } else {
+ if (USES_WARPCORE(bp))
+ bnx2x_xmac_enable(params, vars, 0);
+ else
+ bnx2x_bmac_enable(params, vars, 0, 1);
+ }
+
+ if (params->loopback_mode == LOOPBACK_XGXS) {
+ /* Set 10G XGXS loopback */
+ int_phy->config_loopback(int_phy, params);
+ } else {
+ /* Set external phy loopback */
+ u8 phy_index;
+ for (phy_index = EXT_PHY1;
+ phy_index < params->num_phys; phy_index++)
+ if (params->phy[phy_index].config_loopback)
+ params->phy[phy_index].config_loopback(
+ &params->phy[phy_index],
+ params);
+ }
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+{
+ struct bnx2x *bp = params->bp;
+ u8 val = en * 0x1F;
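+	/* Each bit of the LLH drv mask presumably gates one class of
+	 * traffic; 0x1F opens all five (assumption based on usage here).
+	 */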
+
+ /* Open / close the gate between the NIG and the BRB */
+ if (!CHIP_IS_E1x(bp))
+ val |= en * 0x20;
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
+
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
+ en*0x3);
+ }
+
+ REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), en);
+}
+
+static int bnx2x_avoid_link_flap(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 phy_idx;
+ u32 dont_clear_stat, lfa_sts;
+ struct bnx2x *bp = params->bp;
+
+ bnx2x_set_mdio_emac_per_phy(bp, params);
+ /* Sync the link parameters */
+ bnx2x_link_status_update(params, vars);
+
+	/*
+	 * The module verification was already done by the previous link
+	 * owner, so this call is meant only to produce a warning message.
+	 */
+
+ for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
+ struct bnx2x_phy *phy = &params->phy[phy_idx];
+ if (phy->phy_specific_func) {
+ DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
+ phy->phy_specific_func(phy, params, PHY_INIT);
+ }
+ if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
+ (phy->media_type == ETH_PHY_DA_TWINAX))
+ bnx2x_verify_sfp_module(phy, params);
+ }
+ lfa_sts = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ lfa_sts));
+
+ dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Re-enable the NIG/MAC */
+ if (CHIP_IS_E3(bp)) {
+ if (!dont_clear_stat) {
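+			/* Toggling the MSTAT block through reset presumably
+			 * clears the statistics, hence the dont_clear_stat
+			 * gate.
+			 */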
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ }
+ if (vars->line_speed < SPEED_10000)
+ bnx2x_umac_enable(params, vars, 0);
+ else
+ bnx2x_xmac_enable(params, vars, 0);
+ } else {
+ if (vars->line_speed < SPEED_10000)
+ bnx2x_emac_enable(params, vars, 0);
+ else
+ bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
+ }
+
+ /* Increment LFA count */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
+ LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
+ /* Clear link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+
+ /* Disable NIG DRAIN */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+ /* Enable interrupts */
+ bnx2x_link_int_enable(params);
+ return 0;
+}
+
+static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
+ struct link_vars *vars,
+ int lfa_status)
+{
+ u32 lfa_sts, cfg_idx, tmp_val;
+ struct bnx2x *bp = params->bp;
+
+ bnx2x_link_reset(params, vars, 1);
+
+ if (!params->lfa_base)
+ return;
+ /* Store the new link parameters */
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex),
+ params->req_duplex[0] | (params->req_duplex[1] << 16));
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl),
+ params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed),
+ params->req_line_speed[0] | (params->req_line_speed[1] << 16));
+
+ for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]),
+ params->speed_cap_mask[cfg_idx]);
+ }
+
+ tmp_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+ tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
+ tmp_val |= params->req_fc_auto_adv;
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config), tmp_val);
+
+ lfa_sts = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts));
+
+ /* Clear the "Don't Clear Statistics" bit, and set reason */
+ lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Set link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+ lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
+ LFA_LINK_FLAP_REASON_OFFSET);
+
+ /* Increment link flap counter */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
+ LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_COUNT_OFFSET));
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+	/* The caller proceeds with regular link initialization */
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+{
+ int lfa_status;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+ DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
+ DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
+ vars->link_status = 0;
+ vars->phy_link_up = 0;
+ vars->link_up = 0;
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+ vars->phy_flags = 0;
+ vars->check_kr2_recovery_cnt = 0;
+ params->link_flags = PHY_INITIALIZED;
+ /* Driver opens NIG-BRB filters */
+ bnx2x_set_rx_filter(params, 1);
+ bnx2x_chng_link_count(params, true);
+ /* Check if link flap can be avoided */
+ lfa_status = bnx2x_check_lfa(params);
+
+ if (lfa_status == 0) {
+ DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
+ return bnx2x_avoid_link_flap(params, vars);
+ }
+
+	DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_status=0x%x\n",
+ lfa_status);
+ bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
+
+ /* Disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ bnx2x_emac_init(params, vars);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+
+ if (params->num_phys == 0) {
+ DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+ return -EINVAL;
+ }
+ set_phy_vars(params, vars);
+
+ DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
+ switch (params->loopback_mode) {
+ case LOOPBACK_BMAC:
+ bnx2x_init_bmac_loopback(params, vars);
+ break;
+ case LOOPBACK_EMAC:
+ bnx2x_init_emac_loopback(params, vars);
+ break;
+ case LOOPBACK_XMAC:
+ bnx2x_init_xmac_loopback(params, vars);
+ break;
+ case LOOPBACK_UMAC:
+ bnx2x_init_umac_loopback(params, vars);
+ break;
+ case LOOPBACK_XGXS:
+ case LOOPBACK_EXT_PHY:
+ bnx2x_init_xgxs_loopback(params, vars);
+ break;
+ default:
+ if (!CHIP_IS_E3(bp)) {
+ if (params->switch_cfg == SWITCH_CFG_10G)
+ bnx2x_xgxs_deassert(params);
+ else
+ bnx2x_serdes_deassert(bp, params->port);
+ }
+ bnx2x_link_initialize(params, vars);
+ msleep(30);
+ bnx2x_link_int_enable(params);
+ break;
+ }
+ bnx2x_update_mng(params, vars->link_status);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
+ return 0;
+}
+
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ u8 reset_ext_phy)
+{
+ struct bnx2x *bp = params->bp;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
+ DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
+ /* Disable attentions */
+ vars->link_status = 0;
+ bnx2x_chng_link_count(params, true);
+ bnx2x_update_mng(params, vars->link_status);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+ bnx2x_update_mng_eee(params, vars->eee_status);
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ /* Activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* Disable nig egress interface */
+ if (!CHIP_IS_E3(bp)) {
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+ }
+
+ if (!CHIP_IS_E3(bp)) {
+ bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+ } else {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
+ /* Disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ usleep_range(10000, 20000);
+	/* The PHY reset is controlled by GPIO 1; hold it low */
+ /* Clear link led */
+ bnx2x_set_mdio_emac_per_phy(bp, params);
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+ if (reset_ext_phy) {
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].link_reset) {
+ bnx2x_set_aer_mmd(params,
+ &params->phy[phy_index]);
+ params->phy[phy_index].link_reset(
+ &params->phy[phy_index],
+ params);
+ }
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
+ }
+ }
+
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
+ if (params->phy[INT_PHY].link_reset)
+ params->phy[INT_PHY].link_reset(
+ &params->phy[INT_PHY], params);
+
+ /* Disable nig ingress interface */
+ if (!CHIP_IS_E3(bp)) {
+ /* Reset BigMac */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+ } else {
+ u32 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ bnx2x_set_xumac_nig(params, 0, 0);
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+ XMAC_CTRL_REG_SOFT_RESET);
+ }
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ return 0;
+}
+int bnx2x_lfa_reset(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ params->link_flags &= ~PHY_INITIALIZED;
+ if (!params->lfa_base)
+ return bnx2x_link_reset(params, vars, 1);
+	/*
+	 * Activate NIG drain so that during this time the device won't send
+	 * anything while it is unable to respond.
+	 */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+
+	/*
+	 * Gracefully close the gate from the BMAC to the NIG so that no
+	 * partial packets are passed.
+	 */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
+	/* Wait 10ms for the pipe to clean up */
+ usleep_range(10000, 20000);
+
+ /* Clean the NIG-BRB using the network filters in a way that will
+ * not cut a packet in the middle.
+ */
+ bnx2x_set_rx_filter(params, 0);
+
+	/*
+	 * Re-open the gate between the BMAC and the NIG after verifying that
+	 * the gate to the BRB is closed; otherwise packets may reach the
+	 * firmware before the driver has initialized it. The goal is to
+	 * minimize management protocol down time.
+	 */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
+
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_set_xmac_rxtx(params, 1);
+ bnx2x_set_umac_rxtx(params, 1);
+ }
+ /* Disable NIG drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ return 0;
+}
+
+/****************************************************************************/
+/* Common function */
+/****************************************************************************/
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
+ u16 val;
+ s8 port = 0;
+ s8 port_of_path = 0;
+ u32 swap_val, swap_override;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
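+	/* Both register values are treated as boolean flags here: the logical
+	 * AND yields 1 only when a port swap is configured and the strap
+	 * override is active, in which case the reset targets the other port.
+	 */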
+ port ^= (swap_val && swap_override);
+ bnx2x_ext_phy_hw_reset(bp, port);
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u32 shmem_base, shmem2_base;
+		/* In E2, the same phy is used for port0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port_of_path, &phy[port]) !=
+ 0) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+ /* Disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+		/* Take the phy out of low power mode in order to access
+		 * its registers
+		 */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, &phy[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
+ }
+
+ /* Add delay of 150ms after reset */
+ msleep(150);
+
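+	/* Pair the block pointers so that the even MDIO address always maps
+	 * to PORT_0 (assumption: the two devices share a SPI-ROM and the
+	 * download sequence below relies on a fixed ordering).
+	 */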
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(bp))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
+ return -EINVAL;
+
+ /* Only set bit 10 = 1 (Tx power down) */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ /* Phase1 of TX_POWER_DOWN reset */
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
+ }
+
+ /* Toggle Transmitter: Power down and then up with 600ms delay
+ * between
+ */
+ msleep(600);
+
+ /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Phase2 of POWER_DOWN_RESET */
+ /* Release bit 10 (Release Tx power down) */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ usleep_range(15000, 30000);
+
+ /* Read modify write the SPI-ROM version select register */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+
+ /* set GPIO2 back to LOW */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ }
+ return 0;
+}
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ u32 val;
+ s8 port;
+ struct bnx2x_phy phy;
+ /* Use port1 because of the static port-swap */
+ /* Enable the module detection interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= ((1<<MISC_REGISTERS_GPIO_3)|
+ (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+ bnx2x_ext_phy_hw_reset(bp, 0);
+ usleep_range(5000, 10000);
+ for (port = 0; port < PORT_MAX; port++) {
+ u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ }
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy) !=
+ 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+
+		/* Reset phy */
+ bnx2x_cl45_write(bp, &phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+ /* Set fault module detected LED on */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
+ }
+
+ return 0;
+}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+ u8 *io_gpio, u8 *io_port)
+{
+ u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ s8 port, reset_gpio;
+ u32 swap_val, swap_override;
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
+ s8 port_of_path;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+
+ reset_gpio = MISC_REGISTERS_GPIO_1;
+ port = 1;
+
+	/* Retrieve the gpio/port which controls the reset.
+	 * Default is GPIO1, PORT1.
+	 */
+ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+ (u8 *)&reset_gpio, (u8 *)&port);
+
+ /* Calculate the port based on port swap */
+ port ^= (swap_val && swap_override);
+
+ /* Initiate PHY reset*/
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ usleep_range(1000, 2000);
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
+ usleep_range(5000, 10000);
+
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port_of_path, &phy[port]) !=
+ 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, &phy[port],
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ }
+
+ /* Add delay of 150ms after reset */
+ msleep(150);
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(bp))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
+ return -EINVAL;
+ /* Disable PHY transmitter output */
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, 1);
+ }
+ return 0;
+}
+
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[],
+ u8 phy_index,
+ u32 chip_id)
+{
+ u8 reset_gpios;
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+ return 0;
+}
+
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 ext_phy_type, u32 chip_id)
+{
+ int rc = 0;
+
+ switch (ext_phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+ rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		/* GPIO1 affects both ports, so the reset must be handled
+		 * here for both ports at once rather than per single port
+		 */
+ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+		/* The GPIO3 pins are linked, so both need to be toggled
+		 * to obtain the required 2us pulse.
+		 */
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ rc = -EINVAL;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "ext_phy 0x%x common init not required\n",
+ ext_phy_type);
+ break;
+ }
+
+ if (rc)
+		netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+			   0);
+ return rc;
+}
+
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id)
+{
+ int rc = 0;
+ u32 phy_ver, val;
+ u8 phy_index = 0;
+ u32 ext_phy_type, ext_phy_config;
+
+ bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0);
+ bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1);
+ DP(NETIF_MSG_LINK, "Begin common phy init\n");
+ if (CHIP_IS_E3(bp)) {
+ /* Enable EPIO */
+ val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+ REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+ }
+ /* Check if common init was already done */
+ phy_ver = REG_RD(bp, shmem_base_path[0] +
+ offsetof(struct shmem_region,
+ port_mb[PORT_0].ext_phy_fw_version));
+ if (phy_ver) {
+ DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+ phy_ver);
+ return 0;
+ }
+
+ /* Read the ext_phy_type for arbitrary port(0) */
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ ext_phy_config = bnx2x_get_ext_phy_config(bp,
+ shmem_base_path[0],
+ phy_index, 0);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, ext_phy_type,
+ chip_id);
+ }
+ return rc;
+}
+
+static void bnx2x_check_over_curr(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+ u32 pin_val;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+ PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+ PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+ /* Ignore check if no external input PIN available */
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+ return;
+
+ if (!pin_val) {
+ if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+			netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
+				   params->port);
+ vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+ bnx2x_warpcore_power_module(params, 0);
+ }
+ } else
+ vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+}
+
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
+static u8 bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 status,
+ u32 phy_flag, u32 link_flag, u8 notify)
+{
+ struct bnx2x *bp = params->bp;
+ /* Compare new value with previous value */
+ u8 led_mode;
+ u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
+
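+	/* The XOR is non-zero only when the fault indication actually
+	 * toggled; a steady state needs no further handling.
+	 */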
+ if ((status ^ old_status) == 0)
+ return 0;
+
+ /* If values differ */
+ switch (phy_flag) {
+ case PHY_HALF_OPEN_CONN_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
+ break;
+ case PHY_SFP_TX_FAULT_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
+ }
+ DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
+ old_status, status);
+
+	/* Do not touch the link in case the physical link is down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
+
+ /* a. Update shmem->link_status accordingly
+ * b. Update link_vars->link_up
+ */
+ if (status) {
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_status |= link_flag;
+ vars->link_up = 0;
+ vars->phy_flags |= phy_flag;
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+ /* Set LED mode to off since the PHY doesn't know about these
+ * errors
+ */
+ led_mode = LED_MODE_OFF;
+ } else {
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status &= ~link_flag;
+ vars->link_up = 1;
+ vars->phy_flags &= ~phy_flag;
+ led_mode = LED_MODE_OPER;
+
+ /* Clear nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ }
+ bnx2x_sync_link(params, vars);
+ /* Update the LED according to the link state */
+ bnx2x_set_led(params, vars, led_mode, SPEED_10000);
+
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+
+	/* c. Trigger general attention */
+ vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+ if (notify)
+ bnx2x_notify_link_changed(bp);
+
+ return 1;
+}
+
+/******************************************************************************
+* Description:
+* This function checks for a half-open connection change indication.
+* When such a change occurs, it calls bnx2x_analyze_link_error to check
+* whether Remote Fault is set or cleared. Reception of a remote fault
+* status message in the MAC indicates that the peer's MAC has detected a
+* fault, for example due to a break in the TX side of the fiber.
+*
+******************************************************************************/
+static int bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars,
+ u8 notify)
+{
+ struct bnx2x *bp = params->bp;
+ u32 lss_status = 0;
+ u32 mac_base;
+	/* Proceed only if the physical link is up and the port is not
+	 * routed through the EMAC (i.e. it is running at 10G)
+	 */
+ if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
+ (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
+ return 0;
+
+ if (CHIP_IS_E3(bp) &&
+ (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ /* Check E3 XMAC */
+ /* Note that link speed cannot be queried here, since it may be
+ * zero while link is down. In case UMAC is active, LSS will
+ * simply not be set
+ */
+ mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+		/* Clear sticky bits (requires a rising edge) */
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+ lss_status = 1;
+
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
+ } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
+ /* Check E1X / E2 BMAC */
+ u32 lss_status_reg;
+ u32 wb_data[2];
+ mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ /* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+ if (CHIP_IS_E2(bp))
+ lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+ else
+ lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
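+		/* The BigMAC status register is on the wide bus, so it is
+		 * read via DMAE as two 32-bit words; any non-zero value in
+		 * the low word indicates an LSS fault.
+		 */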
+ REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+ lss_status = (wb_data[0] > 0);
+
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
+ }
+ return 0;
+}
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin, value = 0;
+ u8 led_change, port = params->port;
+
+ /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
+ cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+ DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+ return;
+ }
+
+ led_change = bnx2x_analyze_link_error(params, vars, value,
+ PHY_SFP_TX_FAULT_FLAG,
+ LINK_STATUS_SFP_TX_FAULT, 1);
+
+ if (led_change) {
+ /* Change TX_Fault led, set link status for further syncs */
+ u8 led_mode;
+
+ if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+ led_mode = MISC_REGISTERS_GPIO_HIGH;
+ vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+ } else {
+ led_mode = MISC_REGISTERS_GPIO_LOW;
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ }
+
+ /* If module is unapproved, led should be on regardless */
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+ DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+ led_mode);
+ bnx2x_set_e3_module_fault_led(params, led_mode);
+ }
+ }
+}
+static void bnx2x_kr2_recovery(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "KR2 recovery\n");
+ bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_check_kr2_wa(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ u16 base_page, next_page, not_kr2_device, lane;
+ int sigdet;
+
+	/* Once KR2 has been disabled, wait 5 seconds before checking for KR2
+	 * recovery, since some switches tend to reinit the AN process and
+	 * clear the advertised BP/NP after ~2 seconds, causing KR2 to be
+	 * disabled and recovered many times in a row.
+	 */
+ if (vars->check_kr2_recovery_cnt > 0) {
+ vars->check_kr2_recovery_cnt--;
+ return;
+ }
+
+ sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+ if (!sigdet) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No sigdet\n");
+ }
+ return;
+ }
+
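+	/* Read the link partner's CL73 base and next pages through the
+	 * lane-specific AER window: select this lane, read both pages, then
+	 * restore the default AER mapping.
+	 */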
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &base_page);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
+ bnx2x_set_aer_mmd(params, phy);
+
+ /* CL73 has not begun yet */
+ if (base_page == 0) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No BP\n");
+ }
+ return;
+ }
+
+	/* If the NP bit is not set in the base page, or it is set but
+	 * only KX is advertised, declare this link partner a non-KR2
+	 * device.
+	 */
+	not_kr2_device = (((base_page & 0x8000) == 0) ||
+			  ((next_page & 0xe0) == 0x20));
+
+ /* In case KR2 is already disabled, check if we need to re-enable it */
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!not_kr2_device) {
+ DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
+ next_page);
+ bnx2x_kr2_recovery(params, vars, phy);
+ }
+ return;
+ }
+ /* KR2 is enabled, but not KR2 device */
+ if (not_kr2_device) {
+ /* Disable KR2 on both lanes */
+ DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
+ bnx2x_disable_kr2(params, vars, phy);
+ /* Restart AN on leading lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
+ return;
+ }
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+ u16 phy_idx;
+ struct bnx2x *bp = params->bp;
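+	/* Only the first PHY flagged for TX error checking is polled per
+	 * invocation; note the break at the end of the loop body.
+	 */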
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+ bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+ if (bnx2x_check_half_open_conn(params, vars, 1) !=
+ 0)
+ DP(NETIF_MSG_LINK, "Fault detection failed\n");
+ break;
+ }
+ }
+
+ if (CHIP_IS_E3(bp)) {
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ bnx2x_set_aer_mmd(params, phy);
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+ (phy->req_line_speed == SPEED_20000))
+ bnx2x_check_kr2_wa(params, vars, phy);
+ bnx2x_check_over_curr(params, vars);
+ if (vars->rx_tx_asic_rst)
+ bnx2x_warpcore_config_runtime(phy, params, vars);
+
+ if ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SFI) {
+ if (bnx2x_is_sfp_module_plugged(phy, params)) {
+ bnx2x_sfp_tx_fault_detection(phy, params, vars);
+ } else if (vars->link_status &
+ LINK_STATUS_SFP_TX_FAULT) {
+				/* Clean up stale state; the interrupt handler corrects the leds */
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ }
+ }
+ }
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port)
+{
+ u8 phy_index, fan_failure_det_req = 0;
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy)
+ != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+ fan_failure_det_req |= (phy.flags &
+ FLAGS_FAN_FAILURE_DET_REQ);
+ }
+ return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+ u8 phy_index;
+ struct bnx2x *bp = params->bp;
+ bnx2x_update_mng(params, 0);
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (params->phy[phy_index].hw_reset) {
+ params->phy[phy_index].hw_reset(
+ &params->phy[phy_index],
+ params);
+ params->phy[phy_index] = phy_null;
+ }
+ }
+}
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+ u32 chip_id, u32 shmem_base, u32 shmem2_base,
+ u8 port)
+{
+ u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+ u32 val;
+ u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+ if (CHIP_IS_E3(bp)) {
+ if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+ shmem_base,
+ port,
+ &gpio_num,
+ &gpio_port) != 0)
+ return;
+ } else {
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+ shmem2_base, port, &phy)
+ != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return;
+ }
+ if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+ gpio_num = MISC_REGISTERS_GPIO_3;
+ gpio_port = port;
+ break;
+ }
+ }
+ }
+
+ if (gpio_num == 0xff)
+ return;
+
+	/* Set the GPIO to trigger on SFP+ module insertion/removal */
+ bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ gpio_port ^= (swap_val && swap_override);
+
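+	/* Each port owns four consecutive GPIO attention bits, so the bit
+	 * for this pin is GPIO0's bit shifted by gpio_num + 4 * gpio_port.
+	 */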
+ vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+ (gpio_num + (gpio_port << 2));
+
+ sync_offset = shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+ REG_WR(bp, sync_offset, vars->aeu_int_mask);
+
+ DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
+ gpio_num, gpio_port, vars->aeu_int_mask);
+
+ if (port == 0)
+ offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+ else
+ offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+ /* Open appropriate AEU for interrupts */
+ aeu_mask = REG_RD(bp, offset);
+ aeu_mask |= vars->aeu_int_mask;
+ REG_WR(bp, offset, aeu_mask);
+
+ /* Enable the GPIO to trigger interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= 1 << (gpio_num + (gpio_port << 2));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
new file mode 100644
index 0000000000..cae03c89dc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -0,0 +1,547 @@
+/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#ifndef BNX2X_LINK_H
+#define BNX2X_LINK_H
+
+/***********************************************************/
+/* Defines */
+/***********************************************************/
+#define DEFAULT_PHY_DEV_ADDR 3
+#define E2_DEFAULT_PHY_DEV_ADDR 5
+
+#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
+#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
+#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
+#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
+#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
+
+#define NET_SERDES_IF_XFI 1
+#define NET_SERDES_IF_SFI 2
+#define NET_SERDES_IF_KR 3
+#define NET_SERDES_IF_DXGXS 4
+
+#define SPEED_AUTO_NEG 0
+#define SPEED_20000 20000
+
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+
+#define SFP_EEPROM_PAGE_SIZE 16
+#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
+#define SFP_EEPROM_VENDOR_NAME_SIZE 16
+#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
+#define SFP_EEPROM_VENDOR_OUI_SIZE 3
+#define SFP_EEPROM_PART_NO_ADDR 0x28
+#define SFP_EEPROM_PART_NO_SIZE 16
+#define SFP_EEPROM_REVISION_ADDR 0x38
+#define SFP_EEPROM_REVISION_SIZE 4
+#define SFP_EEPROM_SERIAL_ADDR 0x44
+#define SFP_EEPROM_SERIAL_SIZE 16
+#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
+#define SFP_EEPROM_DATE_SIZE 6
+#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define SFP_EEPROM_DDM_IMPLEMENTED (1<<6)
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
+#define PWR_FLT_ERR_MSG_LEN 250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+ (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define SINGLE_MEDIA(params) (params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define DUAL_MEDIA(params) (params->num_phys == 3)
+
+#define FW_PARAM_PHY_ADDR_MASK 0x000000FF
+#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00
+#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+ FW_PARAM_PHY_ADDR_MASK)
+#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+ FW_PARAM_PHY_TYPE_MASK)
+#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+ FW_PARAM_MDIO_CTRL_MASK) >> \
+ FW_PARAM_MDIO_CTRL_OFFSET)
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+ (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
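+/* Note that phy_type is expected pre-shifted into bits 15:8; only the MDIO
+ * control value is shifted here, into bits 31:16, while the phy address
+ * occupies bits 7:0.
+ */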
+
+
+#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
+#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
+
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+
+#define BMAC_CONTROL_RX_ENABLE 2
+/***********************************************************/
+/* Structs */
+/***********************************************************/
+#define INT_PHY 0
+#define EXT_PHY1 1
+#define EXT_PHY2 2
+#define MAX_PHYS 3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
+ 0 : (_phy_idx - 1))
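+/* For example, LINK_CONFIG_IDX(INT_PHY) == LINK_CONFIG_IDX(EXT_PHY1) == 0,
+ * while LINK_CONFIG_IDX(EXT_PHY2) == 1, matching the shared-configuration
+ * note above.
+ */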
+/***********************************************************/
+/* bnx2x_phy struct */
+/* Defines the required arguments and function per phy */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef void (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef int (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u32 action);
+struct bnx2x_reg_set {
+ u8 devad;
+ u16 reg;
+ u16 val;
+};
+
+struct bnx2x_phy {
+ u32 type;
+
+ /* Loaded during init */
+ u8 addr;
+ u8 def_md_devad;
+ u16 flags;
+ /* No Over-Current detection */
+#define FLAGS_NOC (1<<1)
+ /* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
+ /* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST (1<<3)
+#define FLAGS_WC_DUAL_MODE (1<<4)
+#define FLAGS_4_PORT_MODE (1<<5)
+#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
+#define FLAGS_SFP_NOT_APPROVED (1<<7)
+#define FLAGS_MDC_MDIO_WA (1<<8)
+#define FLAGS_DUMMY_READ (1<<9)
+#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
+#define FLAGS_TX_ERROR_CHECK (1<<12)
+#define FLAGS_EEE (1<<13)
+#define FLAGS_MDC_MDIO_WA_G (1<<15)
+
+ /* preemphasis values for the rx side */
+ u16 rx_preemphasis[4];
+
+ /* preemphasis values for the tx side */
+ u16 tx_preemphasis[4];
+
+	/* EMAC base address used to access the MDIO */
+ u32 mdio_ctrl;
+
+ u32 supported;
+
+ u32 media_type;
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFPP_10G_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_SFP_1G_FIBER 0x5
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
+#define ETH_PHY_NOT_PRESENT 0xff
+
+	/* The address at which the version is located */
+ u32 ver_addr;
+
+ u16 req_flow_ctrl;
+
+ u16 req_line_speed;
+
+ u32 speed_cap_mask;
+
+ u16 req_duplex;
+ u16 rsrv;
+ /* Called per phy/port init, and it configures LASI, speed, autoneg,
+ duplex, flow control negotiation, etc. */
+ config_init_t config_init;
+
+ /* Called due to interrupt. It determines the link, speed */
+ read_status_t read_status;
+
+ /* Called when driver is unloading. Should reset the phy */
+ link_reset_t link_reset;
+
+ /* Set the loopback configuration for the phy */
+ config_loopback_t config_loopback;
+
+ /* Format the given raw number into str up to len */
+ format_fw_ver_t format_fw_ver;
+
+ /* Reset the phy (both ports) */
+ hw_reset_t hw_reset;
+
+ /* Set link led mode (on/off/oper)*/
+ set_link_led_t set_link_led;
+
+ /* PHY Specific tasks */
+ phy_specific_func_t phy_specific_func;
+#define DISABLE_TX 1
+#define ENABLE_TX 2
+#define PHY_INIT 3
+};
+
+/* Inputs parameters to the CLC */
+struct link_params {
+
+ u8 port;
+
+ /* Default / User Configuration */
+ u8 loopback_mode;
+#define LOOPBACK_NONE 0
+#define LOOPBACK_EMAC 1
+#define LOOPBACK_BMAC 2
+#define LOOPBACK_XGXS 3
+#define LOOPBACK_EXT_PHY 4
+#define LOOPBACK_EXT 5
+#define LOOPBACK_UMAC 6
+#define LOOPBACK_XMAC 7
+
+ /* Device parameters */
+ u8 mac_addr[6];
+
+ u16 req_duplex[LINK_CONFIG_SIZE];
+ u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+ u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
+ /* shmem parameters */
+ u32 shmem_base;
+ u32 shmem2_base;
+ u32 speed_cap_mask[LINK_CONFIG_SIZE];
+ u32 switch_cfg;
+#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+
+ u32 lane_config;
+
+ /* Phy register parameter */
+ u32 chip_id;
+
+ /* features */
+ u32 feature_config_flags;
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
+#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
+#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
+#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
+#define FEATURE_CONFIG_MT_SUPPORT (1<<13)
+#define FEATURE_CONFIG_BOOT_FROM_SAN (1<<14)
+
+ /* Will be populated during common init */
+ struct bnx2x_phy phy[MAX_PHYS];
+
+ /* Will be populated during common init */
+ u8 num_phys;
+
+ u8 rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define EEE_MODE_NVRAM_MASK (0x3)
+#define EEE_MODE_TIMER_MASK (0xfffff)
+#define EEE_MODE_OUTPUT_TIME (1<<28)
+#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define EEE_MODE_ENABLE_LPI (1<<30)
+#define EEE_MODE_ADV_LPI (1<<31)
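+/* Illustrative value (not from the source): (EEE_MODE_ADV_LPI |
+ * EEE_MODE_ENABLE_LPI | EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME | 50)
+ * sets bits 31:28 and requests a 50us idle timer taken from bits 19:0.
+ */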
+
+ u16 hw_led_mode; /* part of the hw_config read from the shmem */
+ u32 multi_phy_config;
+
+ /* Device pointer passed to all callback functions */
+ struct bnx2x *bp;
+ u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
+ u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED (1<<0)
+#define PHY_INITIALIZED (1<<1)
+ u32 lfa_base;
+
+ /* The same definitions as the shmem2 parameter */
+ u32 link_attr_sync;
+};
+
+/* Output parameters */
+struct link_vars {
+ u8 phy_flags;
+#define PHY_XGXS_FLAG (1<<0)
+#define PHY_SGMII_FLAG (1<<1)
+#define PHY_PHYSICAL_LINK_FLAG (1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
+#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
+
+ u8 mac_type;
+#define MAC_TYPE_NONE 0
+#define MAC_TYPE_EMAC 1
+#define MAC_TYPE_BMAC 2
+#define MAC_TYPE_UMAC 3
+#define MAC_TYPE_XMAC 4
+
+ u8 phy_link_up; /* internal phy link indication */
+ u8 link_up;
+
+ u16 line_speed;
+ u16 duplex;
+
+ u16 flow_ctrl;
+ u16 ieee_fc;
+
+ /* The same definitions as the shmem parameter */
+ u32 link_status;
+ u32 eee_status;
+ u8 fault_detected;
+ u8 check_kr2_recovery_cnt;
+#define CHECK_KR2_RECOVERY_CNT 5
+ u16 periodic_flags;
+#define PERIODIC_FLAGS_LINK_EVENT 0x0001
+
+ u32 aeu_int_mask;
+ u8 rx_tx_asic_rst;
+ u8 turn_to_run_wc_rt;
+ u16 rsrv2;
+};
+
+/***********************************************************/
+/* Functions */
+/***********************************************************/
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
+
+/* Reset the link. Should be called when the driver or interface goes down.
+ * Before calling a phy firmware upgrade, reset_ext_phy should be set
+ * to 0.
+ */
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ u8 reset_ext_phy);
+int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
+/* bnx2x_link_update should be called upon link interrupt */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
+
+/* Use the following phy functions to read/write from an external phy.
+ * In order to use them to read/write internal phy registers, use
+ * DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
+ * the register.
+ */
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val);
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val);
+
+/* Reads the link_status from the shmem
+ * and updates the link vars accordingly.
+ */
+void bnx2x_link_status_update(struct link_params *input,
+ struct link_vars *output);
+/* Returns a string representing the fw_version of the external phy */
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ u16 len);
+
+/* Set/unset the led.
+ * Basically, the CLC takes care of the led for the link, but in case one
+ * needs to set/unset the led manually, set "mode" to LED_MODE_OPER to
+ * blink the led, and LED_MODE_OFF to turn the led off.
+ */
+int bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed);
+#define LED_MODE_OFF 0
+#define LED_MODE_ON 1
+#define LED_MODE_OPER 2
+#define LED_MODE_FRONT_PANEL_OFF 3
+
+/* bnx2x_handle_module_detect_int should be called upon module detection
+ interrupt */
+void bnx2x_handle_module_detect_int(struct link_params *params);
+
+/* Get the actual link status; a return value of 0 means the link is up,
+ * otherwise the link is down.
+ */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes);
+
+/* One-time initialization for external phy after power up */
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id);
+
+/* Reset the external PHY using GPIO */
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
+
+/* Reset the external phy of the SFX7101 */
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf);
+
+void bnx2x_hw_reset_phy(struct link_params *params);
+
+/* Check swap bit and adjust PHY order */
+u32 bnx2x_phy_selection(struct link_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+int bnx2x_phy_probe(struct link_params *params);
+
+/* Checks if fan failure detection is required on one of the phys on board */
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 port);
+
+/* Open / close the gate between the NIG and the BRB */
+void bnx2x_set_rx_filter(struct link_params *params, u8 en);
+
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define DCBX_E2E3_MAX_NUM_COS (2)
+#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
+#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
+#define DCBX_E3B0_MAX_NUM_COS ( \
+ MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
+ DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define DCBX_MAX_NUM_COS ( \
+ MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
+ DCBX_E2E3_MAX_NUM_COS))
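+/* With the values above, both DCBX_E3B0_MAX_NUM_COS and DCBX_MAX_NUM_COS
+ * resolve to 6.
+ */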
+
+/* PFC port configuration params */
+struct bnx2x_nig_brb_pfc_port_params {
+ /* NIG */
+ u32 pause_enable;
+ u32 llfc_out_en;
+ u32 llfc_enable;
+ u32 pkt_priority_to_cos;
+ u8 num_of_rx_cos_priority_mask;
+ u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
+ u32 llfc_high_priority_classes;
+ u32 llfc_low_priority_classes;
+};
+
+
+/* ETS port configuration params */
+struct bnx2x_ets_bw_params {
+ u8 bw;
+};
+
+struct bnx2x_ets_sp_params {
+	/* Valid values are 0-5, where 0 is the highest strict priority.
+	 * No two COSes may share the same priority.
+	 */
+ u8 pri;
+};
+
+enum bnx2x_cos_state {
+ bnx2x_cos_state_strict = 0,
+ bnx2x_cos_state_bw = 1,
+};
+
+struct bnx2x_ets_cos_params {
+	enum bnx2x_cos_state state;
+ union {
+ struct bnx2x_ets_bw_params bw_params;
+ struct bnx2x_ets_sp_params sp_params;
+ } params;
+};
+
+struct bnx2x_ets_params {
+ u8 num_of_cos; /* Number of valid COS entries*/
+ struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
+};
+
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+int bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params);
+
+
+/* Used to configure the ETS to disable */
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars);
+
+/* Used to configure the ETS to BW limited */
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw);
+
+/* Used to configure the ETS to strict */
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+
+
+/* Configure the COS to ETS according to BW and SP settings.*/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ struct bnx2x_ets_params *ets_params);
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+ u32 chip_id, u32 shmem_base, u32 shmem2_base,
+ u8 port);
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
+
+#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
new file mode 100644
index 0000000000..0d8e61c63c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -0,0 +1,15451 @@
+/* bnx2x_main.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h> /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/vxlan.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_vfpf.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_VERSION \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
+
+/* Time in jiffies before concluding the transmitter is hung */
+#define TX_TIMEOUT (5*HZ)
+
+MODULE_AUTHOR("Eliezer Tamir");
+MODULE_DESCRIPTION("QLogic "
+ "BCM57710/57711/57711E/"
+ "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+ "57840/57840_MF Driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
+
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, 0444);
+MODULE_PARM_DESC(num_queues,
+		 " Set number of queues (default is the number of CPUs)");
+
+static int disable_tpa;
+module_param(disable_tpa, int, 0444);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+static int int_mode;
+module_param(int_mode, int, 0444);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+ "(1 INT#x; 2 MSI)");
+
+static int dropless_fc;
+module_param(dropless_fc, int, 0444);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
+static int mrrs = -1;
+module_param(mrrs, int, 0444);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
+static int debug;
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
+
+struct bnx2x_mac_vals {
+ u32 xmac_addr;
+ u32 xmac_val;
+ u32 emac_addr;
+ u32 emac_val;
+ u32 umac_addr[2];
+ u32 umac_val[2];
+ u32 bmac_addr;
+ u32 bmac_val[2];
+};
+
+enum bnx2x_board_type {
+ BCM57710 = 0,
+ BCM57711,
+ BCM57711E,
+ BCM57712,
+ BCM57712_MF,
+ BCM57712_VF,
+ BCM57800,
+ BCM57800_MF,
+ BCM57800_VF,
+ BCM57810,
+ BCM57810_MF,
+ BCM57810_VF,
+ BCM57840_4_10,
+ BCM57840_2_20,
+ BCM57840_MF,
+ BCM57840_VF,
+ BCM57811,
+ BCM57811_MF,
+ BCM57840_O,
+ BCM57840_MFO,
+ BCM57811_VF
+};
+
+/* indexed by board_type, above */
+static struct {
+ char *name;
+} board_info[] = {
+ [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57811_VF] = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
+};
+
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_VF
+#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_VF
+#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_O
+#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_VF
+#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_4_10
+#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_2_20
+#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MFO
+#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_VF
+#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811
+#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_MF
+#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_VF
+#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
+#endif
+
+static const struct pci_device_id bnx2x_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+const u32 dmae_reg_go_c[] = {
+ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+/* Global resources for unloading a previously loaded device */
+#define BNX2X_PREV_WAIT_NEEDED 1
+static DEFINE_SEMAPHORE(bnx2x_prev_sem, 1);
+static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declaration */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+
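+/* Write a 64-bit DMA mapping into two consecutive 32-bit GRC
+ * registers, low dword first.
+ */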
+static void __storm_memset_dma_mapping(struct bnx2x *bp,
+ u32 addr, dma_addr_t mapping)
+{
+ REG_WR(bp, addr, U64_LO(mapping));
+ REG_WR(bp, addr + 4, U64_HI(mapping));
+}
+
+static void storm_memset_spq_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+static void storm_memset_eq_data(struct bnx2x *bp,
+ struct event_ring_data *eq_data,
+ u16 pfid)
+{
+ size_t size = sizeof(struct event_ring_data);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+ u16 pfid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+ REG_WR16(bp, addr, eq_prod);
+}
+
+/* used only at init
+ * locking is done by mcp
+ */
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+}
+
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+ u32 val;
+
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+
+ return val;
+}
+
+#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE "dst_addr [none]"
+
+static void bnx2x_dp_dmae(struct bnx2x *bp,
+ struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+ int i;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+
+ for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
+ DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
+ i, *(((u32 *)dmae) + i));
+}
+
+/* copy command into DMAE command memory and set DMAE command go */
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
+{
+ u32 cmd_offset;
+ int i;
+
+ cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+ for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+ REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+ }
+ REG_WR(bp, dmae_reg_go_c[idx], 1);
+}
+
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
+{
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+ DMAE_CMD_C_ENABLE);
+}
+
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
+{
+ return opcode & ~DMAE_CMD_SRC_RESET;
+}
+
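+/* Build a DMAE opcode from the source/destination types, the current
+ * port and VN, the error policy and the host endianity; optionally
+ * request a completion event via bnx2x_dmae_opcode_add_comp().
+ */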
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type)
+{
+ u32 opcode = 0;
+
+ opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+ (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+ opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
+
+ opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
+ opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
+
+#ifdef __BIG_ENDIAN
+ opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
+#else
+ opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
+#endif
+ if (with_comp)
+ opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+ return opcode;
+}
+
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
+{
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ /* set the opcode */
+ dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
+ true, DMAE_COMP_PCI);
+
+ /* fill in the completion parameters */
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+}
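+
+/* Typical flow (see bnx2x_write_dmae() below): prepare the command
+ * with completion, fill in the addresses and dword length, then issue
+ * it over the init channel and wait for DMAE_COMP_VAL in wb_comp.
+ */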
+
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp)
+{
+ int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
+ int rc = 0;
+
+ bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
+
+ /* Lock the DMAE channel. Disable BHs to prevent a deadlock, since
+ * this code is called both from syscall context and from the
+ * ndo_set_rx_mode() flow, which may run in BH context.
+ */
+
+ spin_lock_bh(&bp->dmae_lock);
+
+ /* reset completion */
+ *comp = 0;
+
+ /* post the command on the channel used for initializations */
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+
+ /* wait for completion */
+ udelay(5);
+ while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+
+ if (!cnt ||
+ (bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+ BNX2X_ERR("DMAE timeout!\n");
+ rc = DMAE_TIMEOUT;
+ goto unlock;
+ }
+ cnt--;
+ udelay(50);
+ }
+ if (*comp & DMAE_PCI_ERR_FLAG) {
+ BNX2X_ERR("DMAE PCI error!\n");
+ rc = DMAE_PCI_ERROR;
+ }
+
+unlock:
+
+ spin_unlock_bh(&bp->dmae_lock);
+
+ return rc;
+}
+
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32)
+{
+ int rc;
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+ if (CHIP_IS_E1(bp))
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ else
+ bnx2x_init_str_wr(bp, dst_addr, data, len32);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = U64_LO(dma_addr);
+ dmae.src_addr_hi = U64_HI(dma_addr);
+ dmae.dst_addr_lo = dst_addr >> 2;
+ dmae.dst_addr_hi = 0;
+ dmae.len = len32;
+
+ /* issue the command and wait for completion */
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+ if (rc) {
+ BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
+}
+
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+{
+ int rc;
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+ int i;
+
+ if (CHIP_IS_E1(bp))
+ for (i = 0; i < len32; i++)
+ data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ else
+ for (i = 0; i < len32; i++)
+ data[i] = REG_RD(bp, src_addr + i*4);
+
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = src_addr >> 2;
+ dmae.src_addr_hi = 0;
+ dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+ dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+ dmae.len = len32;
+
+ /* issue the command and wait for completion */
+ rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+ if (rc) {
+ BNX2X_ERR("DMAE returned failure %d\n", rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
+}
+
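+/* DMAE-write a buffer of arbitrary length by splitting it into chunks
+ * of at most DMAE_LEN32_WR_MAX(bp) dwords.
+ */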
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
+{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
+ int offset = 0;
+
+ while (len > dmae_wr_max) {
+ bnx2x_write_dmae(bp, phys_addr + offset,
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
+ }
+
+ bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
+
+enum storms {
+ XSTORM,
+ TSTORM,
+ CSTORM,
+ USTORM,
+ MAX_STORMS
+};
+
+#define STORMS_NUM 4
+#define REGS_IN_ENTRY 4
+
+static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
+ enum storms storm,
+ int entry)
+{
+ switch (storm) {
+ case XSTORM:
+ return XSTORM_ASSERT_LIST_OFFSET(entry);
+ case TSTORM:
+ return TSTORM_ASSERT_LIST_OFFSET(entry);
+ case CSTORM:
+ return CSTORM_ASSERT_LIST_OFFSET(entry);
+ case USTORM:
+ return USTORM_ASSERT_LIST_OFFSET(entry);
+ case MAX_STORMS:
+ default:
+ BNX2X_ERR("unknown storm\n");
+ }
+ return -EINVAL;
+}
+
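+/* Scan the assert list of each storm and log every valid entry;
+ * returns the number of asserts found.
+ */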
+static int bnx2x_mc_assert(struct bnx2x *bp)
+{
+ char last_idx;
+ int i, j, rc = 0;
+ enum storms storm;
+ u32 regs[REGS_IN_ENTRY];
+ u32 bar_storm_intmem[STORMS_NUM] = {
+ BAR_XSTRORM_INTMEM,
+ BAR_TSTRORM_INTMEM,
+ BAR_CSTRORM_INTMEM,
+ BAR_USTRORM_INTMEM
+ };
+ u32 storm_assert_list_index[STORMS_NUM] = {
+ XSTORM_ASSERT_LIST_INDEX_OFFSET,
+ TSTORM_ASSERT_LIST_INDEX_OFFSET,
+ CSTORM_ASSERT_LIST_INDEX_OFFSET,
+ USTORM_ASSERT_LIST_INDEX_OFFSET
+ };
+ char *storms_string[STORMS_NUM] = {
+ "XSTORM",
+ "TSTORM",
+ "CSTORM",
+ "USTORM"
+ };
+
+ for (storm = XSTORM; storm < MAX_STORMS; storm++) {
+ last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
+ storm_assert_list_index[storm]);
+ if (last_idx)
+ BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
+ storms_string[storm], last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+ /* read a single assert entry */
+ for (j = 0; j < REGS_IN_ENTRY; j++)
+ regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
+ bnx2x_get_assert_list_entry(bp,
+ storm,
+ i) +
+ sizeof(u32) * j);
+
+ /* log entry if it contains a valid assert */
+ if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ storms_string[storm], i, regs[3],
+ regs[2], regs[1], regs[0]);
+ rc++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
+ CHIP_IS_E1(bp) ? "everest1" :
+ CHIP_IS_E1H(bp) ? "everest1h" :
+ CHIP_IS_E2(bp) ? "everest2" : "everest3",
+ bp->fw_major, bp->fw_minor, bp->fw_rev);
+
+ return rc;
+}
+
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp) \
+ (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
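+/* Dump the MCP trace buffer: it sits right below the shmem base in
+ * scratchpad memory; validate the TRCB signature, read the cyclic
+ * buffer mark, then print the buffer after and before the mark.
+ */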
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
+{
+ u32 addr, val;
+ u32 mark, offset;
+ __be32 data[9];
+ int word;
+ u32 trace_shmem_base;
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+ netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff));
+
+ if (pci_channel_offline(bp->pdev)) {
+ BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+ return;
+ }
+
+ val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+ if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+ BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
+
+ if (BP_PATH(bp) == 0)
+ trace_shmem_base = bp->common.shmem_base;
+ else
+ trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
+
+ /* sanity */
+ if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+ trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+ SCRATCH_BUFFER_SIZE(bp)) {
+ BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+ trace_shmem_base);
+ return;
+ }
+
+ addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
+
+ /* validate TRCB signature */
+ mark = REG_RD(bp, addr);
+ if (mark != MFW_TRACE_SIGNATURE) {
+ BNX2X_ERR("Trace buffer signature is missing\n");
+ return;
+ }
+
+ /* read cyclic buffer pointer */
+ addr += 4;
+ mark = REG_RD(bp, addr);
+ mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+ if (mark >= trace_shmem_base || mark < addr + 4) {
+ BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+ return;
+ }
+ printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
+
+ printk("%s", lvl);
+
+ /* dump buffer after the mark */
+ for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+
+ /* dump buffer before the mark */
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+ printk("%s" "end of fw dump\n", lvl);
+}
+
+static void bnx2x_fw_dump(struct bnx2x *bp)
+{
+ bnx2x_fw_dump_lvl(bp, KERN_ERR);
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /* In E1 we must use only the PCI configuration space to disable
+ * the MSI/MSI-X capability; it's forbidden to clear
+ * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the
+ * mask register to prevent the HC from sending interrupts
+ * after we exit this function.
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! Proper val not read from HC!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! Proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
+{
+ int i;
+ u16 j;
+ struct hc_sp_status_block_data sp_sb_data;
+ int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+ u16 start = 0, end = 0;
+ u8 cos;
+#endif
+ if (IS_PF(bp) && disable_int)
+ bnx2x_int_disable(bp);
+
+ bp->stats_state = STATS_STATE_DISABLED;
+ bp->eth_stats.unrecoverable_error++;
+ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+ BNX2X_ERR("begin crash dump -----------------\n");
+
+ /* Indices */
+ /* Common */
+ if (IS_PF(bp)) {
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ int data_size, cstorm_offset;
+
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ def_sb->atten_status_block.attn_bits,
+ def_sb->atten_status_block.attn_bits_ack,
+ def_sb->atten_status_block.status_block_id,
+ def_sb->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ def_sb->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ data_size = sizeof(struct hc_sp_status_block_data) /
+ sizeof(u32);
+ cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+ for (i = 0; i < data_size; i++)
+ *((u32 *)&sp_sb_data + i) =
+ REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+ i * sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+ }
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int loop;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.common.state_machine :
+ sb_data_e2.common.state_machine;
+ struct hc_index_data *hc_index_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.index_data :
+ sb_data_e2.index_data;
+ u8 data_size, cos;
+ u32 *sb_data_p;
+ struct bnx2x_fp_txdata txdata;
+
+ if (!bp->fp)
+ break;
+
+ if (!fp->rx_cons_sb)
+ continue;
+
+ /* Rx */
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
+ i, fp->rx_bd_prod, fp->rx_bd_cons,
+ fp->rx_comp_prod,
+ fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
+ fp->rx_sge_prod, fp->last_max_sge,
+ le16_to_cpu(fp->fp_hc_idx));
+
+ /* Tx */
+ for_each_cos_in_tx_queue(fp, cos) {
+ if (!fp->txdata_ptr[cos])
+ break;
+
+ txdata = *fp->txdata_ptr[cos];
+
+ if (!txdata.tx_cons_sb)
+ continue;
+
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
+ i, txdata.tx_pkt_prod,
+ txdata.tx_pkt_cons, txdata.tx_bd_prod,
+ txdata.tx_bd_cons,
+ le16_to_cpu(*txdata.tx_cons_sb));
+ }
+
+ loop = CHIP_IS_E1x(bp) ?
+ HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
+
+ /* host sb data */
+
+ if (IS_FCOE_FP(fp))
+ continue;
+
+ BNX2X_ERR(" run indexes (");
+ for (j = 0; j < HC_SB_MAX_SM; j++)
+ pr_cont("0x%x%s",
+ fp->sb_running_index[j],
+ (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+ BNX2X_ERR(" indexes (");
+ for (j = 0; j < loop; j++)
+ pr_cont("0x%x%s",
+ fp->sb_index_values[j],
+ (j == loop - 1) ? ")" : " ");
+
+ /* VF cannot access the FW reflection of the status block */
+ if (IS_VF(bp))
+ continue;
+
+ /* fw sb data */
+ data_size = CHIP_IS_E1x(bp) ?
+ sizeof(struct hc_status_block_data_e1x) :
+ sizeof(struct hc_status_block_data_e2);
+ data_size /= sizeof(u32);
+ sb_data_p = CHIP_IS_E1x(bp) ?
+ (u32 *)&sb_data_e1x :
+ (u32 *)&sb_data_e2;
+ /* copy sb data in here */
+ for (j = 0; j < data_size; j++)
+ *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+ j * sizeof(u32));
+
+ if (!CHIP_IS_E1x(bp)) {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
+ sb_data_e2.common.p_func.pf_id,
+ sb_data_e2.common.p_func.vf_id,
+ sb_data_e2.common.p_func.vf_valid,
+ sb_data_e2.common.p_func.vnic_id,
+ sb_data_e2.common.same_igu_sb_1b,
+ sb_data_e2.common.state);
+ } else {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
+ sb_data_e1x.common.p_func.pf_id,
+ sb_data_e1x.common.p_func.vf_id,
+ sb_data_e1x.common.p_func.vf_valid,
+ sb_data_e1x.common.p_func.vnic_id,
+ sb_data_e1x.common.same_igu_sb_1b,
+ sb_data_e1x.common.state);
+ }
+
+ /* SB_SMs data */
+ for (j = 0; j < HC_SB_MAX_SM; j++) {
+ pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
+ j, hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
+ }
+
+ /* Indices data */
+ for (j = 0; j < loop; j++) {
+ pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
+ hc_index_p[j].flags,
+ hc_index_p[j].timeout);
+ }
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (IS_PF(bp)) {
+ /* event queue */
+ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n",
+ data[0], data[1], data[2]);
+ }
+ }
+
+ /* Rings */
+ /* Rx */
+ for_each_valid_rx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ if (!bp->fp)
+ break;
+
+ if (!fp->rx_cons_sb)
+ continue;
+
+ start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+ end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+ for (j = start; j != end; j = RX_BD(j + 1)) {
+ u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+ struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
+ i, j, rx_bd[1], rx_bd[0], sw_bd->data);
+ }
+
+ start = RX_SGE(fp->rx_sge_prod);
+ end = RX_SGE(fp->last_max_sge);
+ for (j = start; j != end; j = RX_SGE(j + 1)) {
+ u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
+ struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
+
+ BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
+ i, j, rx_sge[1], rx_sge[0], sw_page->page);
+ }
+
+ start = RCQ_BD(fp->rx_comp_cons - 10);
+ end = RCQ_BD(fp->rx_comp_cons + 503);
+ for (j = start; j != end; j = RCQ_BD(j + 1)) {
+ u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+
+ BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+ i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
+ }
+ }
+
+ /* Tx */
+ for_each_valid_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ if (!bp->fp)
+ break;
+
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+
+ if (!fp->txdata_ptr[cos])
+ break;
+
+ if (!txdata->tx_cons_sb)
+ continue;
+
+ start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
+ end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ struct sw_tx_bd *sw_bd =
+ &txdata->tx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
+ i, cos, j, sw_bd->skb,
+ sw_bd->first_bd);
+ }
+
+ start = TX_BD(txdata->tx_bd_cons - 10);
+ end = TX_BD(txdata->tx_bd_cons + 254);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
+ i, cos, j, tx_bd[0], tx_bd[1],
+ tx_bd[2], tx_bd[3]);
+ }
+ }
+ }
+#endif
+ if (IS_PF(bp)) {
+ int tmp_msg_en = bp->msg_enable;
+
+ bnx2x_fw_dump(bp);
+ bp->msg_enable |= NETIF_MSG_HW;
+ BNX2X_ERR("Idle check (1st round) ----------\n");
+ bnx2x_idle_chk(bp);
+ BNX2X_ERR("Idle check (2nd round) ----------\n");
+ bnx2x_idle_chk(bp);
+ bp->msg_enable = tmp_msg_en;
+ bnx2x_mc_assert(bp);
+ }
+
+ BNX2X_ERR("end crash dump -----------------\n");
+}
+
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC 10000 /* 10 milliseconds */
+#define FLR_WAIT_INTERVAL 50 /* usec */
+#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
+
+struct pbf_pN_buf_regs {
+ int pN;
+ u32 init_crd;
+ u32 crd;
+ u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+ int pN;
+ u32 lines_occup;
+ u32 lines_freed;
+};
+
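+/* Poll until the port's PBF buffer credits return to their init value
+ * (i.e. all in-flight tx data was freed) or the poll count expires.
+ */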
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+ struct pbf_pN_buf_regs *regs,
+ u32 poll_count)
+{
+ u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+ u32 cur_cnt = poll_count;
+
+ crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+ crd = crd_start = REG_RD(bp, regs->crd);
+ init_crd = REG_RD(bp, regs->init_crd);
+
+ DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
+ while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+ (init_crd - crd_start))) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERVAL);
+ crd = REG_RD(bp, regs->crd);
+ crd_freed = REG_RD(bp, regs->crd_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
+ regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+ regs->pN, crd_freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+ struct pbf_pN_cmd_regs *regs,
+ u32 poll_count)
+{
+ u32 occup, to_free, freed, freed_start;
+ u32 cur_cnt = poll_count;
+
+ occup = to_free = REG_RD(bp, regs->lines_occup);
+ freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
+ while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERVAL);
+ occup = REG_RD(bp, regs->lines_occup);
+ freed = REG_RD(bp, regs->lines_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
+ regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+ regs->pN, freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
+}
+
+static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+ u32 expected, u32 poll_count)
+{
+ u32 cur_cnt = poll_count;
+ u32 val;
+
+ while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+ udelay(FLR_WAIT_INTERVAL);
+
+ return val;
+}
+
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
+{
+ u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+ if (val != 0) {
+ BNX2X_ERR("%s usage count=%d\n", msg, val);
+ return 1;
+ }
+ return 0;
+}
+
+/* Routines shared with the VF FLR cleanup flow */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+ /* adjust polling timeout */
+ if (CHIP_REV_IS_EMUL(bp))
+ return FLR_POLL_CNT * 2000;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ return FLR_POLL_CNT * 120;
+
+ return FLR_POLL_CNT;
+}
+
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+ struct pbf_pN_cmd_regs cmd_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q0 :
+ PBF_REG_P0_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+ PBF_REG_P0_TQ_LINES_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q1 :
+ PBF_REG_P1_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+ PBF_REG_P1_TQ_LINES_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_LB_Q :
+ PBF_REG_P4_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+ PBF_REG_P4_TQ_LINES_FREED_CNT}
+ };
+
+ struct pbf_pN_buf_regs buf_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q0 :
+ PBF_REG_P0_INIT_CRD ,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q0 :
+ PBF_REG_P0_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+ PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q1 :
+ PBF_REG_P1_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q1 :
+ PBF_REG_P1_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+ PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_LB_Q :
+ PBF_REG_P4_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_LB_Q :
+ PBF_REG_P4_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+ PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+ };
+
+ int i;
+
+ /* Verify the command queues are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+ bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+ /* Verify the transmission buffers are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+ bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
+#define OP_GEN_PARAM(param) \
+ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
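+/* Ask the FW to generate a "final cleanup" aggregated interrupt for
+ * @clnup_func and poll CSTORM for the completion flag; returns 0 on
+ * success, 1 on timeout.
+ */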
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
+{
+ u32 op_gen_command = 0;
+ u32 comp_addr = BAR_CSTRORM_INTMEM +
+ CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+
+ if (REG_RD(bp, comp_addr)) {
+ BNX2X_ERR("Cleanup complete was not 0 before sending\n");
+ return 1;
+ }
+
+ op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+ DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
+
+ if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+ BNX2X_ERR("FW final cleanup did not succeed\n");
+ DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
+ (REG_RD(bp, comp_addr)));
+ bnx2x_panic();
+ return 1;
+ }
+ /* Zero completion for next FLR */
+ REG_WR(bp, comp_addr, 0);
+
+ return 0;
+}
+
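+/* Return non-zero while the device still reports pending PCIe
+ * transactions (Transactions Pending bit in the Device Status reg).
+ */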
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+ u16 status;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
+ return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+ /* wait for CFC PF usage-counter to zero (includes all the VFs) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ CFC_REG_NUM_LCIDS_INSIDE_PF,
+ "CFC PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ DORQ_REG_PF_USAGE_CNT,
+ "DQ PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+ "QM PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+ "Timers VNIC usage counter timed out",
+ poll_cnt))
+ return 1;
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+ "Timers NUM_SCANS usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait DMAE PF usage counter to zero */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ dmae_reg_go_c[INIT_DMAE_C(bp)],
+ "DMAE command register timed out",
+ poll_cnt))
+ return 1;
+
+ return 0;
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+ DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, PBF_REG_DISABLE_PF);
+ DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+ val);
+}
+
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+ /* Re-enable PF target read access */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Poll HW usage counters */
+ DP(BNX2X_MSG_SP, "Polling usage counters\n");
+ if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+ return -EBUSY;
+
+ /* Zero the igu 'trailing edge' and 'leading edge' */
+
+ /* Send the FW cleanup command */
+ if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+ return -EBUSY;
+
+ /* ATC cleanup */
+
+ /* Verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+ /* Wait 100ms (not adjusted according to platform) */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_is_pcie_pending(bp->pdev))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ /* Debug */
+ bnx2x_hw_enable_status(bp);
+
+ /*
+ * Master enable - set it here since WB DMAE writes are performed
+ * before this register is re-initialized as part of the regular
+ * function init
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ return 0;
+}
+
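+/* Enable interrupts in the HC block according to the active mode
+ * (MSI-X / single MSI-X / MSI / INTx) and set up the leading/trailing
+ * edge attention masks.
+ */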
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+ bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+ bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+ bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
+
+ if (msix) {
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0);
+ val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ if (single_msix)
+ val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
+ } else if (msi) {
+ val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else {
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ if (!CHIP_IS_E1(bp)) {
+ DP(NETIF_MSG_IFUP,
+ "write %x to HC %d (addr 0x%x)\n", val, port, addr);
+
+ REG_WR(bp, addr, val);
+
+ val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+ }
+ }
+
+ if (CHIP_IS_E1(bp))
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
+
+ DP(NETIF_MSG_IFUP,
+ "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
+ (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, addr, val);
+ /*
+ * Ensure that HC_CONFIG is written before leading/trailing edge config
+ */
+ barrier();
+
+ if (!CHIP_IS_E1(bp)) {
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ }
+}
+
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
+{
+ u32 val;
+ bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+ bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+ bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
+
+ val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ if (msix) {
+ val &= ~(IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ if (single_msix)
+ val |= IGU_PF_CONF_SINGLE_ISR_EN;
+ } else if (msi) {
+ val &= ~IGU_PF_CONF_INT_LINE_EN;
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ } else {
+ val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+ val |= (IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ }
+
+ /* Clean previous status - need to configure igu prior to ack */
+ if ((!msix) || single_msix) {
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ bnx2x_ack_int(bp);
+ }
+
+ val |= IGU_PF_CONF_FUNC_EN;
+
+ DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
+ val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+
+ if (val & IGU_PF_CONF_INT_LINE_EN)
+ pci_intx(bp->pdev, true);
+
+ barrier();
+
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_enable(bp);
+ else
+ bnx2x_igu_int_enable(bp);
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int i, offset;
+
+ if (disable_hw)
+ /* prevent the HW from sending interrupts */
+ bnx2x_int_disable(bp);
+
+ /* make sure all ISRs are done */
+ if (msix) {
+ synchronize_irq(bp->msix_table[0].vector);
+ offset = 1;
+ if (CNIC_SUPPORT(bp))
+ offset++;
+ for_each_eth_queue(bp, i)
+ synchronize_irq(bp->msix_table[offset++].vector);
+ } else
+ synchronize_irq(bp->pdev->irq);
+
+ /* make sure sp_task is not running */
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
+ flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+ "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return false;
+ }
+
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+ "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp: driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this function
+ * belongs to. Currently only 2 engines are supported.
+ */
+static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+ if (BP_PATH(bp))
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+ else
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for current engine.
+ */
+static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+ /* Set the interrupt occurred bit for the sp-task to recognize it
+ * must ack the interrupt and transition according to the IGU
+ * state machine.
+ */
+ atomic_set(&bp->interrupt_occurred, 1);
+
+ /* The sp_task must execute only after this bit
+ * is set, otherwise we will get out of sync and miss all
+ * further interrupts. Hence, the barrier.
+ */
+ smp_wmb();
+
+ /* schedule sp_task to workqueue */
+ return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
+
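+/* Handle a ramrod completion CQE on a fastpath ring: map the FW
+ * command to a driver queue command, complete it on the queue object
+ * and return the credit to the slow-path completion queue.
+ */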
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+ struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ DP(BNX2X_MSG_SP,
+ "fp %d cid %d got ramrod #%d state is %x type is %d\n",
+ fp->index, cid, command, bp->state,
+ rr_cqe->ramrod_cqe.ramrod_type);
+
+ /* If cid is within VF range, replace the slowpath object with the
+ * one corresponding to this VF
+ */
+ if (cid >= BNX2X_FIRST_VF_CID &&
+ cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+ bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
+ switch (command) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+ DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_HALT):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_HALT;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TERMINATE):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_TERMINATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_EMPTY):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_EMPTY;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+ DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ break;
+
+ default:
+ BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+ command, fp->index);
+ return;
+ }
+
+ if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+ q_obj->complete_cmd(bp, q_obj, drv_cmd))
+ /* q_obj->complete_cmd() failure means that this was
+ * an unexpected completion.
+ *
+ * In this case we don't want to increase the bp->spq_left
+ * because apparently we haven't sent this command in the
+ * first place.
+ */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#else
+ return;
+#endif
+
+ smp_mb__before_atomic();
+ atomic_inc(&bp->cq_spq_left);
+ /* push the change in bp->spq_left and towards the memory */
+ smp_mb__after_atomic();
+
+ DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
+
+ if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
+ (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
+ /* If the Q update ramrod completed for the last Q in the AFEX
+ * VIF set flow, ACK the MCP at the end.
+ *
+ * Mark the pending-ACK-to-MCP bit before clearing the
+ * update-pending bit, so that the two bits are never both
+ * clear. At the end of load/unload the driver checks that
+ * sp_state is cleared, and this ordering prevents races.
+ */
+ smp_mb__before_atomic();
+ set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
+ wmb();
+ clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+ smp_mb__after_atomic();
+
+ /* schedule the sp task as mcp ack is required */
+ bnx2x_schedule_sp_task(bp);
+ }
+}
+
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+ struct bnx2x *bp = netdev_priv(dev_instance);
+ u16 status = bnx2x_ack_int(bp);
+ u16 mask;
+ int i;
+ u8 cos;
+
+ /* Return here if interrupt is shared and it's not for us */
+ if (unlikely(status == 0)) {
+ DP(NETIF_MSG_INTR, "not our interrupt!\n");
+ return IRQ_NONE;
+ }
+ DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
+ if (status & mask) {
+ /* Handle Rx or Tx according to SB id */
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
+ status &= ~mask;
+ }
+ }
+
+ if (CNIC_SUPPORT(bp)) {
+ mask = 0x2;
+ if (status & (mask | 0x1)) {
+ struct cnic_ops *c_ops = NULL;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops && (bp->cnic_eth_dev.drv_state &
+ CNIC_DRV_STATE_HANDLES_IRQ))
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+
+ status &= ~mask;
+ }
+ }
+
+ if (unlikely(status & 0x1)) {
+
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
+
+ status &= ~0x1;
+ if (!status)
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
+
+ return IRQ_HANDLED;
+}
+
+/* Link */
+
+/*
+ * General service functions
+ */
+
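+/* HW resource locks live in MISC_REG_DRIVER_CONTROL_*: each function
+ * has its own control register pair; writing the resource bit to the
+ * "+4" register attempts the lock, and reading the control register
+ * back tells whether it was granted.
+ */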
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+ int cnt;
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is not already taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit) {
+ BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
+ lock_status, resource_bit);
+ return -EEXIST;
+ }
+
+ /* Try for 5 seconds, every 5 ms */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return 0;
+
+ usleep_range(5000, 10000);
+ }
+ BNX2X_ERR("Timeout\n");
+ return -EAGAIN;
+}
+
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is currently taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (!(lock_status & resource_bit)) {
+ BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
+ lock_status, resource_bit);
+ return -EFAULT;
+ }
+
+ REG_WR(bp, hw_lock_control_reg, resource_bit);
+ return 0;
+}
+
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+ int value;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ /* read GPIO value */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+ /* get the requested pin value */
+ if ((gpio_reg & gpio_mask) == gpio_mask)
+ value = 1;
+ else
+ value = 0;
+
+ return value;
+}
+
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> output low\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> output high\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> input\n",
+ gpio_num, gpio_shift);
+ /* set FLOAT */
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+ u32 gpio_reg = 0;
+ int rc = 0;
+
+ /* Any port swapping should be handled by caller. */
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+ /* set CLR */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+ /* set SET */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+ /* set FLOAT */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc == 0)
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return rc;
+}
+
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO int */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+ DP(NETIF_MSG_LINK,
+ "Clear GPIO INT %d (shift %d) -> output low\n",
+ gpio_num, gpio_shift);
+ /* clear SET and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+ DP(NETIF_MSG_LINK,
+ "Set GPIO INT %d (shift %d) -> output high\n",
+ gpio_num, gpio_shift);
+ /* clear CLR and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
+{
+ u32 spio_reg;
+
+ /* Only 2 SPIOs are configurable */
+ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
+ BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+ /* read SPIO and mask except the float bits */
+ spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_SPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
+ /* clear FLOAT and set CLR */
+ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+ spio_reg |= (spio << MISC_SPIO_CLR_POS);
+ break;
+
+ case MISC_SPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
+ /* clear FLOAT and set SET */
+ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+ spio_reg |= (spio << MISC_SPIO_SET_POS);
+ break;
+
+ case MISC_SPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
+ /* set FLOAT */
+ spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_SPIO, spio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+
+ return 0;
+}
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+ u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ switch (bp->link_vars.ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+ bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void bnx2x_set_requested_fc(struct bnx2x *bp)
+{
+ /* Initialize link parameters structure variables
+ * It is recommended to turn off RX FC for jumbo frames
+ * for better performance
+ */
+ if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+ else
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+}
+
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+ u32 pause_enabled = 0;
+
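+ /* Publish the Tx flow-control state to uSTORM internal RAM so the
+ * FW knows whether pause frames, rather than drops, will throttle
+ * the peer while dropless flow control is in effect.
+ */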
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+ pause_enabled);
+ }
+
+ DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+ pause_enabled ? "enabled" : "disabled");
+}
+
+int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+ int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
+ u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
+
+ if (!BP_NOMCP(bp)) {
+ bnx2x_set_requested_fc(bp);
+ bnx2x_acquire_phy_lock(bp);
+
+ if (load_mode == LOAD_DIAG) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_XGXS;
+ /* Prefer doing PHY loopback at the highest supported speed */
+ if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
+ if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_20000;
+ else if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_10000;
+ else
+ lp->req_line_speed[cfx_idx] =
+ SPEED_1000;
+ }
+ }
+
+ if (load_mode == LOAD_LOOPBACK_EXT) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_EXT;
+ }
+
+ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_init_dropless_fc(bp);
+
+ bnx2x_calc_fc_adv(bp);
+
+ if (bp->link_vars.link_up) {
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ bnx2x_link_report(bp);
+ }
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
+ return rc;
+ }
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_init_dropless_fc(bp);
+
+ bnx2x_calc_fc_adv(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+void bnx2x_force_link_reset(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_release_phy_lock(bp);
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
+{
+ u8 rc = 0;
+
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+ is_serdes);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+ return rc;
+}
+
+/* Calculates the sum of vn_min_rates.
+ * It's needed for further normalizing of the min_rates.
+ * Returns:
+ *   sum of vn_min_rates.
+ *     or
+ *   0 - if all the min_rates are 0.
+ * In the latter case the fairness algorithm should be deactivated.
+ * If not all min_rates are zero then those that are zeroes will be set to 1.
+ */
+static void bnx2x_calc_vn_min(struct bnx2x *bp,
+ struct cmng_init_input *input)
+{
+ int all_zero = 1;
+ int vn;
+
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ u32 vn_cfg = bp->mf_config[vn];
+ u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+ /* Skip hidden vns */
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+ vn_min_rate = 0;
+ /* If min rate is zero - set it to 1 */
+ else if (!vn_min_rate)
+ vn_min_rate = DEF_MIN_RATE;
+ else
+ all_zero = 0;
+
+ input->vnic_min_rate[vn] = vn_min_rate;
+ }
+
+ /* if ETS or all min rates are zeros - disable fairness */
+ if (BNX2X_IS_ETS_ENABLED(bp)) {
+ input->flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+ } else if (all_zero) {
+ input->flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP,
+ "All MIN values are zeroes fairness will be disabled\n");
+ } else
+ input->flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+}
+
+static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
+ struct cmng_init_input *input)
+{
+ u16 vn_max_rate;
+ u32 vn_cfg = bp->mf_config[vn];
+
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+ vn_max_rate = 0;
+ else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
+ if (IS_MF_PERCENT_BW(bp)) {
+ /* maxCfg in percent of link speed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ } else /* SD modes */
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
+ }
+
+ DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
+
+ input->vnic_max_rate[vn] = vn_max_rate;
+}
+
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+ if (CHIP_REV_IS_SLOW(bp))
+ return CMNG_FNS_NONE;
+ if (IS_MF(bp))
+ return CMNG_FNS_MINMAX;
+
+ return CMNG_FNS_NONE;
+}
+
+void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+ int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
+
+ if (BP_NOMCP(bp))
+ return; /* what should be the default value in this case? */
+
+ /* For 2 port configuration the absolute function number formula
+ * is:
+ * abs_func = 2 * vn + BP_PORT + BP_PATH
+ *
+ * and there are 4 functions per port
+ *
+ * For 4 port configuration it is
+ * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+ *
+ * and there are 2 functions per port
+ */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+ if (func >= E1H_FUNC_MAX)
+ break;
+
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ }
+ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+ DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+ bp->flags |= MF_FUNC_DIS;
+ } else {
+ DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+ bp->flags &= ~MF_FUNC_DIS;
+ }
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+ struct cmng_init_input input;
+ memset(&input, 0, sizeof(struct cmng_init_input));
+
+ input.port_rate = bp->link_vars.line_speed;
+
+ if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
+ int vn;
+
+ /* read mf conf from shmem */
+ if (read_cfg)
+ bnx2x_read_mf_cfg(bp);
+
+ /* calculate vn_weight_sum and enable fairness if it is not 0 */
+ bnx2x_calc_vn_min(bp, &input);
+
+ /* calculate and set min-max rate for each vn */
+ if (bp->port.pmf)
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
+ bnx2x_calc_vn_max(bp, vn, &input);
+
+ /* always enable rate shaping and fairness */
+ input.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+
+ bnx2x_init_cmng(&input, &bp->cmng);
+ return;
+ }
+
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "rate shaping and fairness are disabled\n");
+}
+
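+/* Copy the per-port CMNG configuration and the per-vnic rate-shaping
+ * and fairness variables into XSTORM internal memory.
+ */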
+static void storm_memset_cmng(struct bnx2x *bp,
+ struct cmng_init *cmng,
+ u8 port)
+{
+ int vn;
+ size_t size = sizeof(struct cmng_struct_per_port);
+
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
+
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ int func = func_by_vn(bp, vn);
+
+ addr = BAR_XSTRORM_INTMEM +
+ XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
+ size = sizeof(struct rate_shaping_vars_per_vn);
+ __storm_memset_struct(bp, addr, size,
+ (u32 *)&cmng->vnic.vnic_max_rate[vn]);
+
+ addr = BAR_XSTRORM_INTMEM +
+ XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
+ size = sizeof(struct fairness_vars_per_vn);
+ __storm_memset_struct(bp, addr, size,
+ (u32 *)&cmng->vnic.vnic_min_rate[vn]);
+ }
+}
+
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else {
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+}
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
+{
+ /* Make sure that we are synced with the current statistics */
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ bnx2x_link_update(&bp->link_params, &bp->link_vars);
+
+ bnx2x_init_dropless_fc(bp);
+
+ if (bp->link_vars.link_up) {
+
+ if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
+ struct host_port_stats *pstats;
+
+ pstats = bnx2x_sp(bp, port_stats);
+ /* reset old mac stats */
+ memset(&(pstats->mac_stx[0]), 0,
+ sizeof(struct mac_stx));
+ }
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ }
+
+ if (bp->link_vars.link_up && bp->link_vars.line_speed)
+ bnx2x_set_local_cmng(bp);
+
+ __bnx2x_link_report(bp);
+
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+ if (bp->state != BNX2X_STATE_OPEN)
+ return;
+
+ /* read updated dcb configuration */
+ if (IS_PF(bp)) {
+ bnx2x_dcbx_pmf_update(bp);
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ /* indicate link status */
+ bnx2x_link_report(bp);
+
+ } else { /* VF */
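+ /* VFs cannot query the PHY directly - report a fixed 10G
+ * full-duplex link; sampling the bulletin board below may
+ * override this with PF-provided state.
+ */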
+ bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ bp->port.advertising[0] = bp->port.supported[0];
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = BP_PORT(bp);
+ bp->link_params.req_duplex[0] = DUPLEX_FULL;
+ bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_line_speed[0] = SPEED_10000;
+ bp->link_params.speed_cap_mask[0] = 0x7f0000;
+ bp->link_params.switch_cfg = SWITCH_CFG_10G;
+ bp->link_vars.mac_type = MAC_TYPE_BMAC;
+ bp->link_vars.line_speed = SPEED_10000;
+ bp->link_vars.link_status =
+ (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+ bp->link_vars.link_up = 1;
+ bp->link_vars.duplex = DUPLEX_FULL;
+ bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ __bnx2x_link_report(bp);
+
+ bnx2x_sample_bulletin(bp);
+
+ /* If the bulletin board did not carry a link status update,
+ * __bnx2x_link_report will report the current status, but it
+ * will NOT duplicate a report that was already issued while
+ * sampling the bulletin board.
+ */
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ }
+}
+
+static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
+ u16 vlan_val, u8 allowed_prio)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_afex_update_params *f_update_params =
+ &func_params.params.afex_update;
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
+
+ /* no need to wait for RAMROD completion, so don't
+ * set RAMROD_COMP_WAIT flag
+ */
+
+ f_update_params->vif_id = vifid;
+ f_update_params->afex_default_vlan = vlan_val;
+ f_update_params->allowed_priorities = allowed_prio;
+
+ /* if the ramrod cannot be sent, respond to MCP immediately */
+ if (bnx2x_func_state_change(bp, &func_params) < 0)
+ bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+
+ return 0;
+}
+
+static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
+ u16 vif_index, u8 func_bit_map)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_afex_viflists_params *update_params =
+ &func_params.params.afex_viflists;
+ int rc;
+ u32 drv_msg_code;
+
+ /* validate only LIST_SET and LIST_GET are received from switch */
+ if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
+ BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
+ cmd_type);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
+
+ /* set parameters according to cmd_type */
+ update_params->afex_vif_list_command = cmd_type;
+ update_params->vif_list_index = vif_index;
+ update_params->func_bit_map =
+ (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
+ update_params->func_to_clear = 0;
+ drv_msg_code =
+ (cmd_type == VIF_LIST_RULE_GET) ?
+ DRV_MSG_CODE_AFEX_LISTGET_ACK :
+ DRV_MSG_CODE_AFEX_LISTSET_ACK;
+
+ /* if the ramrod cannot be sent, respond to MCP immediately for
+ * SET and GET requests (others are not triggered from MCP)
+ */
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc < 0)
+ bnx2x_fw_command(bp, drv_msg_code, 0);
+
+ return 0;
+}
+
+static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
+{
+ struct afex_stats afex_stats;
+ u32 func = BP_ABS_FUNC(bp);
+ u32 mf_config;
+ u16 vlan_val;
+ u32 vlan_prio;
+ u16 vif_id;
+ u8 allowed_prio;
+ u8 vlan_mode;
+ u32 addr_to_write, vifid, addrs, stats_type, i;
+
+ if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
+ vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+ DP(BNX2X_MSG_MCP,
+ "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
+ bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
+ }
+
+ if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
+ vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+ addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
+ DP(BNX2X_MSG_MCP,
+ "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
+ vifid, addrs);
+ bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
+ addrs);
+ }
+
+ if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
+ addr_to_write = SHMEM2_RD(bp,
+ afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
+ stats_type = SHMEM2_RD(bp,
+ afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+
+ DP(BNX2X_MSG_MCP,
+ "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
+ addr_to_write);
+
+ bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
+
+ /* write response to scratchpad, for MCP */
+ for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
+ REG_WR(bp, addr_to_write + i*sizeof(u32),
+ *(((u32 *)(&afex_stats))+i));
+
+ /* send ack message to MCP */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
+ }
+
+ if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
+ mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
+ bp->mf_config[BP_VN(bp)] = mf_config;
+ DP(BNX2X_MSG_MCP,
+ "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
+ mf_config);
+
+ /* if VIF_SET is "enabled" */
+ if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
+ /* set rate limit directly to internal RAM */
+ struct cmng_init_input cmng_input;
+ struct rate_shaping_vars_per_vn m_rs_vn;
+ size_t size = sizeof(struct rate_shaping_vars_per_vn);
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
+
+ bp->mf_config[BP_VN(bp)] = mf_config;
+
+ bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
+ m_rs_vn.vn_counter.rate =
+ cmng_input.vnic_max_rate[BP_VN(bp)];
+ m_rs_vn.vn_counter.quota =
+ (m_rs_vn.vn_counter.rate *
+ RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+ __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
+
+ /* read relevant values from mf_cfg struct in shmem */
+ vif_id =
+ (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+ FUNC_MF_CFG_E1HOV_TAG_SHIFT;
+ vlan_val =
+ (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+ FUNC_MF_CFG_AFEX_VLAN_SHIFT;
+ vlan_prio = (mf_config &
+ FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+ FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
+ vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
+ vlan_mode =
+ (MF_CFG_RD(bp,
+ func_mf_config[func].afex_config) &
+ FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+ FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
+ allowed_prio =
+ (MF_CFG_RD(bp,
+ func_mf_config[func].afex_config) &
+ FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+ FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
+
+ /* send ramrod to FW, return in case of failure */
+ if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
+ allowed_prio))
+ return;
+
+ bp->afex_def_vlan_tag = vlan_val;
+ bp->afex_vlan_mode = vlan_mode;
+ } else {
+ /* notify link down because the function is disabled in mf_cfg */
+ bnx2x_link_report(bp);
+
+ /* send INVALID VIF ramrod to FW */
+ bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
+
+ /* Reset the default afex VLAN */
+ bp->afex_def_vlan_tag = -1;
+ }
+ }
+}
+
+static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
+{
+ struct bnx2x_func_switch_update_params *switch_update_params;
+ struct bnx2x_func_state_params func_params;
+
+ memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
+ switch_update_params = &func_params.params.switch_update;
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
+ int func = BP_ABS_FUNC(bp);
+ u32 val;
+
+ /* Re-learn the S-tag from shmem */
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_ov = val;
+ } else {
+ BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
+ goto fail;
+ }
+
+ /* Configure new S-tag in LLH */
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
+ bp->mf_ov);
+
+ /* Send Ramrod to update FW of change */
+ __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+ &switch_update_params->changes);
+ switch_update_params->vlan = bp->mf_ov;
+
+ if (bnx2x_func_state_change(bp, &func_params) < 0) {
+ BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
+ bp->mf_ov);
+ goto fail;
+ } else {
+ DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+ bp->mf_ov);
+ }
+ } else {
+ goto fail;
+ }
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+ return;
+fail:
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
+}
+
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ bp->port.pmf = 1;
+ DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
+
+ /*
+ * We need the mb() to ensure the ordering between the writing to
+ * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+ */
+ smp_mb();
+
+ /* queue a periodic task */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+ bnx2x_dcbx_pmf_update(bp);
+
+ /* enable nig attention */
+ val = (0xff0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+ }
+
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
+{
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 seq;
+ u32 rc = 0;
+ u32 cnt = 1;
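+ /* emulation/FPGA ("slow") chips poll with a 10x longer interval */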
+ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+ mutex_lock(&bp->fw_mb_mutex);
+ seq = ++bp->fw_seq;
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+ DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+ (command | seq), param);
+
+ do {
+ /* let the FW do its magic ... */
+ msleep(delay);
+
+ rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*10ms) */
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt*delay, rc, seq);
+
+ /* is this a reply to our command? */
+ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+ rc &= FW_MSG_CODE_MASK;
+ else {
+ /* FW BUG! */
+ BNX2X_ERR("FW failed to respond!\n");
+ bnx2x_fw_dump(bp);
+ rc = 0;
+ }
+ mutex_unlock(&bp->fw_mb_mutex);
+
+ return rc;
+}
+
+static void storm_memset_func_cfg(struct bnx2x *bp,
+ struct tstorm_eth_function_common_config *tcfg,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+ if (CHIP_IS_E1x(bp)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
+
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
+
+ /* spq */
+ if (p->spq_active) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
+}
+
+/**
+ * bnx2x_get_common_flags - Return common flags
+ *
+ * @bp: device handle
+ * @fp: queue handle
+ * @zero_stats: TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common to both Tx-only and normal connections.
+ */
+static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool zero_stats)
+{
+ unsigned long flags = 0;
+
+ /* PF driver will always initialize the Queue to an ACTIVE state */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
+
+ /* tx only connections collect statistics (on the same index as the
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
+ */
+
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
+ if (bp->flags & TX_SWITCHING)
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+ __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
+#endif
+
+ return flags;
+}
+
+static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ unsigned long flags = 0;
+
+ /* calculate other queue flags */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, &flags);
+
+ if (IS_FCOE_FP(fp)) {
+ __set_bit(BNX2X_Q_FLG_FCOE, &flags);
+ /* For FCoE - force usage of default priority (for afex) */
+ __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
+ }
+
+ if (fp->mode != TPA_MODE_DISABLED) {
+ __set_bit(BNX2X_Q_FLG_TPA, &flags);
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+ if (fp->mode == TPA_MODE_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
+ }
+
+ if (leading) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &flags);
+ }
+
+ /* Always set HW VLAN stripping */
+ __set_bit(BNX2X_Q_FLG_VLAN, &flags);
+
+ /* configure silent vlan removal */
+ if (IS_MF_AFEX(bp))
+ __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
+
+ return flags | bnx2x_get_common_flags(bp, fp, true);
+}
+
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+ u8 cos)
+{
+ gen_init->stat_id = bnx2x_stats_id(fp);
+ gen_init->spcl_id = fp->cl_id;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ gen_init->mtu = bp->dev->mtu;
+
+ gen_init->cos = cos;
+
+ gen_init->fp_hsi = ETH_FP_HSI_VERSION;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+ struct bnx2x_rxq_setup_params *rxq_init)
+{
+ u8 max_sge = 0;
+ u16 sge_sz = 0;
+ u16 tpa_agg_size = 0;
+
+ if (fp->mode != TPA_MODE_DISABLED) {
+ pause->sge_th_lo = SGE_TH_LO(bp);
+ pause->sge_th_hi = SGE_TH_HI(bp);
+
+ /* validate SGE ring has enough to cross high threshold */
+ WARN_ON(bp->dropless_fc &&
+ pause->sge_th_hi + FW_PREFETCH_CNT >
+ MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
+ tpa_agg_size = TPA_AGG_SIZE;
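+ /* max_sge: number of SGE entries needed for an MTU-sized
+ * buffer - pages per MTU, rounded up to whole SGE elements
+ */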
+ max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+ SGE_PAGE_SHIFT;
+ max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
+ }
+
+ /* pause - not for e1 */
+ if (!CHIP_IS_E1(bp)) {
+ pause->bd_th_lo = BD_TH_LO(bp);
+ pause->bd_th_hi = BD_TH_HI(bp);
+
+ pause->rcq_th_lo = RCQ_TH_LO(bp);
+ pause->rcq_th_hi = RCQ_TH_HI(bp);
+ /*
+ * validate that rings have enough entries to cross
+ * high thresholds
+ */
+ WARN_ON(bp->dropless_fc &&
+ pause->bd_th_hi + FW_PREFETCH_CNT >
+ bp->rx_ring_size);
+ WARN_ON(bp->dropless_fc &&
+ pause->rcq_th_hi + FW_PREFETCH_CNT >
+ NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
+
+ pause->pri_map = 1;
+ }
+
+ /* rxq setup */
+ rxq_init->dscr_map = fp->rx_desc_mapping;
+ rxq_init->sge_map = fp->rx_sge_mapping;
+ rxq_init->rcq_map = fp->rx_comp_mapping;
+ rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+
+ /* This is the maximum number of data bytes that may be
+ * placed on the BD (not including padding).
+ */
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+ BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
+
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->tpa_agg_sz = tpa_agg_size;
+ rxq_init->sge_buf_sz = sge_sz;
+ rxq_init->max_sges_pkt = max_sge;
+ rxq_init->rss_engine_id = BP_FUNC(bp);
+ rxq_init->mcast_engine_id = BP_FUNC(bp);
+
+ /* Maximum number of simultaneous TPA aggregations for this queue.
+ *
+ * For PF clients it should be the maximum available number.
+ * VF drivers may want to define it to a smaller value.
+ */
+ rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
+
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ if (IS_FCOE_FP(fp))
+ rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+ else
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ /* configure silent vlan removal
+ * if multi function mode is afex, then mask default vlan
+ */
+ if (IS_MF_AFEX(bp)) {
+ rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
+ rxq_init->silent_removal_mask = VLAN_VID_MASK;
+ }
+}
+
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+ u8 cos)
+{
+ txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
+ txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+
+ /*
+ * Set the TSS leading client id for Tx classification to the
+ * leading RSS client id.
+ */
+ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ if (IS_FCOE_FP(fp)) {
+ txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+ }
+}
+
+static void bnx2x_pf_init(struct bnx2x *bp)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ struct event_ring_data eq_data = { {0} };
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset IGU PF statistics: MSIX + ATTN */
+ /* PF */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ /* ATTN */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ }
+
+ func_init.spq_active = true;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = BP_FUNC(bp);
+ func_init.spq_map = bp->spq_mapping;
+ func_init.spq_prod = bp->spq_prod_idx;
+
+ bnx2x_func_init(bp, &func_init);
+
+ memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+ * Congestion management values depend on the link rate.
+ * There is no active link yet, so the initial link rate is set
+ * to 10 Gbps. When the link comes up, the congestion management
+ * values are re-calculated according to the actual link rate.
+ */
+ bp->link_vars.line_speed = SPEED_10000;
+ bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+ /* Only the PMF sets the HW */
+ if (bp->port.pmf)
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+ /* init Event Queue - PCI bus guarantees correct endianness */
+ eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+ eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+ eq_data.producer = bp->eq_prod;
+ eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+ eq_data.sb_id = DEF_SB_ID;
+ storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ bnx2x_tx_disable(bp);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+
+ /* Tx queues should only be re-enabled */
+ netif_tx_wake_all_queues(bp->dev);
+
+ /*
+ * Do not call netif_carrier_on() here; it will be called when the
+ * link state is checked, if the link is up.
+ */
+}
+
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+ struct eth_stats_info *ether_stat =
+ &bp->slowpath->drv_info_to_mcp.ether_stat;
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bp->sp_objs->mac_obj;
+ int i;
+
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
+ ETH_STAT_INFO_VERSION_LEN);
+
+ /* Get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ * mac_local field of the ether_stat struct. The base address is
+ * offset by 2 bytes because the field is 8 bytes wide while a mac
+ * address is only 6 bytes. Likewise, the stride passed to the
+ * get_n_elements function is 2 bytes, bridging the gap between the
+ * 6 bytes of a mac and the 8 bytes allocated in the ether_stat
+ * struct, so the macs land in their proper positions.
+ */
+ for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ memset(ether_stat->mac_local + i, 0,
+ sizeof(ether_stat->mac_local[0]));
+ mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ ETH_ALEN);
+ ether_stat->mtu_size = bp->dev->mtu;
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+ if (bp->dev->features & NETIF_F_TSO)
+ ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+ ether_stat->feature_flags |= bp->common.boot_mode;
+
+ ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+ ether_stat->txq_size = bp->tx_ring_size;
+ ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct fcoe_stats_info *fcoe_stat =
+ &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+ if (!CNIC_LOADED(bp))
+ return;
+
+ memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
+
+ fcoe_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+ /* insert FCoE stats from ramrod response */
+ if (!NO_FCOE(bp)) {
+ struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
+ tstorm_queue_statistics;
+
+ struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
+ xstorm_queue_statistics;
+
+ struct fcoe_statistics_params *fw_fcoe_stat =
+ &bp->fw_stats_data->fcoe;
+
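+
+ /* Rx bytes/frames = FW FCoE counters plus the per-queue
+ * ucast/bcast/mcast tSTORM counters (Tx uses the analogous
+ * xSTORM counters below)
+ */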
+ ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
+ fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+ ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
+ fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
+ }
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct iscsi_stats_info *iscsi_stat =
+ &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+ if (!CNIC_LOADED(bp))
+ return;
+
+ memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ ETH_ALEN);
+
+ iscsi_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+}
+
+/* called due to MCP event (on pmf):
+ * reread new bandwidth configuration
+ * configure FW
+ * notify other functions about the change
+ */
+static void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+ /* Workaround for MFW bug.
+ * MFW is not supposed to generate BW attention in
+ * single function mode.
+ */
+ if (!IS_MF(bp)) {
+ DP(BNX2X_MSG_MCP,
+ "Ignoring MF BW config in single function mode\n");
+ return;
+ }
+
+ if (bp->link_vars.link_up) {
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ }
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+ bnx2x_config_mf_bw(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
+
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+ enum drv_info_opcode op_code;
+ u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+ bool release = false;
+ int wait;
+
+ /* if drv_info version supported by MFW doesn't match - send NACK */
+ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+ DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+ /* Must prevent other flows from accessing drv_info_to_mcp */
+ mutex_lock(&bp->drv_info_mutex);
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+
+ switch (op_code) {
+ case ETH_STATS_OPCODE:
+ bnx2x_drv_info_ether_stat(bp);
+ break;
+ case FCOE_STATS_OPCODE:
+ bnx2x_drv_info_fcoe_stat(bp);
+ break;
+ case ISCSI_STATS_OPCODE:
+ bnx2x_drv_info_iscsi_stat(bp);
+ break;
+ default:
+ /* if op code isn't supported - send NACK */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ goto out;
+ }
+
+ /* if we got drv_info attn from MFW then these fields are defined in
+ * shmem2 for sure
+ */
+ SHMEM2_WR(bp, drv_info_host_addr_lo,
+ U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+ SHMEM2_WR(bp, drv_info_host_addr_hi,
+ U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+ /* Since management may want both this and get_driver_version, we
+ * need to wait until it notifies us that it has finished using
+ * the buffer.
+ */
+ if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+ DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+ } else if (!bp->drv_info_mng_owner) {
+ u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+ for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+ u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+ /* Management is done; need to clear indication */
+ if (indication & bit) {
+ SHMEM2_WR(bp, mfw_drv_indication,
+ indication & ~bit);
+ release = true;
+ break;
+ }
+
+ msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+ }
+ }
+ if (!release) {
+ DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+ bp->drv_info_mng_owner = true;
+ }
+
+out:
+ mutex_unlock(&bp->drv_info_mutex);
+}
+
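+/* Pack up to four version fields into a u32, one byte per field; fields
+ * that fail to parse are zeroed. In the bnx2x format the character right
+ * after the leading "1." is taken as a single-digit field of its own.
+ */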
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+ u8 vals[4];
+ int i = 0;
+
+ if (bnx2x_format) {
+ i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ if (i > 0)
+ vals[0] -= '0';
+ } else {
+ i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ }
+
+ while (i < 4)
+ vals[i++] = 0;
+
+ return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
+
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+ u32 iscsiver = DRV_VER_NOT_LOADED;
+ u32 fcoever = DRV_VER_NOT_LOADED;
+ u32 ethver = DRV_VER_NOT_LOADED;
+ int idx = BP_FW_MB_IDX(bp);
+ u8 *version;
+
+ if (!SHMEM2_HAS(bp, func_os_drv_ver))
+ return;
+
+ mutex_lock(&bp->drv_info_mutex);
+ /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
+ if (bp->drv_info_mng_owner)
+ goto out;
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ goto out;
+
+ /* Parse ethernet driver version */
+ ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ if (!CNIC_LOADED(bp))
+ goto out;
+
+ /* Try getting storage driver version via cnic */
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_iscsi_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+ iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_fcoe_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+ fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+ mutex_unlock(&bp->drv_info_mutex);
+
+ DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+ ethver, iscsiver, fcoever);
+}
+
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+ u32 drv_ver;
+ u32 valid_dump;
+
+ if (!SHMEM2_HAS(bp, drv_info))
+ return;
+
+ /* Update Driver load time, possibly broken in y2038 */
+ SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
+
+ drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+ SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+ /* Check & notify On-Chip dump. */
+ valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+ if (valid_dump & FIRST_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+ if (valid_dump & SECOND_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
+static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
+{
+ u32 cmd_ok, cmd_fail;
+
+ /* sanity */
+ if (event & DRV_STATUS_DCC_EVENT_MASK &&
+ event & DRV_STATUS_OEM_EVENT_MASK) {
+ BNX2X_ERR("Received simultaneous events %08x\n", event);
+ return;
+ }
+
+ if (event & DRV_STATUS_DCC_EVENT_MASK) {
+ cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
+ cmd_ok = DRV_MSG_CODE_DCC_OK;
+ } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
+ cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
+ cmd_ok = DRV_MSG_CODE_OEM_OK;
+ }
+
+ DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
+
+ if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+ DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
+ /* This is the only place besides the function initialization
+ * where bp->flags can change, so it is done without any
+ * locks.
+ */
+ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+ DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
+ bp->flags |= MF_FUNC_DIS;
+
+ bnx2x_e1h_disable(bp);
+ } else {
+ DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
+ bp->flags &= ~MF_FUNC_DIS;
+
+ bnx2x_e1h_enable(bp);
+ }
+ event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+ DRV_STATUS_OEM_DISABLE_ENABLE_PF);
+ }
+
+ if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+ DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
+ bnx2x_config_mf_bw(bp);
+ event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+ DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
+ }
+
+ /* Report results to MCP */
+ if (event)
+ bnx2x_fw_command(bp, cmd_fail, 0);
+ else
+ bnx2x_fw_command(bp, cmd_ok, 0);
+}
+
+/* must be called under the spq lock */
+static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+ struct eth_spe *next_spe = bp->spq_prod_bd;
+
+ if (bp->spq_prod_bd == bp->spq_last_bd) {
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_prod_idx = 0;
+ DP(BNX2X_MSG_SP, "end of spq\n");
+ } else {
+ bp->spq_prod_bd++;
+ bp->spq_prod_idx++;
+ }
+ return next_spe;
+}
+
+/* must be called under the spq lock */
+static void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+
+ /*
+ * Make sure that BD data is updated before writing the producer:
+ * BD data is written to the memory, the producer is read from the
+ * memory, thus we need a full memory barrier to ensure the ordering.
+ */
+ mb();
+
+ REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd: command to check
+ * @cmd_type: command type
+ */
+static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+ if ((cmd_type == NONE_CONNECTION_TYPE) ||
+ (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+ (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+ (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp: driver handle
+ * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid: SW CID the command is related to
+ * @data_hi: command private data address (high 32 bits)
+ * @data_lo: command private data address (low 32 bits)
+ * @cmd_type: command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type)
+{
+ struct eth_spe *spe;
+ u16 type;
+ bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't post SP when there is panic\n");
+ return -EIO;
+ }
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+
+ /* CID needs the port number to be encoded in it */
+ spe->hdr.conn_and_cmd_data =
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
+
+ /* In some cases, type may already contain the func-id
+ * (mainly in SRIOV-related use cases), so we add it here only
+ * if it's not already set.
+ */
+ if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+ SPE_HDR_CONN_TYPE;
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ } else {
+ type = cmd_type;
+ }
+
+ spe->hdr.type = cpu_to_le16(type);
+
+ spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+ spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+ /*
+ * It's ok if the actual decrement reaches memory anywhere
+ * between the spin_lock and spin_unlock, so no further
+ * explicit memory barrier is needed.
+ */
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+
+ DP(BNX2X_MSG_SP,
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
+ HW_CID(bp, cid), data_hi, data_lo, type,
+ atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
+
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+ return 0;
+}
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
+{
+ u32 j, val;
+ int rc = 0;
+
+ might_sleep();
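+ /* poll for the lock bit to stick - up to ~5-10 seconds
+ * (1000 iterations * 5-10 ms)
+ */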
+ for (j = 0; j < 1000; j++) {
+ REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
+ val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
+ if (val & MCPR_ACCESS_LOCK_LOCK)
+ break;
+
+ usleep_range(5000, 10000);
+ }
+ if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
+ BNX2X_ERR("Cannot acquire MCP access lock register\n");
+ rc = -EBUSY;
+ }
+
+ return rc;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
+{
+ REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
+}
+
+#define BNX2X_DEF_SB_ATT_IDX 0x0001
+#define BNX2X_DEF_SB_IDX 0x0002
+
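+/* Compare the default status block indices with the driver's local
+ * copies and return a mask of the ones that advanced.
+ */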
+static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ u16 rc = 0;
+
+ barrier(); /* status block is written to by the chip */
+ if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+ bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+ rc |= BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ if (bp->def_idx != def_sb->sp_sb.running_index) {
+ bp->def_idx = def_sb->sp_sb.running_index;
+ rc |= BNX2X_DEF_SB_IDX;
+ }
+
+ /* Do not reorder: indices reading should complete before handling */
+ barrier();
+ return rc;
+}
+
+/*
+ * slow path service functions
+ */
+
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+ int port = BP_PORT(bp);
+ u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+ u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+ NIG_REG_MASK_INTERRUPT_PORT0;
+ u32 aeu_mask;
+ u32 nig_mask = 0;
+ u32 reg_addr;
+
+ if (bp->attn_state & asserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, aeu_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
+ aeu_mask, asserted);
+ aeu_mask &= ~(asserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, aeu_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state |= asserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+ if (asserted & ATTN_HARD_WIRED_MASK) {
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+
+ bnx2x_acquire_phy_lock(bp);
+
+ /* save nig interrupt mask */
+ nig_mask = REG_RD(bp, nig_int_mask_addr);
+
+ /* If nig_mask is not set, no need to call the update
+ * function.
+ */
+ if (nig_mask) {
+ REG_WR(bp, nig_int_mask_addr, 0);
+
+ bnx2x_link_attn(bp);
+ }
+
+ /* handle unicore attn? */
+ }
+ if (asserted & ATTN_SW_TIMER_4_FUNC)
+ DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+
+ if (asserted & GPIO_2_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+
+ if (asserted & GPIO_3_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+
+ if (asserted & GPIO_4_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+
+ if (port == 0) {
+ if (asserted & ATTN_GENERAL_ATTN_1) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_2) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_3) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+ }
+ } else {
+ if (asserted & ATTN_GENERAL_ATTN_4) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_5) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_6) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+ }
+ }
+
+ } /* if hardwired */
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_SET);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
+
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, asserted);
+
+ /* now set back the mask */
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+ /* Verify that IGU ack through BAR was written before restoring
+ * NIG mask. This loop should exit after 2-3 iterations max.
+ */
+ if (bp->common.int_block != INT_BLOCK_HC) {
+ u32 cnt = 0, igu_acked;
+ do {
+ igu_acked = REG_RD(bp,
+ IGU_REG_ATTENTION_ACK_BITS);
+ } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
+ (++cnt < MAX_IGU_ATTN_ACK_TO));
+ if (!igu_acked)
+ DP(NETIF_MSG_HW,
+ "Failed to verify IGU ack on time\n");
+ barrier();
+ }
+ REG_WR(bp, nig_int_mask_addr, nig_mask);
+ bnx2x_release_phy_lock(bp);
+ }
+}
+
+static void bnx2x_fan_failure(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 ext_phy_config;
+ /* mark the failure */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+ ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+ SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+ ext_phy_config);
+
+ /* log the failure */
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
+ "Please contact OEM Support for assistance\n");
+
+ /* Schedule device reset (unload).
+ * This is needed because some boards consume enough power while
+ * the driver is up to overheat if the fan fails.
+ */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
+}
+
+static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+ int port = BP_PORT(bp);
+ int reg_offset;
+ u32 val;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+ if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("SPIO5 hw attention\n");
+
+ /* Fan failure attention */
+ bnx2x_hw_reset_phy(&bp->link_params);
+ bnx2x_fan_failure(bp);
+ }
+
+ if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_handle_module_detect_int(&bp->link_params);
+ bnx2x_release_phy_lock(bp);
+ }
+
+ if (attn & HW_INTERRUPT_ASSERT_SET_0) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
+ bnx2x_panic();
+ }
+}
+
+static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+
+ val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+ BNX2X_ERR("DB hw attention 0x%x\n", val);
+ /* DORQ discard attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from DORQ\n");
+ }
+
+ if (attn & HW_INTERRUPT_ASSERT_SET_1) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
+ bnx2x_panic();
+ }
+}
+
+static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+ val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+ BNX2X_ERR("CFC hw attention 0x%x\n", val);
+ /* CFC error attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from CFC\n");
+ }
+
+ if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+ BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
+ /* RQ_USDMDP_FIFO_OVERFLOW */
+ if (val & 0x18000)
+ BNX2X_ERR("FATAL error from PXP\n");
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
+ BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
+ }
+ }
+
+ if (attn & HW_INTERRUPT_ASSERT_SET_2) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+ (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
+ bnx2x_panic();
+ }
+}
+
+static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+ if (attn & BNX2X_PMF_LINK_ASSERT) {
+ int func = BP_FUNC(bp);
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+ bnx2x_read_mf_cfg(bp);
+ bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
+ func_mf_config[BP_ABS_FUNC(bp)].config);
+ val = SHMEM_RD(bp,
+ func_mb[BP_FW_MB_IDX(bp)].drv_status);
+
+ if (val & (DRV_STATUS_DCC_EVENT_MASK |
+ DRV_STATUS_OEM_EVENT_MASK))
+ bnx2x_oem_event(bp,
+ (val & (DRV_STATUS_DCC_EVENT_MASK |
+ DRV_STATUS_OEM_EVENT_MASK)));
+
+ if (val & DRV_STATUS_SET_MF_BW)
+ bnx2x_set_mf_bw(bp);
+
+ if (val & DRV_STATUS_DRV_INFO_REQ)
+ bnx2x_handle_drv_info_req(bp);
+
+ if (val & DRV_STATUS_VF_DISABLED)
+ bnx2x_schedule_iov_task(bp,
+ BNX2X_IOV_HANDLE_FLR);
+
+ if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+ bnx2x_pmf_update(bp);
+
+ if (bp->port.pmf &&
+ (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
+ bp->dcbx_enabled > 0)
+ /* start dcbx state machine */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
+ if (val & DRV_STATUS_AFEX_EVENT_MASK)
+ bnx2x_handle_afex_cmd(bp,
+ val & DRV_STATUS_AFEX_EVENT_MASK);
+ if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+ bnx2x_handle_eee_event(bp);
+
+ if (val & DRV_STATUS_OEM_UPDATE_SVID)
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_UPDATE_SVID, 0);
+
+ if (bp->link_vars.periodic_flags &
+ PERIODIC_FLAGS_LINK_EVENT) {
+ /* sync with link */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_vars.periodic_flags &=
+ ~PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(bp);
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+ bnx2x_link_report(bp);
+ }
+ /* Always call it here: bnx2x_link_report() will
+ * prevent duplicate link indications.
+ */
+ bnx2x__link_status_update(bp);
+ } else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+ BNX2X_ERR("MC assert!\n");
+ bnx2x_mc_assert(bp);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+ bnx2x_panic();
+
+ } else if (attn & BNX2X_MCP_ASSERT) {
+
+ BNX2X_ERR("MCP assert!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+ bnx2x_fw_dump(bp);
+
+ } else
+ BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+ }
+
+ if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+ BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+ if (attn & BNX2X_GRC_TIMEOUT) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
+ BNX2X_ERR("GRC time-out 0x%08x\n", val);
+ }
+ if (attn & BNX2X_GRC_RSV) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
+ BNX2X_ERR("GRC reserved 0x%08x\n", val);
+ }
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+ }
+}
+
+/*
+ * Bits map:
+ * 0-7 - Engine0 load counter.
+ * 8-15 - Engine1 load counter.
+ * 16 - Engine0 RESET_IN_PROGRESS bit.
+ * 17 - Engine1 RESET_IN_PROGRESS bit.
+ * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
+ * on the engine
+ * 19 - Engine1 ONE_IS_LOADED.
+ * 20 - Chip reset flow bit. When set, a non-leader must wait for the leaders
+ *	of both engines to complete (check both RESET_IN_PROGRESS bits, not
+ *	just the one belonging to its engine).
+ *
+ */
+#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
+#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
+#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
+#define BNX2X_GLOBAL_RESET_BIT 0x00040000
+
+/*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+ u32 val;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+ u32 val;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
+ * should be run under rtnl lock
+ */
+static bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
+static void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val;
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ /* Clear the bit */
+ val &= ~bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val;
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ /* Set the bit */
+ val |= bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = engine ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* return false if bit is set */
+ return (val & bit) ? false : true;
+}
+
+/*
+ * set pf load for the current pf.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_pf_load(struct bnx2x *bp)
+{
+ u32 val1, val;
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
+
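+	/* note: despite the "load counter" naming, the field is a per-PF
+	 * bit mask: each PF owns one bit (1 << pf_num) in its path's byte
+	 */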
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* set bit of that PF */
+ val1 |= (1 << bp->pf_num);
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+}
+
+/**
+ * bnx2x_clear_pf_load - clear pf load mark
+ *
+ * @bp: driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * whether other functions are still loaded
+ */
+bool bnx2x_clear_pf_load(struct bnx2x *bp)
+{
+ u32 val1, val;
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* clear bit of that PF */
+ val1 &= ~(1 << bp->pf_num);
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ return val1 != 0;
+}
+
+/*
+ * Read the load status for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
+{
+ u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+ u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
+
+ val = (val & mask) >> shift;
+
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
+ engine, val);
+
+ return val != 0;
+}
+
+static void _print_parity(struct bnx2x *bp, u32 reg)
+{
+ pr_cont(" [0x%08x] ", REG_RD(bp, reg));
+}
+
+static void _print_next_block(int idx, const char *blk)
+{
+ pr_cont("%s%s", idx ? ", " : "", blk);
+}
+
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
+{
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
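+	/* walk sig one set bit at a time; each handled bit is cleared at
+	 * the bottom of the loop, so the loop ends once sig is empty
+	 */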
+ for (i = 0; sig; i++) {
+ cur_bit = (0x1UL << i);
+ if (sig & cur_bit) {
+			res |= true; /* Each bit is a real error! */
+
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block((*par_num)++, "BRB");
+ _print_parity(bp,
+ BRB1_REG_BRB1_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PARSER");
+ _print_parity(bp, PRS_REG_PRS_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TSDM");
+ _print_parity(bp,
+ TSDM_REG_TSDM_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "SEARCHER");
+ _print_parity(bp, SRC_REG_SRC_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ _print_next_block((*par_num)++, "TCM");
+ _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "TSEMI");
+ _print_parity(bp,
+ TSEM_REG_TSEM_PRTY_STS_0);
+ _print_parity(bp,
+ TSEM_REG_TSEM_PRTY_STS_1);
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++, "XPB");
+ _print_parity(bp, GRCBASE_XPB +
+ PB_REG_PB_PRTY_STS);
+ break;
+ }
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return res;
+}
+
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
+ bool print)
+{
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
+ for (i = 0; sig; i++) {
+ cur_bit = (0x1UL << i);
+ if (sig & cur_bit) {
+			res |= true; /* Each bit is a real error! */
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "PBF");
+ _print_parity(bp, PBF_REG_PBF_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "QM");
+ _print_parity(bp, QM_REG_QM_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "TM");
+ _print_parity(bp, TM_REG_TM_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "XSDM");
+ _print_parity(bp,
+ XSDM_REG_XSDM_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "XCM");
+ _print_parity(bp, XCM_REG_XCM_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++,
+ "XSEMI");
+ _print_parity(bp,
+ XSEM_REG_XSEM_PRTY_STS_0);
+ _print_parity(bp,
+ XSEM_REG_XSEM_PRTY_STS_1);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++,
+ "DOORBELLQ");
+ _print_parity(bp,
+ DORQ_REG_DORQ_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "NIG");
+ if (CHIP_IS_E1x(bp)) {
+ _print_parity(bp,
+ NIG_REG_NIG_PRTY_STS);
+ } else {
+ _print_parity(bp,
+ NIG_REG_NIG_PRTY_STS_0);
+ _print_parity(bp,
+ NIG_REG_NIG_PRTY_STS_1);
+ }
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ if (print)
+ _print_next_block((*par_num)++,
+ "VAUX PCI CORE");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++,
+ "DEBUG");
+ _print_parity(bp, DBG_REG_DBG_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "USDM");
+ _print_parity(bp,
+ USDM_REG_USDM_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "UCM");
+ _print_parity(bp, UCM_REG_UCM_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++,
+ "USEMI");
+ _print_parity(bp,
+ USEM_REG_USEM_PRTY_STS_0);
+ _print_parity(bp,
+ USEM_REG_USEM_PRTY_STS_1);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "UPB");
+ _print_parity(bp, GRCBASE_UPB +
+ PB_REG_PB_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "CSDM");
+ _print_parity(bp,
+ CSDM_REG_CSDM_PRTY_STS);
+ }
+ break;
+ case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+ if (print) {
+ _print_next_block((*par_num)++, "CCM");
+ _print_parity(bp, CCM_REG_CCM_PRTY_STS);
+ }
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return res;
+}
+
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
+{
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
+ for (i = 0; sig; i++) {
+ cur_bit = (0x1UL << i);
+ if (sig & cur_bit) {
+			res = true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "CSEMI");
+ _print_parity(bp,
+ CSEM_REG_CSEM_PRTY_STS_0);
+ _print_parity(bp,
+ CSEM_REG_CSEM_PRTY_STS_1);
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block((*par_num)++, "PXP");
+ _print_parity(bp, PXP_REG_PXP_PRTY_STS);
+ _print_parity(bp,
+ PXP2_REG_PXP2_PRTY_STS_0);
+ _print_parity(bp,
+ PXP2_REG_PXP2_PRTY_STS_1);
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CFC");
+ _print_parity(bp,
+ CFC_REG_CFC_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "CDU");
+ _print_parity(bp, CDU_REG_CDU_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ _print_next_block((*par_num)++, "DMAE");
+ _print_parity(bp,
+ DMAE_REG_DMAE_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block((*par_num)++, "IGU");
+ if (CHIP_IS_E1x(bp))
+ _print_parity(bp,
+ HC_REG_HC_PRTY_STS);
+ else
+ _print_parity(bp,
+ IGU_REG_IGU_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "MISC");
+ _print_parity(bp,
+ MISC_REG_MISC_PRTY_STS);
+ break;
+ }
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return res;
+}
+
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+ int *par_num, bool *global,
+ bool print)
+{
+ bool res = false;
+ u32 cur_bit;
+ int i;
+
+ for (i = 0; sig; i++) {
+ cur_bit = (0x1UL << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ if (print)
+ _print_next_block((*par_num)++,
+ "MCP ROM");
+ *global = true;
+ res = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ if (print)
+ _print_next_block((*par_num)++,
+ "MCP UMP RX");
+ *global = true;
+ res = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ if (print)
+ _print_next_block((*par_num)++,
+ "MCP UMP TX");
+ *global = true;
+ res = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ (*par_num)++;
+				/* clear latched SCPAD PARITY from MCP */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+ 1UL << 10);
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return res;
+}
+
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+ int *par_num, bool print)
+{
+ u32 cur_bit;
+ bool res;
+ int i;
+
+ res = false;
+
+ for (i = 0; sig; i++) {
+ cur_bit = (0x1UL << i);
+ if (sig & cur_bit) {
+			res = true; /* Each bit is a real error! */
+ if (print) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ _print_next_block((*par_num)++,
+ "PGLUE_B");
+ _print_parity(bp,
+ PGLUE_B_REG_PGLUE_B_PRTY_STS);
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ _print_next_block((*par_num)++, "ATC");
+ _print_parity(bp,
+ ATC_REG_ATC_PRTY_STS);
+ break;
+ }
+ }
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return res;
+}
+
+static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ u32 *sig)
+{
+ bool res = false;
+
+ if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+ int par_num = 0;
+
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
+ sig[0] & HW_PRTY_ASSERT_SET_0,
+ sig[1] & HW_PRTY_ASSERT_SET_1,
+ sig[2] & HW_PRTY_ASSERT_SET_2,
+ sig[3] & HW_PRTY_ASSERT_SET_3,
+ sig[4] & HW_PRTY_ASSERT_SET_4);
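+		/* print the header only when something other than the MCP
+		 * scratchpad parity is asserted; a pure SCPAD event is
+		 * cleared silently in bnx2x_check_blocks_with_parity3()
+		 */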
+ if (print) {
+ if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
+ netdev_err(bp->dev,
+ "Parity errors detected in blocks: ");
+ } else {
+ print = false;
+ }
+ }
+ res |= bnx2x_check_blocks_with_parity0(bp,
+ sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity1(bp,
+ sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity2(bp,
+ sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+ res |= bnx2x_check_blocks_with_parity3(bp,
+ sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+ res |= bnx2x_check_blocks_with_parity4(bp,
+ sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
+
+ if (print)
+ pr_cont("\n");
+ }
+
+ return res;
+}
+
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp: driver handle
+ * @global: true if there was a global attention
+ * @print: show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
+{
+ struct attn_route attn = { {0} };
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+ /* Since MCP attentions can't be disabled inside the block, we need to
+ * read AEU registers to see whether they're currently disabled
+ */
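+	/* (keeps an MCP parity bit only when its AEU enable bit is set,
+	 *  and passes all non-MCP bits through unchanged)
+	 */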
+ attn.sig[3] &= ((REG_RD(bp,
+ !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+ : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+ MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+ ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
+
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, global, print, attn.sig);
+}
+
+static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+ if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+
+ val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+ BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
+ }
+ if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+ val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+ BNX2X_ERR("ATC hw attention 0x%x\n", val);
+ if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
+ }
+
+ if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+ BNX2X_ERR("FATAL parity attention set4 0x%x\n",
+ (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+ }
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
+ int port = BP_PORT(bp);
+ int index;
+ u32 reg_addr;
+ u32 val;
+ u32 aeu_mask;
+ bool global = false;
+
+ /* need to take HW lock because MCP or other port might also
+ try to handle this event */
+ bnx2x_acquire_alr(bp);
+
+ if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+		/* In case of parity errors don't handle attentions so that
+		 * the other function can also "see" the parity errors.
+		 */
+#else
+ bnx2x_panic();
+#endif
+ bnx2x_release_alr(bp);
+ return;
+ }
+
+ attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+ attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+ attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+ attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] =
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
+ else
+ attn.sig[4] = 0;
+
+ DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
+ attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
+
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ if (deasserted & (1 << index)) {
+ group_mask = &bp->attn_group[index];
+
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
+ index,
+ group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3],
+ group_mask->sig[4]);
+
+ bnx2x_attn_int_deasserted4(bp,
+ attn.sig[4] & group_mask->sig[4]);
+ bnx2x_attn_int_deasserted3(bp,
+ attn.sig[3] & group_mask->sig[3]);
+ bnx2x_attn_int_deasserted1(bp,
+ attn.sig[1] & group_mask->sig[1]);
+ bnx2x_attn_int_deasserted2(bp,
+ attn.sig[2] & group_mask->sig[2]);
+ bnx2x_attn_int_deasserted0(bp,
+ attn.sig[0] & group_mask->sig[0]);
+ }
+ }
+
+ bnx2x_release_alr(bp);
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_CLR);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
+
+ val = ~deasserted;
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, val);
+
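+	/* every deasserted line should have been tracked in attn_state */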
+ if (~bp->attn_state & deasserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, reg_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+ aeu_mask, deasserted);
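+	/* unmask the deasserted lines so future attentions can be signalled */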
+ aeu_mask |= (deasserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, reg_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state &= ~deasserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
+
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+ /* read local copy of bits */
+ u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits);
+ u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits_ack);
+ u32 attn_state = bp->attn_state;
+
+ /* look for changed bits */
+ u32 asserted = attn_bits & ~attn_ack & ~attn_state;
+ u32 deasserted = ~attn_bits & attn_ack & attn_state;
+
+ DP(NETIF_MSG_HW,
+ "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
+ attn_bits, attn_ack, asserted, deasserted);
+
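+	/* a bit that reads the same in attn_bits and attn_ack has no
+	 * transition in flight, so it must also match our attn_state copy;
+	 * otherwise we have lost track of an attention
+	 */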
+ if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+ BNX2X_ERR("BAD attention state\n");
+
+ /* handle bits that were raised */
+ if (asserted)
+ bnx2x_attn_int_asserted(bp, asserted);
+
+ if (deasserted)
+ bnx2x_attn_int_deasserted(bp, deasserted);
+}
+
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ u32 igu_addr = bp->igu_base_addr;
+ igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+ bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+ igu_addr);
+}
+
+static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+	/* No memory barriers needed: callers issue smp_wmb() before
+	 * updating the producer when ordering matters
+	 */
+ storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+}
+
+static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+ union event_ring_elem *elem)
+{
+ u8 err = elem->message.error;
+
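+	/* a CID below the CNIC range (other than the iSCSI L2 CID) belongs
+	 * to a regular ETH queue; return 1 so the caller handles it
+	 */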
+ if (!bp->cnic_eth_dev.starting_cid ||
+ (cid < bp->cnic_eth_dev.starting_cid &&
+ cid != bp->cnic_eth_dev.iscsi_l2_cid))
+ return 1;
+
+ DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+ if (unlikely(err)) {
+
+ BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+ cid);
+ bnx2x_panic_dump(bp, false);
+ }
+ bnx2x_cnic_cfc_comp(bp, cid, err);
+ return 0;
+}
+
+static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+ struct bnx2x_mcast_ramrod_params rparam;
+ int rc;
+
+ memset(&rparam, 0, sizeof(rparam));
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ netif_addr_lock_bh(bp->dev);
+
+ /* Clear pending state for the last command */
+ bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+ u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
+ u32 cid = echo & BNX2X_SWCID_MASK;
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+ /* Always push next commands out, don't wait here */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+
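+	/* the upper bits of echo encode the pending command type; the low
+	 * BNX2X_SWCID_MASK bits carry the SW connection id
+	 */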
+ switch (echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
+ if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
+ vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+ else
+ vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
+
+ break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+ vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+ break;
+ case BNX2X_FILTER_MCAST_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
+ /* This is only relevant for 57710 where multicast MACs are
+ * configured as unicast MACs using the same ramrod.
+ */
+ bnx2x_handle_mcast_eqe(bp);
+ return;
+ default:
+ BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
+ return;
+ }
+
+ rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+}
+
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+
+static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+ netif_addr_lock_bh(bp->dev);
+
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ /* Send rx_mode command again if was requested */
+ if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+ bnx2x_set_storm_rx_mode(bp);
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
+ union event_ring_elem *elem)
+{
+ if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
+ DP(BNX2X_MSG_SP,
+ "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
+ elem->message.data.vif_list_event.func_bit_map);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
+ elem->message.data.vif_list_event.func_bit_map);
+ } else if (elem->message.data.vif_list_event.echo ==
+ VIF_LIST_RULE_SET) {
+ DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
+ }
+}
+
+/* called with rtnl_lock */
+static void bnx2x_after_function_update(struct bnx2x *bp)
+{
+ int q, rc;
+ struct bnx2x_fastpath *fp;
+ struct bnx2x_queue_state_params queue_params = {NULL};
+ struct bnx2x_queue_update_params *q_update_params =
+ &queue_params.params.update;
+
+ /* Send Q update command with afex vlan removal values for all Qs */
+ queue_params.cmd = BNX2X_Q_CMD_UPDATE;
+
+ /* set silent vlan removal values according to vlan mode */
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &q_update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &q_update_params->update_flags);
+ __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+	/* in access mode, mask and value are 0 so all VLAN tags are stripped */
+ if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
+ q_update_params->silent_removal_value = 0;
+ q_update_params->silent_removal_mask = 0;
+ } else {
+ q_update_params->silent_removal_value =
+ (bp->afex_def_vlan_tag & VLAN_VID_MASK);
+ q_update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
+
+ for_each_eth_queue(bp, q) {
+ /* Set the appropriate Queue object */
+ fp = &bp->fp[q];
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* send the ramrod */
+ rc = bnx2x_queue_state_change(bp, &queue_params);
+ if (rc < 0)
+ BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+ q);
+ }
+
+ if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
+ fp = &bp->fp[FCOE_IDX(bp)];
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* clear pending completion bit */
+ __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+		/* mark the FCoE Q update as pending */
+ smp_mb__before_atomic();
+ set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+ smp_mb__after_atomic();
+
+ /* send Q update ramrod for FCoE Q */
+ rc = bnx2x_queue_state_change(bp, &queue_params);
+ if (rc < 0)
+			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+				  FCOE_IDX(bp));
+ } else {
+ /* If no FCoE ring - ACK MCP now */
+ bnx2x_link_report(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+ }
+}
+
+static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+ struct bnx2x *bp, u32 cid)
+{
+ DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+
+ if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
+ return &bnx2x_fcoe_sp_obj(bp, q_obj);
+ else
+ return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
+}
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+ u16 hw_cons, sw_cons, sw_prod;
+ union event_ring_elem *elem;
+ u8 echo;
+ u32 cid;
+ u8 opcode;
+ int rc, spqe_cnt = 0;
+ struct bnx2x_queue_sp_obj *q_obj;
+ struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+ struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
+
+ hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+	 * When we reach the next-page element we need to adjust hw_cons so
+	 * the loop condition below is met. The next element has the size of
+	 * a regular element, hence the increment by 1.
+	 */
+ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+ hw_cons++;
+
+	/* This function may never run in parallel with itself for a
+	 * specific bp, thus there is no need for a "paired" read memory
+	 * barrier here.
+	 */
+ sw_cons = bp->eq_cons;
+ sw_prod = bp->eq_prod;
+
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
+ hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
+
+ for (; sw_cons != hw_cons;
+ sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+ elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+ rc = bnx2x_iov_eq_sp_event(bp, elem);
+ if (!rc) {
+ DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+ rc);
+ goto next_spqe;
+ }
+
+ opcode = elem->message.opcode;
+
+ /* handle eq element */
+ switch (opcode) {
+ case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+ bnx2x_vf_mbx_schedule(bp,
+ &elem->message.data.vf_pf_event);
+ continue;
+
+ case EVENT_RING_OPCODE_STAT_QUERY:
+ DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+ "got statistics comp event %d\n",
+ bp->stats_comp++);
+ /* nothing to do with stats comp */
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_CFC_DEL:
+ /* handle according to cid range */
+ /*
+ * we may want to verify here that the bp state is
+ * HALTING
+ */
+
+			/* elem CID originates from FW, which stores it
+			 * little-endian; SW_CID() recovers the SW value
+			 */
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+
+ DP(BNX2X_MSG_SP,
+ "got delete ramrod for MULTI[%d]\n", cid);
+
+ if (CNIC_LOADED(bp) &&
+ !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+ goto next_spqe;
+
+ q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+ if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_STOP_TRAFFIC:
+ DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_STOP))
+ break;
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_START_TRAFFIC:
+ DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_START))
+ break;
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_UPDATE:
+ echo = elem->message.data.function_update_event.echo;
+ if (echo == SWITCH_UPDATE) {
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_SWITCH_UPDATE ramrod\n");
+ if (f_obj->complete_cmd(
+ bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
+ break;
+
+ } else {
+ int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
+ DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+ "AFEX: ramrod completed FUNCTION_UPDATE\n");
+ f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_AFEX_UPDATE);
+
+ /* We will perform the Queues update from
+ * sp_rtnl task as all Queue SP operations
+ * should run under rtnl_lock.
+ */
+ bnx2x_schedule_sp_rtnl(bp, cmd, 0);
+ }
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
+ f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_AFEX_VIFLISTS);
+ bnx2x_after_afex_vif_lists(bp, elem);
+ goto next_spqe;
+ case EVENT_RING_OPCODE_FUNCTION_START:
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_START ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_STOP:
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_STOP ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_SET_TIMESYNC:
+ DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
+ "got set_timesync ramrod completion\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_SET_TIMESYNC))
+ break;
+ goto next_spqe;
+ }
+
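+		/* opcode and BNX2X_STATE_* values occupy disjoint bit ranges,
+		 * so OR-ing them yields a unique label per (event, state) pair
+		 */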
+ switch (opcode | bp->state) {
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPENING_WAIT4_PORT):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
+ SW_CID(elem->message.data.eth_event.echo));
+ rss_raw->clear_pending(rss_raw);
+ break;
+
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_SET_MAC |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
+ bnx2x_handle_classification_eqe(bp, elem);
+ break;
+
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got mcast ramrod\n");
+ bnx2x_handle_mcast_eqe(bp);
+ break;
+
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
+ bnx2x_handle_rx_mode_eqe(bp);
+ break;
+ default:
+			/* unknown event: log an error and continue */
+ BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+ elem->message.opcode, bp->state);
+ }
+next_spqe:
+ spqe_cnt++;
+ } /* for */
+
+ smp_mb__before_atomic();
+ atomic_add(spqe_cnt, &bp->eq_spq_left);
+
+ bp->eq_cons = sw_cons;
+ bp->eq_prod = sw_prod;
+ /* Make sure that above mem writes were issued towards the memory */
+ smp_wmb();
+
+ /* update producer */
+ bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
+static void bnx2x_sp_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+
+ DP(BNX2X_MSG_SP, "sp task invoked\n");
+
+ /* make sure the atomic interrupt_occurred has been written */
+ smp_rmb();
+ if (atomic_read(&bp->interrupt_occurred)) {
+
+ /* what work needs to be performed? */
+ u16 status = bnx2x_update_dsb_idx(bp);
+
+ DP(BNX2X_MSG_SP, "status %x\n", status);
+ DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+ atomic_set(&bp->interrupt_occurred, 0);
+
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+
+ if (FCOE_INIT(bp) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /* Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
+ }
+
+		/* if status is non-zero then perhaps something went wrong */
+ if (unlikely(status))
+ DP(BNX2X_MSG_SP,
+ "got an unknown interrupt! (status 0x%x)\n", status);
+
+ /* ack status block only if something was actually handled */
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+ }
+
+ /* afex - poll to check if VIFSET_ACK should be sent to MFW */
+ if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
+ &bp->sp_state)) {
+ bnx2x_link_report(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+ }
+}
+
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct bnx2x *bp = netdev_priv(dev);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ if (CNIC_LOADED(bp)) {
+ struct cnic_ops *c_ops;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
+
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
+
+ return IRQ_HANDLED;
+}
+
+/* end of slow path */
+
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+ SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+ bp->fw_drv_pulse_wr_seq);
+}
+
+static void bnx2x_timer(struct timer_list *t)
+{
+ struct bnx2x *bp = from_timer(bp, t, timer);
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u16 drv_pulse;
+ u16 mcp_pulse;
+
+ ++bp->fw_drv_pulse_wr_seq;
+ bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+ drv_pulse = bp->fw_drv_pulse_wr_seq;
+ bnx2x_drv_pulse(bp);
+
+ mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
+ MCP_PULSE_SEQ_MASK);
+ /* The delta between driver pulse and mcp response
+ * should not get too big. If the MFW is more than 5 pulses
+ * behind, we should worry about it enough to generate an error
+ * log.
+ */
+ if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+			BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ drv_pulse, mcp_pulse);
+ }
+
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+ /* sample pf vf bulletin board for new posts from pf */
+ if (IS_VF(bp))
+ bnx2x_timer_sriov(bp);
+
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+/* nic init */
+
+/*
+ * nic init service functions
+ */
+
+static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+{
+ u32 i;
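+	/* use dword writes when both address and length are dword aligned,
+	 * byte-wide writes otherwise
+	 */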
+ if (!(len%4) && !(addr%4))
+ for (i = 0; i < len; i += 4)
+ REG_WR(bp, addr + i, fill);
+ else
+ for (i = 0; i < len; i++)
+ REG_WR8(bp, addr + i, fill);
+}
+
+/* helper: writes FP SP data to FW - data_size in dwords */
+static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+ int fw_sb_id,
+ u32 *sb_data_p,
+ u32 data_size)
+{
+ int index;
+ for (index = 0; index < data_size; index++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ sizeof(u32)*index,
+ *(sb_data_p + index));
+}
+
+static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+{
+ u32 *sb_data_p;
+ u32 data_size = 0;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+
+ /* disable the function first */
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_DISABLED;
+ sb_data_e2.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_DISABLED;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ }
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_SYNC_BLOCK_SIZE);
+}
+
+/* helper: writes SP SB data to FW */
+static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+ struct hc_sp_status_block_data *sp_sb_data)
+{
+ int func = BP_FUNC(bp);
+ int i;
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32),
+ *((u32 *)sp_sb_data + i));
+}
+
+static void bnx2x_zero_sp_sb(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ sp_sb_data.state = SB_DISABLED;
+ sp_sb_data.p_func.vf_valid = false;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_SYNC_BLOCK_SIZE);
+}
+
+static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+ int igu_sb_id, int igu_seg_id)
+{
+ hc_sm->igu_sb_id = igu_sb_id;
+ hc_sm->igu_seg_id = igu_seg_id;
+ hc_sm->timer_value = 0xFF;
+ hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+/* allocates state machine ids. */
+static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id)
+{
+ int igu_seg_id;
+
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p;
+ int data_size;
+ u32 *sb_data_p;
+
+ if (CHIP_INT_MODE_IS_BC(bp))
+ igu_seg_id = HC_SEG_ACCESS_NORM;
+ else
+ igu_seg_id = IGU_SEG_ACCESS_NORM;
+
+ bnx2x_zero_fp_sb(bp, fw_sb_id);
+
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_ENABLED;
+ sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e2.common.p_func.vf_id = vfid;
+ sb_data_e2.common.p_func.vf_valid = vf_valid;
+ sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e2.common.same_igu_sb_1b = true;
+ sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e2.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_ENABLED;
+ sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e1x.common.p_func.vf_id = 0xff;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e1x.common.same_igu_sb_1b = true;
+ sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e1x.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
+ }
+
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
+ igu_sb_id, igu_seg_id);
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
+ igu_sb_id, igu_seg_id);
+
+ DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
+
+ /* write indices to HW - PCI guarantees endianity of regpairs */
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+}
+
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
+ u16 tx_usec, u16 rx_usec)
+{
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
+ false, rx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
+ tx_usec);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ dma_addr_t mapping = bp->def_status_blk_mapping;
+ int igu_sp_sb_index;
+ int igu_seg_id;
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int reg_offset, reg_offset_en5;
+ u64 section;
+ int index;
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ igu_sp_sb_index = DEF_SB_IGU_ID;
+ igu_seg_id = HC_SEG_ACCESS_DEF;
+ } else {
+ igu_sp_sb_index = bp->igu_dsb_id;
+ igu_seg_id = IGU_SEG_ACCESS_DEF;
+ }
+
+ /* ATTN */
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ atten_status_block);
+ def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
+
+ bp->attn_state = 0;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ int sindex;
+		/* take care of sig[0]..sig[3]; sig[4] is handled below */
+ for (sindex = 0; sindex < 4; sindex++)
+ bp->attn_group[index].sig[sindex] =
+ REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
+
+ if (!CHIP_IS_E1x(bp))
+ /*
+ * enable5 is separate from the rest of the registers,
+ * and therefore the address skip is 4
+ * and not 16 between the different groups
+ */
+ bp->attn_group[index].sig[4] = REG_RD(bp,
+ reg_offset_en5 + 0x4*index);
+ else
+ bp->attn_group[index].sig[4] = 0;
+ }
+
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+ HC_REG_ATTN_MSG0_ADDR_L);
+
+ REG_WR(bp, reg_offset, U64_LO(section));
+ REG_WR(bp, reg_offset + 4, U64_HI(section));
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+ }
+
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ sp_sb);
+
+ bnx2x_zero_sp_sb(bp);
+
+ /* PCI guarantees endianity of regpairs */
+ sp_sb_data.state = SB_ENABLED;
+ sp_sb_data.host_sb_addr.lo = U64_LO(section);
+ sp_sb_data.host_sb_addr.hi = U64_HI(section);
+ sp_sb_data.igu_sb_id = igu_sp_sb_index;
+ sp_sb_data.igu_seg_id = igu_seg_id;
+ sp_sb_data.p_func.pf_id = func;
+ sp_sb_data.p_func.vnic_id = BP_VN(bp);
+ sp_sb_data.p_func.vf_id = 0xff;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
+ bp->tx_ticks, bp->rx_ticks);
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+ spin_lock_init(&bp->spq_lock);
+ atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
+
+ bp->spq_prod_idx = 0;
+ bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+}
+
+static void bnx2x_init_eq_ring(struct bnx2x *bp)
+{
+ int i;
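+	/* chain the last element of each page to the start of the next one;
+	 * the modulo wraps the final page back to the first, closing the ring
+	 */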
+ for (i = 1; i <= NUM_EQ_PAGES; i++) {
+ union event_ring_elem *elem =
+ &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
+
+ elem->next_page.addr.hi =
+ cpu_to_le32(U64_HI(bp->eq_mapping +
+ BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
+ elem->next_page.addr.lo =
+ cpu_to_le32(U64_LO(bp->eq_mapping +
+ BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
+ }
+ bp->eq_cons = 0;
+ bp->eq_prod = NUM_EQ_DESC;
+ bp->eq_cons_sb = BNX2X_EQ_INDEX;
+	/* we want a warning message before the ring is actually overrun */
+ atomic_set(&bp->eq_spq_left,
+ min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
+}
+
+/* called with netif_addr_lock_bh() */
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
+{
+ struct bnx2x_rx_mode_ramrod_params ramrod_param;
+ int rc;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Prepare ramrod parameters */
+ ramrod_param.cid = 0;
+ ramrod_param.cl_id = cl_id;
+ ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+ ramrod_param.func_id = BP_FUNC(bp);
+
+ ramrod_param.pstate = &bp->sp_state;
+ ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+ ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.rx_mode_flags = rx_mode_flags;
+
+ ramrod_param.rx_accept_flags = rx_accept_flags;
+ ramrod_param.tx_accept_flags = tx_accept_flags;
+
+ rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+ if (rc < 0) {
+ BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
+{
+ /* Clear the flags first */
+ *rx_accept_flags = 0;
+ *tx_accept_flags = 0;
+
+ switch (rx_mode) {
+ case BNX2X_RX_MODE_NONE:
+ /*
+ * 'drop all' supersedes any accept flags that may have been
+ * passed to the function.
+ */
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+		/* By the definition of SI mode, an interface in promiscuous
+		 * mode should receive both matched and unmatched (resolved
+		 * at port level) unicast packets.
+		 */
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+
+ if (IS_MF_SI(bp))
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
+ else
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
+ break;
+ default:
+ BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ int rc;
+
+ if (!NO_FCOE(bp))
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+ rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+ &tx_accept_flags);
+ if (rc)
+ return rc;
+
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ __set_bit(RAMROD_TX, &ramrod_flags);
+
+ return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+ rx_accept_flags, tx_accept_flags,
+ ramrod_flags);
+}
+
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+ int i;
+
+ /* Zero this manually as its initialization is
+ currently missing in the initTool */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_AGG_DATA_OFFSET + i * 4, 0);
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
+ CHIP_INT_MODE_IS_BC(bp) ?
+ HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
+ }
+}
+
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ bnx2x_init_internal_common(bp);
+ fallthrough;
+
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ /* nothing to do */
+ fallthrough;
+
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ /* internal memory per function is
+ initialized inside bnx2x_pf_init */
+ break;
+
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ break;
+ }
+}
+
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
+}
+
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
+}
+
+static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return BP_L_ID(fp->bp) + fp->index;
+ else /* We want Client ID to be the same as IGU SB ID for 57712 */
+ return bnx2x_fp_igu_sb_id(fp);
+}
+
+static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 cos;
+ unsigned long q_type = 0;
+ u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
+ fp->rx_queue = fp_idx;
+ fp->cid = fp_idx;
+ fp->cl_id = bnx2x_fp_cl_id(fp);
+ fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+ fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
+ /* qZone id equals to FW (per path) client id */
+ fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
+
+ /* init shortcut */
+ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Setup SB indices */
+ fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
+
+ /* init tx data */
+ for_each_cos_in_tx_queue(fp, cos) {
+ bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+ FP_COS_TO_TXQ(fp, cos, bp),
+ BNX2X_TX_SB_INDEX_BASE + cos, fp);
+ cids[cos] = fp->txdata_ptr[cos]->cid;
+ }
+
+ /* nothing more for vf to do here */
+ if (IS_VF(bp))
+ return;
+
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_update_fpsb_idx(fp);
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+ fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+	/* Configure classification DBs: Always enable Tx switching */
+ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
+
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+
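+/* Chain the Tx BD pages into a ring: the last BD of each page points to the
+ * first BD of the next page (wrapping around), then reset the doorbell data
+ * and all producer/consumer indices.
+ */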
+static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
+{
+ int i;
+
+ for (i = 1; i <= NUM_TX_RINGS; i++) {
+ struct eth_tx_next_bd *tx_next_bd =
+ &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+ tx_next_bd->addr_hi =
+ cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+ tx_next_bd->addr_lo =
+ cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+ }
+
+ *txdata->tx_cons_sb = cpu_to_le16(0);
+
+ SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+ txdata->tx_db.data.zero_fill1 = 0;
+ txdata->tx_db.data.prod = 0;
+
+ txdata->tx_pkt_prod = 0;
+ txdata->tx_pkt_cons = 0;
+ txdata->tx_bd_prod = 0;
+ txdata->tx_bd_cons = 0;
+ txdata->tx_pkt = 0;
+}
+
+static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_tx_queue_cnic(bp, i)
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
+}
+
+static void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+ int i;
+ u8 cos;
+
+ for_each_eth_queue(bp, i)
+ for_each_cos_in_tx_queue(&bp->fp[i], cos)
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
+}
+
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
+
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+	/* qZone id equals the FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+
+void bnx2x_nic_init_cnic(struct bnx2x *bp)
+{
+ if (!NO_FCOE(bp))
+ bnx2x_init_fcoe_fp(bp);
+
+ bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+ BNX2X_VF_ID_INVALID, false,
+ bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
+
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings_cnic(bp);
+ bnx2x_init_tx_rings_cnic(bp);
+
+ /* flush all */
+ mb();
+}
+
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
+{
+ int i;
+
+ /* Setup NIC internals and enable interrupts */
+ for_each_eth_queue(bp, i)
+ bnx2x_init_eth_fp(bp, i);
+
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+
+ if (IS_PF(bp)) {
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base,
+ bp->common.shmem2_base, BP_PORT(bp));
+
+ /* initialize the default status block and sp ring */
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_sp_ring(bp);
+ } else {
+ bnx2x_memset_stats(bp);
+ }
+}
+
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
+{
+ bnx2x_init_eq_ring(bp);
+ bnx2x_init_internal(bp, load_code);
+ bnx2x_pf_init(bp);
+ bnx2x_stats_init(bp);
+
+ /* flush all before enabling interrupts */
+ mb();
+
+ bnx2x_int_enable(bp);
+
+ /* Check for SPIO5 */
+ bnx2x_attn_int_deasserted0(bp,
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+ AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+/* gzip service functions */
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
+ if (bp->gunzip_buf == NULL)
+ goto gunzip_nomem1;
+
+ bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+ if (bp->strm == NULL)
+ goto gunzip_nomem2;
+
+ bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
+ if (bp->strm->workspace == NULL)
+ goto gunzip_nomem3;
+
+ return 0;
+
+gunzip_nomem3:
+ kfree(bp->strm);
+ bp->strm = NULL;
+
+gunzip_nomem2:
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+	BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
+ return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+ if (bp->strm) {
+ vfree(bp->strm->workspace);
+ kfree(bp->strm);
+ bp->strm = NULL;
+ }
+
+ if (bp->gunzip_buf) {
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+ }
+}
+
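+/* Inflate the gzip'ed image @zbuf of length @len into bp->gunzip_buf (at
+ * most FW_BUF_SIZE bytes). Returns 0 on success, with the output length, in
+ * dwords, stored in bp->gunzip_outlen.
+ */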
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+{
+ int n, rc;
+
+ /* check gzip header */
+ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+ BNX2X_ERR("Bad gzip header\n");
+ return -EINVAL;
+ }
+
+ n = 10;
+
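+	/* The fixed gzip header is 10 bytes (RFC 1952). If the FNAME flag
+	 * (bit 3 of the FLG byte) is set, a zero-terminated original file
+	 * name follows and is skipped as well; the other optional header
+	 * fields are assumed absent in the firmware images.
+	 */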
+#define FNAME 0x8
+
+ if (zbuf[3] & FNAME)
+ while ((zbuf[n++] != 0) && (n < len));
+
+ bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+ bp->strm->avail_in = len - n;
+ bp->strm->next_out = bp->gunzip_buf;
+ bp->strm->avail_out = FW_BUF_SIZE;
+
+ rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+ if (rc != Z_OK)
+ return rc;
+
+ rc = zlib_inflate(bp->strm, Z_FINISH);
+ if ((rc != Z_OK) && (rc != Z_STREAM_END))
+ netdev_err(bp->dev, "Firmware decompression error: %s\n",
+ bp->strm->msg);
+
+ bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+ if (bp->gunzip_outlen & 0x3)
+ netdev_err(bp->dev,
+ "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
+ bp->gunzip_outlen >>= 2;
+
+ zlib_inflateEnd(bp->strm);
+
+ if (rc == Z_STREAM_END)
+ return 0;
+
+ return rc;
+}
+
+/* nic load/unload */
+
+/*
+ * General service functions
+ */
+
+/* Send a NIG loopback debug packet: two 3-dword DMAE writes, the first
+ * marked SOP and the second EOP, forming one minimal 0x10-byte frame.
+ */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+ u32 wb_write[3];
+
+ /* Ethernet source and destination addresses */
+ wb_write[0] = 0x55555555;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x20; /* SOP */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+
+ /* NON-IP protocol */
+ wb_write[0] = 0x09000000;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
+
+/* Some of the internal memories are not directly readable from the driver.
+ * To test them we send debug packets.
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+ int factor;
+ int count, i;
+ u32 val = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ factor = 120;
+ else if (CHIP_REV_IS_EMUL(bp))
+ factor = 200;
+ else
+ factor = 1;
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send Ethernet packet */
+ bnx2x_lb_pckt(bp);
+
+	/* TODO: should the NIG statistics be reset here? */
+ /* Wait until NIG register shows 1 packet of size 0x10 */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0x10)
+ break;
+
+ usleep_range(10000, 20000);
+ count--;
+ }
+ if (val != 0x10) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -1;
+ }
+
+ /* Wait until PRS register shows 1 packet */
+ count = 1000 * factor;
+ while (count) {
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val == 1)
+ break;
+
+ usleep_range(10000, 20000);
+ count--;
+ }
+ if (val != 0x1) {
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+ return -2;
+ }
+
+ /* Reset and init BRB, PRS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+
+ DP(NETIF_MSG_HW, "part2\n");
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send 10 Ethernet packets */
+ for (i = 0; i < 10; i++)
+ bnx2x_lb_pckt(bp);
+
+ /* Wait until NIG register shows 10 + 1
+ packets of size 11*0x10 = 0xb0 */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0xb0)
+ break;
+
+ usleep_range(10000, 20000);
+ count--;
+ }
+ if (val != 0xb0) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -3;
+ }
+
+ /* Wait until PRS register shows 2 packets */
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val != 2)
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* Write 1 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+	/* Wait until PRS register shows 3 packets */
+	msleep(10 * factor);
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val != 3)
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* clear NIG EOP FIFO */
+ for (i = 0; i < 11; i++)
+ REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+ val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+ if (val != 1) {
+ BNX2X_ERR("clear of NIG failed\n");
+ return -4;
+ }
+
+ /* Reset and init BRB, PRS, NIG */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+ if (!CNIC_SUPPORT(bp))
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+
+ /* Enable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
+
+ DP(NETIF_MSG_HW, "done\n");
+
+ return 0; /* OK */
+}
+
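+/* Unmask attention interrupts in the HW blocks. Writing 0 to an INT_MASK
+ * register leaves all of its sources unmasked; the non-zero values below
+ * keep specific known-benign sources masked, as the inline comments note.
+ */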
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
+{
+ u32 val;
+
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
+ else
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+ /*
+ * mask read length error interrupts in brb for parser
+ * (parsing unit and 'checksum and crc' unit)
+ * these errors are legal (PU reads fixed length and CAC can cause
+ * read length error on truncated packets)
+ */
+ REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
+ REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+ REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+ REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+ REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+ REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+ REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
+ val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
+ if (!CHIP_IS_E1x(bp))
+ val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
+
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+ REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+
+ if (!CHIP_IS_E1x(bp))
+ /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+ REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
+ REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+ REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+ REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
+}
+
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+ u32 val = 0x1400;
+
+ /* reset_common */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffff7f);
+
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+ bp->dmae_ready = 0;
+ spin_lock_init(&bp->dmae_lock);
+}
+
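+/* Program the PXP arbiter: the write order comes from the PCIe
+ * Max_Payload_Size field and the read order from Max_Read_Request_Size,
+ * unless the latter is overridden through bp->mrrs.
+ */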
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+ u16 devctl;
+ int r_order, w_order;
+
+ pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
+ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+ if (bp->mrrs == -1)
+ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ else {
+ DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+ r_order = bp->mrrs;
+ }
+
+ bnx2x_init_pxp_arb(bp, r_order, w_order);
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+ int is_required;
+ u32 val;
+ int port;
+
+ if (BP_NOMCP(bp))
+ return;
+
+ is_required = 0;
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_FAN_FAILURE_MASK;
+
+ if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+ is_required = 1;
+
+ /*
+ * The fan failure mechanism is usually related to the PHY type since
+ * the power consumption of the board is affected by the PHY. Currently,
+ * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
+ */
+ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+ for (port = PORT_0; port < PORT_MAX; port++) {
+ is_required |=
+ bnx2x_fan_failure_det_req(
+ bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base,
+ port);
+ }
+
+ DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
+
+ if (is_required == 0)
+ return;
+
+ /* Fan failure is indicated by SPIO 5 */
+ bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
+
+ /* set to active low mode */
+ val = REG_RD(bp, MISC_REG_SPIO_INT);
+ val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
+ REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+ /* enable interrupt to signal the IGU */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ val |= MISC_SPIO_SPIO5;
+ REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+}
+
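+/* Quiesce this PF in HW: clear its function-enable bit in the IGU, its
+ * master-enable in PGLUE_B and its weak-enable in the CFC.
+ */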
+void bnx2x_pf_disable(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+	val &= ~IGU_PF_CONF_FUNC_EN;
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static void bnx2x__common_init_phy(struct bnx2x *bp)
+{
+ u32 shmem_base[2], shmem2_base[2];
+ /* Avoid common init in case MFW supports LFA */
+ if (SHMEM2_RD(bp, size) >
+ (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+ return;
+ shmem_base[0] = bp->common.shmem_base;
+ shmem2_base[0] = bp->common.shmem2_base;
+ if (!CHIP_IS_E1x(bp)) {
+ shmem_base[1] =
+ SHMEM2_RD(bp, other_shmem_base_addr);
+ shmem2_base[1] =
+ SHMEM2_RD(bp, other_shmem2_base_addr);
+ }
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+ bp->common.chip_id);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+ /* make sure this value is 0 */
+ REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+ bnx2x_config_endianity(bp, 1);
+#else
+ bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+ bnx2x_config_endianity(bp, 0);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
+
+ /*
+ * take the RESET lock to protect undi_unload flow from accessing
+ * registers while we're resetting the chip
+ */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_reset_common(bp);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+
+ val = 0xfffc;
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ u8 abs_func_id;
+
+		/* In 4-port and 2-port modes we need to turn off the
+		 * master-enable for everyone, and after that turn it back
+		 * on for ourselves. So, regardless of whether this is a
+		 * multi-function setup, always disable it for all functions
+		 * on the given path: 0,2,4,6 for path 0 and 1,3,5,7 for
+		 * path 1.
+		 */
+ for (abs_func_id = BP_PATH(bp);
+ abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+ if (abs_func_id == BP_ABS_FUNC(bp)) {
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+ 1);
+ continue;
+ }
+
+ bnx2x_pretend_func(bp, abs_func_id);
+ /* clear pf enable */
+ bnx2x_pf_disable(bp);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
+ if (CHIP_IS_E1(bp)) {
+ /* enable HW interrupt from PXP on USDM overflow
+ bit 16 on INT_MASK_0 */
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
+ bnx2x_init_pxp(bp);
+ bnx2x_set_endianity(bp);
+ bnx2x_ilt_init_page_size(bp, INITOP_SET);
+
+ if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+
+	/* let the HW do its magic ... */
+ msleep(100);
+ /* finish PXP init */
+ val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 CFG failed\n");
+ return -EBUSY;
+ }
+ val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 RD_INIT failed\n");
+ return -EBUSY;
+ }
+
+ /* Timers bug workaround E2 only. We need to set the entire ILT to
+ * have entries with value "0" and valid bit on.
+ * This needs to be done by the first PF that is loaded in a path
+ * (i.e. common phase)
+ */
+ if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". A real-life example scenario: a driver being
+ * loaded/unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The Workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ * 1. First PF driver which loads on a path will:
+ * a. After taking the chip out of reset, by using pretend,
+ * it will write "0" to the following registers of
+ * the other vnics.
+ * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ * And for itself it will write '1' to
+ * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ * dmae-operations (writing to pram for example.)
+ *      note: this could be done for functions 6,7 only, but it is
+ *      cleaner this way.
+ * b. Write zero+valid to the entire ILT.
+ * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ * VNIC3 (of that port). The range allocated will be the
+ * entire ILT. This is needed to prevent ILT range error.
+ * 2. Any PF driver load flow:
+ * a. ILT update with the physical addresses of the allocated
+ * logical pages.
+ * b. Wait 20msec. - note that this timeout is needed to make
+ * sure there are no requests in one of the PXP internal
+ * queues with "old" ILT addresses.
+ * c. PF enable in the PGLC.
+ * d. Clear the was_error of the PF in the PGLC. (could have
+ * occurred while driver was down)
+ * e. PF enable in the CFC (WEAK + STRONG)
+ * f. Timers scan enable
+ * 3. PF driver unload flow:
+ * a. Clear the Timers scan_en.
+ * b. Polling for scan_on=0 for that PF.
+ * c. Clear the PF enable bit in the PXP.
+ * d. Clear the PF enable in the CFC (WEAK + STRONG)
+ * e. Write zero+valid to all ILT entries (The valid bit must
+ * stay set)
+ * f. If this is VNIC 3 of a port then also init
+ * first_timers_ilt_entry to zero and last_timers_ilt_entry
+ * to the last entry in the ILT.
+ *
+ * Notes:
+ * Currently the PF error in the PGLC is non-recoverable.
+ * In the future there will be a recovery routine for this error.
+ * Currently attention is masked.
+ * Having an MCP lock on the load/unload process does not guarantee that
+ * there is no Timer disable during Func6/7 enable. This is because the
+ * Timers scan is currently being cleared by the MCP on FLR.
+ * Step 2.d can be done only for PF6/7 and the driver can also check if
+ * there is an error before clearing it. But the flow above is simpler and
+ * more general.
+ * All ILT entries are written by zero+valid and not just PF6/7
+ * ILT entries since in the future the ILT entries allocation for
+ * PF-s might be dynamic.
+ */
+ struct ilt_client_info ilt_cli;
+ struct bnx2x_ilt ilt;
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ memset(&ilt, 0, sizeof(struct bnx2x_ilt));
+
+ /* initialize dummy TM client */
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+		/* Step 1: set zeroes to all ilt page entries with valid bit on
+		 * Step 2: set the timers first/last ilt entry to point
+		 * to the entire range to prevent ILT range error for 3rd/4th
+		 * vnic (this code assumes the existence of the vnic)
+		 *
+		 * both steps are performed by a call to
+		 * bnx2x_ilt_client_init_op() with a dummy TM client
+		 *
+		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+		 * and its counterpart are split registers
+		 */
+ bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
+ bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+ }
+
+ REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+ REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
+ (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
+
+		/* let the HW do its magic ... */
+ do {
+ msleep(200);
+ val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
+ } while (factor-- && (val != 1));
+
+ if (val != 1) {
+ BNX2X_ERR("ATC_INIT failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+
+ bnx2x_iov_init_dmae(bp);
+
+ /* clean the DMAE memory */
+ bp->dmae_ready = 1;
+ bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
+
+ bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
+
+ bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+ bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
+
+ /* QM queues pointers table */
+ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
+
+ /* soft reset pulse */
+ REG_WR(bp, QM_REG_SOFT_RESET, 1);
+ REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
+
+ if (!CHIP_REV_IS_SLOW(bp))
+ /* enable hw interrupt from doorbell Q */
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+ REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
+
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
+ if (IS_MF_AFEX(bp)) {
+ /* configure that VNTag and VLAN headers must be
+ * received in afex mode
+ */
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
+ REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
+ REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
+ REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
+ REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
+ } else {
+ /* Bit-map indicating which L2 hdrs may appear
+ * after the basic Ethernet header
+ */
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset VFC memories */
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+
+ msleep(20);
+ }
+
+ bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
+
+ /* sync semi rtc */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0x80000000);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ 0x80000000);
+
+ bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ if (IS_MF_AFEX(bp)) {
+ /* configure that VNTag and VLAN headers must be
+ * sent in afex mode
+ */
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
+ REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
+ REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
+ REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
+ REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
+ } else {
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+ }
+ }
+
+ REG_WR(bp, SRC_REG_SOFT_RST, 1);
+
+ bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
+ if (CNIC_SUPPORT(bp)) {
+ REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+ REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+ REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+ REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+ REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+ REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+ REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+ REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+ }
+ REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+ if (sizeof(union cdu_context) != 1024)
+ /* we currently assume that a context is 1024 bytes */
+ dev_alert(&bp->pdev->dev,
+ "please adjust the size of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
+
+ bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
+ val = (4 << 24) + (0 << 12) + 1024;
+ REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+ bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
+ REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+ /* enable context validation interrupt from CFC */
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+ /* set the thresholds to prevent CFC/CDU race */
+ REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+ bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
+
+ bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2814, 0xffffffff);
+ REG_WR(bp, 0x3820, 0xffffffff);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+ (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+ PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+ (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+ (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+ }
+
+ bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
+ if (!CHIP_IS_E1(bp)) {
+		/* in E3 this is done in the per-port section */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (CHIP_IS_E1H(bp))
+ /* not applicable for E2 (and above ...) */
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
+
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(200);
+
+ /* finish CFC init */
+ val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC LL_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC AC_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC CAM_INIT failed\n");
+ return -EBUSY;
+ }
+ REG_WR(bp, CFC_REG_DEBUG0, 0);
+
+ if (CHIP_IS_E1(bp)) {
+		/* read the NIG statistic to see if this is
+		 * the first time the device comes up since power-up
+		 */
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+
+ /* do internal memory self test */
+ if ((val == 0) && bnx2x_int_mem_test(bp)) {
+ BNX2X_ERR("internal mem self test failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_setup_fan_failure_detection(bp);
+
+ /* clear PXP2 attentions */
+ REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+ bnx2x_enable_blocks_attention(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ if (!BP_NOMCP(bp)) {
+ if (CHIP_IS_E1x(bp))
+ bnx2x__common_init_phy(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
+ if (SHMEM2_HAS(bp, netproc_fw_ver))
+ SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+ return 0;
+}
+
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+ int rc = bnx2x_init_hw_common(bp);
+
+ if (rc)
+ return rc;
+
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
+ if (!BP_NOMCP(bp))
+ bnx2x__common_init_phy(bp);
+
+ return 0;
+}
+
+static int bnx2x_init_hw_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
+ u32 low, high;
+ u32 val, reg;
+
+ DP(NETIF_MSG_HW, "starting port init port %d\n", port);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+	/* Timers bug workaround: the common phase disables the pf_master bit
+	 * in pglue, so we need to enable it here before any DMAE accesses are
+	 * attempted. Therefore the enable-master was manually added to the
+	 * port phase (it also happens in the function phase)
+	 */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+
+ /* QM cid (connection) count */
+ bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
+
+ if (CNIC_SUPPORT(bp)) {
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+ }
+
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
+ if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
+
+ if (IS_MF(bp))
+ low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+ else if (bp->dev->mtu > 4096) {
+ if (bp->flags & ONE_PORT_FLAG)
+ low = 160;
+ else {
+ val = bp->dev->mtu;
+ /* (24*1024 + val*4)/256 */
+ low = 96 + (val/64) +
+ ((val % 64) ? 1 : 0);
+ }
+ } else
+ low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+ high = low + 56; /* 14*1024/256 */
+ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+ }
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ REG_WR(bp, (BP_PORT(bp) ?
+ BRB1_REG_MAC_GUARANTIED_1 :
+ BRB1_REG_MAC_GUARANTIED_0), 40);
+
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ if (CHIP_IS_E3B0(bp)) {
+ if (IS_MF_AFEX(bp)) {
+ /* configure headers for AFEX mode */
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+ PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
+ PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_MUST_HAVE_HDRS_PORT_1 :
+ PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
+ } else {
+			/* Ovlan exists only if we are in multi-function +
+			 * switch-dependent mode; in switch-independent mode
+			 * there are no ovlan headers
+			 */
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+ PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+ (bp->path_has_ovlan ? 7 : 6));
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+
+ if (CHIP_IS_E1x(bp)) {
+ /* configure PBF to work without PAUSE mtu 9000 */
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+ /* update init credit */
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
+
+ /* probe changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+ udelay(50);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+ }
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+	/* init aeu_mask_attn_func_0/1:
+	 * - SF mode: bits 3-7 are masked; only bits 0-2 are in use
+	 * - MF mode: bit 3 is masked; bits 0-2 are in use as in SF,
+	 *   bits 4-7 are used for "per vn group attention"
+	 */
+ val = IS_MF(bp) ? 0xF7 : 0x7;
+ /* Enable DCBX attention for all but E1 */
+ val |= CHIP_IS_E1(bp) ? 0 : 0x10;
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+
+ /* SCPAD_PARITY should NOT trigger close the gates */
+ reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+ reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+ REG_WR(bp, reg,
+ REG_RD(bp, reg) &
+ ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* Bit-map indicating which L2 hdrs may appear after the
+ * basic Ethernet header
+ */
+ if (IS_MF_AFEX(bp))
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
+ else
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC,
+ IS_MF_SD(bp) ? 7 : 6);
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_LLH1_MF_MODE :
+ NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+ if (!CHIP_IS_E1(bp)) {
+ /* 0x2 disable mf_ov, 0x1 enable */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+ (IS_MF_SD(bp) ? 0x1 : 0x2));
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = 0;
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = 1;
+ break;
+ case MULTI_FUNCTION_SI:
+ case MULTI_FUNCTION_AFEX:
+ val = 2;
+ break;
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
+ NIG_REG_LLH0_CLS_TYPE), val);
+ }
+ {
+ REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+ }
+ }
+
+ /* If SPIO5 is set to generate interrupts, enable it for this port */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ if (val & MISC_SPIO_SPIO5) {
+ u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ val = REG_RD(bp, reg_addr);
+ val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_addr, val);
+ }
+
+ if (CHIP_IS_E3B0(bp))
+ bp->flags |= PTP_SUPPORTED;
+
+ return 0;
+}
+
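+/* Write one 8-byte ILT entry for @index as a 2-dword DMAE write */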
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
+{
+ int reg;
+ u32 wb_write[2];
+
+ if (CHIP_IS_E1(bp))
+ reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+
+ wb_write[0] = ONCHIP_ADDR1(addr);
+ wb_write[1] = ONCHIP_ADDR2(addr);
+ REG_WR_DMAE(bp, reg, wb_write, 2);
+}
+
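+/* Issue a status block cleanup command to the IGU through the GRC-mapped
+ * command registers, then poll the per-SB ack bit until the cleanup
+ * completes or the 100 * 20 msec budget expires.
+ */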
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
+{
+ u32 data, ctl, cnt = 100;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+ u32 sb_bit = 1 << (idu_sb_id%32);
+ u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+ /* Not supported in BC mode */
+ if (CHIP_INT_MODE_IS_BC(bp))
+ return;
+
+ data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+ << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
+ IGU_REGULAR_CLEANUP_SET |
+ IGU_REGULAR_BCLEANUP;
+
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ data, igu_addr_data);
+ REG_WR(bp, igu_addr_data, data);
+ barrier();
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ barrier();
+
+ /* wait for clean up to finish */
+ while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+ msleep(20);
+
+ if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+ DP(NETIF_MSG_HW,
+ "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
+ idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+ }
+}
+
+static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+{
+ bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
+}
+
+static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+{
+	u32 i, base = FUNC_ILT_BASE(func);
+
+	for (i = base; i < base + ILT_PER_FUNC; i++)
+ bnx2x_ilt_wr(bp, i, 0);
+}
+
+static void bnx2x_init_searcher(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+ /* T1 hash bits value determines the T1 number of entries */
+ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+}
+
+static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
+{
+ int rc;
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_switch_update_params *switch_update_params =
+ &func_params.params.switch_update;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Function parameters */
+ __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+ &switch_update_params->changes);
+ if (suspend)
+ __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+ &switch_update_params->changes);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+
+ return rc;
+}
+
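+/* Take the parser out of NIC mode: close the traffic inflow from the network
+ * and the BMC, suspend Tx switching so that the BRB drains, clear
+ * PRS_REG_NIC_MODE and then restore the inflow and Tx switching.
+ */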
+static int bnx2x_reset_nic_mode(struct bnx2x *bp)
+{
+ int rc, i, port = BP_PORT(bp);
+ int vlan_en = 0, mac_en[NUM_MACS];
+
+ /* Close input from network */
+ if (bp->mf_mode == SINGLE_FUNCTION) {
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+ } else {
+ vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN);
+ REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN, 0);
+ for (i = 0; i < NUM_MACS; i++) {
+ mac_en[i] = REG_RD(bp, port ?
+ (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE +
+ 4 * i));
+ REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
+ }
+ }
+
+ /* Close BMC to host */
+ REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+ NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
+
+ /* Suspend Tx switching to the PF. Completion of this ramrod
+ * further guarantees that all the packets of that PF / child
+ * VFs in BRB were processed by the Parser, so it is safe to
+ * change the NIC_MODE register.
+ */
+ rc = bnx2x_func_switch_update(bp, 1);
+ if (rc) {
+ BNX2X_ERR("Can't suspend tx-switching!\n");
+ return rc;
+ }
+
+ /* Change NIC_MODE register */
+ REG_WR(bp, PRS_REG_NIC_MODE, 0);
+
+ /* Open input from network */
+ if (bp->mf_mode == SINGLE_FUNCTION) {
+ bnx2x_set_rx_filter(&bp->link_params, 1);
+ } else {
+ REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN, vlan_en);
+ for (i = 0; i < NUM_MACS; i++) {
+ REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
+ mac_en[i]);
+ }
+ }
+
+ /* Enable BMC to host */
+ REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+ NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
+
+ /* Resume Tx switching to the PF */
+ rc = bnx2x_func_switch_update(bp, 0);
+ if (rc) {
+ BNX2X_ERR("Can't resume tx-switching!\n");
+ return rc;
+ }
+
+ DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+ return 0;
+}
+
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
+{
+ int rc;
+
+ bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
+
+ if (CONFIGURE_NIC_MODE(bp)) {
+ /* Configure searcher as part of function hw init */
+ bnx2x_init_searcher(bp);
+
+ /* Reset NIC mode */
+ rc = bnx2x_reset_nic_mode(bp);
+ if (rc)
+ BNX2X_ERR("Can't change NIC mode!\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+/* A previous driver DMAE transaction may have occurred when the pre-boot
+ * stage ended and boot began, or when the kdump kernel was loaded. Either
+ * case would invalidate the addresses of the transaction, setting the
+ * was-error bit in the PCI glue and causing all hw-to-host PCIe transactions
+ * to time out. If this happened we want to clear, in PGLUE_B, the was-error
+ * indication for this PF.
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << BP_ABS_FUNC(bp));
+}
+
+static int bnx2x_init_hw_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int init_phase = PHASE_PF0 + func;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 cdu_ilt_start;
+ u32 addr, val;
+ u32 main_mem_base, main_mem_size, main_mem_prty_clr;
+ int i, main_mem_width, rc;
+
+ DP(NETIF_MSG_HW, "starting func init func %d\n", func);
+
+	/* FLR cleanup */
+ if (!CHIP_IS_E1x(bp)) {
+ rc = bnx2x_pf_flr_clnup(bp);
+ if (rc) {
+ bnx2x_fw_dump(bp);
+ return rc;
+ }
+ }
+
+ /* set MSI reconfigure capability */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+ val = REG_RD(bp, addr);
+ val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+ REG_WR(bp, addr, val);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+ ilt = BP_ILT(bp);
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+
+ if (IS_SRIOV(bp))
+ cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
+ cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
+
+	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
+	 * those of the VFs, so the start line should be reset
+	 */
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+ for (i = 0; i < L2_ILT_LINES(bp); i++) {
+ ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
+ ilt->lines[cdu_ilt_start + i].page_mapping =
+ bp->context[i].cxt_mapping;
+ ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
+ }
+
+ bnx2x_ilt_init_op(bp, INITOP_SET);
+
+ if (!CONFIGURE_NIC_MODE(bp)) {
+ bnx2x_init_searcher(bp);
+ REG_WR(bp, PRS_REG_NIC_MODE, 0);
+ DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+ } else {
+ /* Set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+ DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
+ }
+
+ if (!CHIP_IS_E1x(bp)) {
+ u32 pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ /* Turn on a single ISR mode in IGU if driver is going to use
+ * INT#x or MSI
+ */
+ if (!(bp->flags & USING_MSIX_FLAG))
+ pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+		/*
+		 * Timers bug workaround: function init part.
+		 * We need to wait 20 msec after initializing the ILT to
+		 * make sure there are no requests in any of the PXP
+		 * internal queues with "old" ILT addresses
+		 */
+ msleep(20);
+		/*
+		 * Master enable - due to the WB DMAE writes performed before
+		 * this point, the register is re-initialized here as part of
+		 * the regular function init
+		 */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+ /* Enable the function in IGU */
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
+ }
+
+ bp->dmae_ready = 1;
+
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+
+ bnx2x_clean_pglue_errors(bp);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, QM_REG_PF_EN, 1);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ }
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
+
+ bnx2x_iov_init_dq(bp);
+
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_DISABLE_PF, 0);
+
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
+
+ if (IS_MF(bp)) {
+ if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
+ bp->mf_ov);
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+
+ /* HC init per function */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ } else {
+ int num_segs, sb_idx, prod_offset;
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int dsb_idx = 0;
+			/*
+ * Producer memory:
+ * E2 mode: address 0-135 match to the mapping memory;
+ * 136 - PF0 default prod; 137 - PF1 default prod;
+ * 138 - PF2 default prod; 139 - PF3 default prod;
+ * 140 - PF0 attn prod; 141 - PF1 attn prod;
+ * 142 - PF2 attn prod; 143 - PF3 attn prod;
+ * 144-147 reserved.
+ *
+			 * E1.5 mode - in backward compatible mode:
+			 * for a non-default SB, each even line in the memory
+			 * holds the U producer and each odd line holds
+			 * the C producer. The first 128 producers are for
+			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+			 * producers are for the DSB for each PF.
+ * Each PF has five segments: (the order inside each
+ * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+ * 144-147 attn prods;
+ */
+ /* non-default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+ for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
+ prod_offset = (bp->igu_base_sb + sb_idx) *
+ num_segs;
+
+ for (i = 0; i < num_segs; i++) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i) * 4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with value 0 */
+ bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_igu_clear_sb(bp,
+ bp->igu_base_sb + sb_idx);
+ }
+
+ /* default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ dsb_idx = BP_FUNC(bp);
+ else
+ dsb_idx = BP_VN(bp);
+
+ prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_BASE_DSB_PROD + dsb_idx :
+ IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
+			/*
+			 * igu prods come in chunks of E1HVN_MAX (4),
+			 * regardless of the current chip mode
+			 */
+ for (i = 0; i < (num_segs * E1HVN_MAX);
+ i += E1HVN_MAX) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i)*4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with 0 */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ CSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ XSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ TSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ } else {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ }
+ bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
+
+			/* !!! These should become driver constants once
+			 * the rf-tool supports split-68 constants
+			 */
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+ }
+ }
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2114, 0xffffffff);
+ REG_WR(bp, 0x2120, 0xffffffff);
+
+ if (CHIP_IS_E1x(bp)) {
+ main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
+ main_mem_base = HC_REG_MAIN_MEMORY +
+ BP_PORT(bp) * (main_mem_size * 4);
+ main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+ main_mem_width = 8;
+
+ val = REG_RD(bp, main_mem_prty_clr);
+ if (val)
+ DP(NETIF_MSG_HW,
+			   "Parity errors in HC block during function init (0x%x)!\n",
+ val);
+
+ /* Clear "false" parity errors in MSI-X table */
+ for (i = main_mem_base;
+ i < main_mem_base + main_mem_size * 4;
+ i += main_mem_width) {
+ bnx2x_read_dmae(bp, i, main_mem_width / 4);
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+ i, main_mem_width / 4);
+ }
+ /* Clear HC parity attention */
+ REG_RD(bp, main_mem_prty_clr);
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enable STORMs SP logging */
+ REG_WR8(bp, BAR_USTRORM_INTMEM +
+ USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
+ bnx2x_phy_probe(&bp->link_params);
+
+ return 0;
+}
+
+void bnx2x_free_mem_cnic(struct bnx2x *bp)
+{
+ bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
+
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+}
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+ int i;
+
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ if (IS_VF(bp))
+ return;
+
+ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+ for (i = 0; i < L2_ILT_LINES(bp); i++)
+ BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+ BNX2X_FREE(bp->ilt->lines);
+
+ BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+ BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+
+ bnx2x_iov_free_mem(bp);
+}
+
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1x(bp)) {
+ /* size = the status block + ramrod buffers */
+ bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ if (!bp->cnic_sb.e2_sb)
+ goto alloc_mem_err;
+ } else {
+ bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+ if (!bp->cnic_sb.e1x_sb)
+ goto alloc_mem_err;
+ }
+
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
+ /* allocate searcher T2 table, as it wasn't allocated before */
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
+
+ /* write address to which L5 should insert its values */
+ bp->cnic_eth_dev.addr_drv_info_to_mcp =
+ &bp->slowpath->drv_info_to_mcp;
+
+ if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
+
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_mem_cnic(bp);
+ BNX2X_ERR("Can't allocate memory\n");
+ return -ENOMEM;
+}
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+ int i, allocated, context_size;
+
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
+ /* allocate searcher T2 table */
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
+
+ bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+ if (!bp->def_status_blk)
+ goto alloc_mem_err;
+
+ bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+ if (!bp->slowpath)
+ goto alloc_mem_err;
+
+ /* Allocate memory for CDU context:
+ * This memory is allocated separately and not in the generic ILT
+ * functions because CDU differs in few aspects:
+ * 1. There are multiple entities allocating memory for context -
+ * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+ * its own ILT lines.
+ * 2. Since CDU page-size is not a single 4KB page (which is the case
+ * for the other ILT clients), to be efficient we want to support
+ * allocation of sub-page-size in the last entry.
+ * 3. Context pointers are used by the driver to pass to FW / update
+ * the context (for the other ILT clients the pointers are used just to
+ * free the memory during unload).
+ */
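+ /* Illustrative example (sizes here are hypothetical): with a CDU ILT
+ * page of 32KB and a total context_size of 80KB, the loop below would
+ * allocate chunks of 32KB, 32KB and 16KB, so only the last entry falls
+ * short of a full CDU page.
+ */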
+ context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+
+ for (i = 0, allocated = 0; allocated < context_size; i++) {
+ bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+ (context_size - allocated));
+ bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ if (!bp->context[i].vcxt)
+ goto alloc_mem_err;
+ allocated += bp->context[i].size;
+ }
+ bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+ GFP_KERNEL);
+ if (!bp->ilt->lines)
+ goto alloc_mem_err;
+
+ if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
+
+ if (bnx2x_iov_alloc_mem(bp))
+ goto alloc_mem_err;
+
+ /* Slow path ring */
+ bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+ if (!bp->spq)
+ goto alloc_mem_err;
+
+ /* EQ */
+ bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ if (!bp->eq_ring)
+ goto alloc_mem_err;
+
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_mem(bp);
+ BNX2X_ERR("Can't allocate memory\n");
+ return -ENOMEM;
+}
+
+/*
+ * Init service functions
+ */
+
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
+
+ __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+ if (rc == -EEXIST) {
+ DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+ /* do not treat adding same MAC as error */
+ rc = 0;
+ } else if (rc < 0)
+ BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+
+ return rc;
+}
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+ if (rc == -EEXIST) {
+ /* Do not treat adding same vlan as error. */
+ DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+ rc = 0;
+ } else if (rc < 0) {
+ BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+ }
+
+ return rc;
+}
+
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+
+ bp->vlan_cnt = 0;
+}
+
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+ unsigned long ramrod_flags = 0, vlan_flags = 0;
+ int rc;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ __set_bit(BNX2X_VLAN, &vlan_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+ if (rc)
+ return rc;
+
+ bnx2x_clear_vlan_info(bp);
+
+ return 0;
+}
+
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+
+ /* Wait for completion of the requested commands */
+ if (wait_for_comp)
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+ /* Set the mac type of addresses we want to clear */
+ __set_bit(mac_type, &vlan_mac_flags);
+
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete MACs: %d\n", rc);
+
+ return rc;
+}
+
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
+{
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
+
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
+ &bp->sp_objs->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
+ } else { /* vf */
+ return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
+ bp->fp->index, set);
+ }
+}
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+ if (IS_PF(bp))
+ return bnx2x_setup_queue(bp, &bp->fp[0], true);
+ else /* VF */
+ return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
+}
+
+/**
+ * bnx2x_set_int_mode - configure interrupt mode
+ *
+ * @bp: driver handle
+ *
+ * In case of MSI-X it will also try to enable MSI-X.
+ */
+int bnx2x_set_int_mode(struct bnx2x *bp)
+{
+ int rc = 0;
+
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+ BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
+ return -EINVAL;
+ }
+
+ switch (int_mode) {
+ case BNX2X_INT_MODE_MSIX:
+ /* attempt to enable msix */
+ rc = bnx2x_enable_msix(bp);
+
+ /* msix attained */
+ if (!rc)
+ return 0;
+
+ /* vfs use only msix */
+ if (rc && IS_VF(bp))
+ return rc;
+
+ /* failed to enable multiple MSI-X */
+ BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
+
+ fallthrough;
+ case BNX2X_INT_MODE_MSI:
+ bnx2x_enable_msi(bp);
+
+ fallthrough;
+ case BNX2X_INT_MODE_INTX:
+ bp->num_ethernet_queues = 1;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+ BNX2X_DEV_INFO("set number of queues to 1\n");
+ break;
+ default:
+ BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* must be called prior to any HW initializations */
+static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
+{
+ if (IS_SRIOV(bp))
+ return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
+ return L2_ILT_LINES(bp);
+}
+
+void bnx2x_ilt_set_info(struct bnx2x *bp)
+{
+ struct ilt_client_info *ilt_client;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 line = 0;
+
+ ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
+ DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
+
+ /* CDU */
+ ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+ ilt_client->client_num = ILT_CLIENT_CDU;
+ ilt_client->page_size = CDU_ILT_PAGE_SZ;
+ ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+ ilt_client->start = line;
+ line += bnx2x_cid_ilt_lines(bp);
+
+ if (CNIC_SUPPORT(bp))
+ line += CNIC_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
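+
+ /* Note: the "hw psz" printed above encodes the page size as
+ * log2(size / 4KB), i.e. 0 for 4KB pages, 1 for 8KB, and so on.
+ */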
+
+ /* QM */
+ if (QM_INIT(bp->qm_cid_count)) {
+ ilt_client = &ilt->clients[ILT_CLIENT_QM];
+ ilt_client->client_num = ILT_CLIENT_QM;
+ ilt_client->page_size = QM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+
+ /* 4 bytes for each cid */
+ line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+ QM_ILT_PAGE_SZ);
+
+ ilt_client->end = line - 1;
+
+ DP(NETIF_MSG_IFUP,
+ "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+ }
+
+ if (CNIC_SUPPORT(bp)) {
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(NETIF_MSG_IFUP,
+ "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(NETIF_MSG_IFUP,
+ "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+ }
+
+ BUG_ON(line > ILT_MAX_LINES);
+}
+
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @init_params: pointer to parameters structure
+ *
+ * parameters configured:
+ * - HC configuration
+ * - Queue's CDU context
+ */
+static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
+{
+ u8 cos;
+ int cxt_index, cxt_offset;
+
+ /* FCoE Queue uses Default SB, thus has no HC capabilities */
+ if (!IS_FCOE_FP(fp)) {
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+ /* If HC is supported, enable host coalescing in the transition
+ * to INIT state.
+ */
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+ /* HC rate */
+ init_params->rx.hc_rate = bp->rx_ticks ?
+ (1000000 / bp->rx_ticks) : 0;
+ init_params->tx.hc_rate = bp->tx_ticks ?
+ (1000000 / bp->tx_ticks) : 0;
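+
+ /* e.g. rx_ticks = 50 (usec) yields hc_rate = 20000, i.e. roughly
+ * an upper bound of 20000 status block updates per second.
+ */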
+
+ /* FW SB ID */
+ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+ fp->fw_sb_id;
+
+ /*
+ * CQ index among the SB indices: the FCoE client uses the default
+ * SB, therefore it's different.
+ */
+ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+ }
+
+ /* set maximum number of COSs supported by this queue */
+ init_params->max_cos = fp->max_cos;
+
+ DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
+ fp->index, init_params->max_cos);
+
+ /* set the context pointers queue object */
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+ cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+ cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+ ILT_PAGE_CIDS);
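+ /* e.g. assuming ILT_PAGE_CIDS = 1024, cid 2500 would map to
+ * context page 2 at offset 452 within that page.
+ */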
+ init_params->cxts[cos] =
+ &bp->context[cxt_index].vcxt[cxt_offset].eth;
+ }
+}
+
+static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_queue_state_params *q_params,
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params,
+ int tx_index, bool leading)
+{
+ memset(tx_only_params, 0, sizeof(*tx_only_params));
+
+ /* Set the command */
+ q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+
+ /* Set tx-only QUEUE flags: don't zero statistics */
+ tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
+
+ /* choose the index of the cid to send the slow path on */
+ tx_only_params->cid_index = tx_index;
+
+ /* Set general TX_ONLY_SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
+
+ /* Set Tx TX_ONLY_SETUP parameters */
+ bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
+
+ DP(NETIF_MSG_IFUP,
+ "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
+ tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
+ q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
+ tx_only_params->gen_params.spcl_id, tx_only_params->flags);
+
+ /* send the ramrod */
+ return bnx2x_queue_state_change(bp, q_params);
+}
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @leading: is leading
+ *
+ * This function performs 2 steps in a Queue state machine
+ * actually: 1) RESET->INIT 2) INIT->SETUP
+ */
+
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_setup_params *setup_params =
+ &q_params.params.setup;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &q_params.params.tx_only;
+ int rc;
+ u8 tx_index;
+
+ DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
+
+ /* reset IGU state; skip for the FCoE L2 queue */
+ if (!IS_FCOE_FP(fp))
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+ IGU_INT_ENABLE, 0);
+
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* Prepare the INIT parameters */
+ bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_INIT;
+
+ /* Change the state to INIT */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
+ return rc;
+ }
+
+ DP(NETIF_MSG_IFUP, "init complete\n");
+
+ /* Now move the Queue to the SETUP state... */
+ memset(setup_params, 0, sizeof(*setup_params));
+
+ /* Set QUEUE flags */
+ setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
+
+ /* Set general SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
+ FIRST_TX_COS_INDEX);
+
+ bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
+ &setup_params->rxq_params);
+
+ bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
+ FIRST_TX_COS_INDEX);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_SETUP;
+
+ if (IS_FCOE_FP(fp))
+ bp->fcoe_init = true;
+
+ /* Change the state to SETUP */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
+ return rc;
+ }
+
+ /* loop through the relevant tx-only indices */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++) {
+
+ /* prepare and send tx-only ramrod*/
+ rc = bnx2x_setup_tx_only(bp, fp, &q_params,
+ tx_only_params, tx_index, leading);
+ if (rc) {
+ BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
+ fp->index, tx_index);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_txdata *txdata;
+ struct bnx2x_queue_state_params q_params = {NULL};
+ int rc, tx_index;
+
+ DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
+
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* close tx-only connections */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++){
+
+ /* ascertain this is a normal queue */
+ txdata = fp->txdata_ptr[tx_index];
+
+ DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
+ txdata->txq_index);
+
+ /* send halt terminate on tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = tx_index;
+
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* send cfc del ramrod on tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = tx_index;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ }
+ /* Stop the primary connection: */
+ /* ...halt the connection */
+ q_params.cmd = BNX2X_Q_CMD_HALT;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* ...terminate the connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ /* ...delete cfc entry */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+ return bnx2x_queue_state_change(bp, &q_params);
+}
+
+static void bnx2x_reset_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int i;
+
+ /* Disable the function in the FW */
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+ /* FP SBs */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+ SB_DISABLED);
+ }
+
+ if (CNIC_LOADED(bp))
+ /* CNIC SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
+ (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
+
+ /* SP SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
+
+ /* Zero the SPQ data section dword by dword */
+ for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_SPQ_DATA_OFFSET(func) + i * 4, 0);
+
+ /* Configure IGU */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ } else {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ if (CNIC_LOADED(bp)) {
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+ /*
+ * Wait for at least 10ms and up to 2 second for the timers
+ * scan to complete
+ */
+ for (i = 0; i < 200; i++) {
+ usleep_range(10000, 20000);
+ if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+ break;
+ }
+ }
+ /* Clear ILT */
+ bnx2x_clear_func_ilt(bp, func);
+
+ /* Timers workaround for an E2 bug: if this is vnic-3,
+ * we need to set the entire ILT range for the timers.
+ */
+ if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
+ struct ilt_client_info ilt_cli;
+ /* use dummy TM client */
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
+ }
+
+ /* this assumes that reset_port() was called before reset_func() */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ bp->dmae_ready = 0;
+}
+
+static void bnx2x_reset_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ /* Reset physical Link */
+ bnx2x__link_reset(bp);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ /* Do not rcv packets to BRB */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+ /* Do not direct rcv packets that are not for MCP to the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+ /* Configure AEU */
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+ msleep(100);
+ /* Check for BRB port occupancy */
+ val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+ if (val)
+ DP(NETIF_MSG_IFDOWN,
+ "BRB1 is not empty %d blocks are occupied\n", val);
+
+ /* TODO: Close Doorbell port? */
+}
+
+static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_RESET;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_func_stop(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_STOP;
+
+ /*
+ * Try to stop the function the 'good way'. If it fails (in case
+ * of a parity error during bnx2x_chip_cleanup()) and we are
+ * not in a debug mode, perform a state transaction in order to
+ * enable a further HW_RESET transaction.
+ */
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return rc;
+#else
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
+ __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+ u32 reset_code = 0;
+ int port = BP_PORT(bp);
+
+ /* Select the UNLOAD request mode */
+ if (unload_mode == UNLOAD_NORMAL)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ else if (bp->flags & NO_WOL_FLAG)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+ else if (bp->wol) {
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ const u8 *mac_addr = bp->dev->dev_addr;
+ struct pci_dev *pdev = bp->pdev;
+ u32 val;
+ u16 pmc;
+
+ /* The mac address is written to entries 1-4 to
+ * preserve entry 0 which is used by the PMF
+ */
+ u8 entry = (BP_VN(bp) + 1)*8;
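+
+ /* e.g. VN 0 writes at MAC_MATCH offset 8 (entry 1) and VN 3 at
+ * offset 32 (entry 4); each entry spans 8 bytes (two registers).
+ */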
+
+ val = (mac_addr[0] << 8) | mac_addr[1];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+ /* Enable the PME and clear the status */
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
+ pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
+
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+ } else
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ /* Send the request to the MCP */
+ if (!BP_NOMCP(bp))
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+ else {
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ bnx2x_load_count[path][0]--;
+ bnx2x_load_count[path][1 + port]--;
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+ bnx2x_load_count[path][2]);
+ if (bnx2x_load_count[path][0] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+ else if (bnx2x_load_count[path][1 + port] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+ else
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+ }
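+
+ /* Illustrative no-MCP example: the load counts above are kept per
+ * path as [all functions, port 0 functions, port 1 functions]. If
+ * the counts were [2, 1, 1] and the last function on port 0
+ * unloads, they become [1, 0, 1] and reset_code is
+ * FW_MSG_CODE_DRV_UNLOAD_PORT.
+ */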
+
+ return reset_code;
+}
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ * @keep_link: true iff link should be kept up
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
+{
+ u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
+ /* Report UNLOAD_DONE to MCP */
+ if (!BP_NOMCP(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
+}
+
+static int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+ int tout = 50;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+ if (!bp->port.pmf)
+ return 0;
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TX disable/enable transaction
+ * 1. Sync IRQ for the default SB
+ * 2. Sync SP queue - this guarantees us that attention handling started
+ * 3. Wait until the TX disable/enable transaction completes
+ *
+ * 1+2 guarantee that if DCBx attention was scheduled it already changed
+ * the pending bit of the transaction from STARTED-->TX_STOPPED; if we
+ * already received a completion for the transaction, the state is
+ * TX_STOPPED.
+ * State will return to STARTED after completion of TX_STOPPED-->STARTED
+ * transaction.
+ */
+
+ /* make sure default SB ISR is done */
+ if (msix)
+ synchronize_irq(bp->msix_table[0].vector);
+ else
+ synchronize_irq(bp->pdev->irq);
+
+ flush_workqueue(bnx2x_wq);
+ flush_workqueue(bnx2x_iov_wq);
+
+ while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED && tout--)
+ msleep(20);
+
+ if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("Wrong function state\n");
+ return -EBUSY;
+#else
+ /*
+ * Failed to complete the transaction in a "good way"
+ * Force both transactions with CLR bit
+ */
+ struct bnx2x_func_state_params func_params = {NULL};
+
+ DP(NETIF_MSG_IFDOWN,
+ "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
+
+ func_params.f_obj = &bp->func_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
+
+ /* STARTED-->TX_STOPPED */
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ bnx2x_func_state_change(bp, &func_params);
+
+ /* TX_STOPPED-->STARTED */
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+static void bnx2x_disable_ptp(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ /* Disable sending PTP packets to host */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+ /* Reset PTP event detection rules */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+ /* Disable the PTP feature */
+ REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+ NIG_REG_P0_PTP_EN, 0x0);
+}
+
+/* Called during unload, to stop PTP-related stuff */
+static void bnx2x_stop_ptp(struct bnx2x *bp)
+{
+ /* Cancel PTP work queue. Should be done after the Tx queues are
+ * drained to prevent additional scheduling.
+ */
+ cancel_work_sync(&bp->ptp_task);
+
+ if (bp->ptp_tx_skb) {
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
+ }
+
+ /* Disable PTP in HW */
+ bnx2x_disable_ptp(bp);
+
+ DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
+{
+ int port = BP_PORT(bp);
+ int i, rc = 0;
+ u8 cos;
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ u32 reset_code;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (rc)
+ return;
+#endif
+ }
+
+ /* Give HW time to discard old tx messages */
+ usleep_range(1000, 2000);
+
+ /* Clean all ETH MACs */
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+ false);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+ /* Clean up UC list */
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
+ true);
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
+ rc);
+
+ /* The whole *vlan_obj structure may not be initialized if VLAN
+ * filtering offload is not supported by hardware. Currently this is
+ * true for all hardware covered by CHIP_IS_E1x().
+ */
+ if (!CHIP_IS_E1x(bp)) {
+ /* Remove all currently configured VLANs */
+ rc = bnx2x_del_all_vlans(bp);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all VLANs\n");
+ }
+
+ /* Disable LLH */
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+ /* Set "drop all" (stop Rx).
+ * We need to take a netif_addr_lock() here in order to prevent
+ * a race between the completion code and this code.
+ */
+ netif_addr_lock_bh(bp->dev);
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ else if (bp->slowpath)
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Cleanup multicast configuration */
+ rparam.mcast_obj = &bp->mcast_obj;
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+ netif_addr_unlock_bh(bp->dev);
+
+ bnx2x_iov_chip_cleanup(bp);
+
+ /*
+ * Send the UNLOAD_REQUEST to the MCP. This will return whether
+ * this function should perform a FUNC, PORT or COMMON HW
+ * reset.
+ */
+ reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TX disable/enable transaction
+ */
+ rc = bnx2x_func_wait_started(bp);
+ if (rc) {
+ BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Close multi and leading connections
+ * Completions for ramrods are collected in a synchronous way
+ */
+ for_each_eth_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+
+ if (CNIC_LOADED(bp)) {
+ for_each_cnic_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+ }
+
+ /* If the SP settings didn't get completed by now - something
+ * very wrong has happened.
+ */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+ BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+ rc = bnx2x_func_stop(bp);
+ if (rc) {
+ BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* stop_ptp should be after the Tx queues are drained to prevent
+ * scheduling to the cancelled PTP work queue. It should also be after
+ * the function stop ramrod is sent, since as part of this ramrod the
+ * FW accesses PTP registers.
+ */
+ if (bp->flags & PTP_SUPPORTED) {
+ bnx2x_stop_ptp(bp);
+ if (bp->ptp_clock) {
+ ptp_clock_unregister(bp->ptp_clock);
+ bp->ptp_clock = NULL;
+ }
+ }
+
+ if (!bp->nic_stopped) {
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
+
+ /* Reset the chip, unless PCI function is offline. If we reach this
+ * point following PCI error handling, it means the device is really
+ * in a bad state and we're about to remove it, so resetting the
+ * chip is not a good idea.
+ */
+ if (!pci_channel_offline(bp->pdev)) {
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+ }
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, keep_link);
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
+ /* #2 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
+ }
+
+ /* #3 */
+ if (CHIP_IS_E1x(bp)) {
+ /* Prevent interrupts from HC on both ports */
+ val = REG_RD(bp, HC_REG_CONFIG_1);
+ REG_WR(bp, HC_REG_CONFIG_1,
+ (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+ (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+ val = REG_RD(bp, HC_REG_CONFIG_0);
+ REG_WR(bp, HC_REG_CONFIG_0,
+ (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+ (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+ } else {
+ /* Prevent incoming interrupts in IGU */
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+ (!close) ?
+ (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+ (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ }
+
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/**
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of 'magic' bit.
+ *
+ * Takes care of CLP configurations.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset =
+ offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+/**
+ * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
+ *
+ * @bp: driver handle
+ */
+static void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+ /* special handling for emulation and FPGA - wait 10 times longer */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
+{
+ int cnt = 0;
+ u32 val = 0;
+
+ do {
+ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+ /* If we read all 0xFFs, it means we are in a PCI error state and
+ * should bail out to avoid crashes on adapter's FW reads.
+ */
+ if (bp->common.shmem_base == 0xFFFFFFFF) {
+ bp->flags |= NO_MCP_FLAG;
+ return -ENODEV;
+ }
+
+ if (bp->common.shmem_base) {
+ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ if (val & SHR_MEM_VALIDITY_MB)
+ return 0;
+ }
+
+ bnx2x_mcp_wait_one(bp);
+
+ } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
+
+ BNX2X_ERR("BAD MCP validity signature\n");
+
+ return -ENODEV;
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ int rc = bnx2x_init_shmem(bp);
+
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+ u32 global_bits2, stay_reset2;
+
+ /*
+ * Bits that have to be set in reset_mask2 if we want to reset 'global'
+ * (per chip) blocks.
+ */
+ global_bits2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+
+ /* Don't reset the following blocks.
+ * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
+ * reset, as in a 4-port device they might still be owned
+ * by the MCP (there is only one leader per path).
+ */
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+ MISC_REGISTERS_RESET_REG_2_RST_ATC |
+ MISC_REGISTERS_RESET_REG_2_PGLC |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 |
+ MISC_REGISTERS_RESET_REG_2_UMAC1;
+
+ /*
+ * Keep the following blocks in reset:
+ * - all xxMACs are handled by the bnx2x_link code.
+ */
+ stay_reset2 =
+ MISC_REGISTERS_RESET_REG_2_XMAC |
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+ /* Full reset masks according to the chip */
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else if (CHIP_IS_E1H(bp))
+ reset_mask2 = 0x1ffff;
+ else if (CHIP_IS_E2(bp))
+ reset_mask2 = 0xfffff;
+ else /* CHIP_IS_E3 */
+ reset_mask2 = 0x3ffffff;
+
+ /* Don't reset global blocks unless we need to */
+ if (!global)
+ reset_mask2 &= ~global_bits2;
+
+ /*
+ * In case of attention in the QM, we need to reset PXP
+ * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+ * because otherwise QM reset would release 'close the gates' shortly
+ * before resetting the PXP, then the PSWRQ would send a write
+ * request to PGLUE. Then when PXP is reset, PGLUE would try to
+ * read the payload data from PSWWR, but PSWWR would not
+ * respond. The write queue in PGLUE would get stuck, and DMAE commands
+ * would not return. Therefore it's important to reset the second
+ * reset register (containing the
+ * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+ * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+ * bit).
+ */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+
+ barrier();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ reset_mask2 & (~stay_reset2));
+
+ barrier();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+}
+
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
+ *
+ * @bp: driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+ int cnt = 1000; /* must be signed for the timeout check below */
+ u32 pend_bits = 0;
+
+ do {
+ pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+ if (pend_bits == 0)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+ pend_bits);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+ u32 tags_63_32 = 0;
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if (CHIP_IS_E3(bp))
+ tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
+
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff) &&
+ (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
+ break;
+ usleep_range(1000, 2000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
+ BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* Poll for IGU VQs for 57712 and newer chips */
+ if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+ return -EAGAIN;
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ usleep_range(1000, 2000);
+
+ /* Prepare to chip reset: */
+ /* MCP */
+ if (global)
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp, global);
+ barrier();
+
+ /* clear errors in PGB */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
+
+ /* Recover after reset: */
+ /* MCP */
+ if (global && bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* TBD: Add resetting the NO_MCP mode DB here */
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+ /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
+ * reset state, re-enable attentions. */
+
+ return 0;
+}
+
+static int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ bool global = bnx2x_reset_is_global(bp);
+ u32 load_code;
+
+ /* If we are not going to reset the MCP - load a "fake" driver to
+ * reset the HW while the driver is still the owner of the HW
+ */
+ if (!global && !BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+ (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+ BNX2X_ERR("MCP unexpected resp, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset2;
+ }
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset2;
+ }
+ }
+
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp, global)) {
+ BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
+ BP_PATH(bp));
+ rc = -EAGAIN;
+ goto exit_leader_reset2;
+ }
+
+ /*
+ * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
+ * state.
+ */
+ bnx2x_set_reset_done(bp);
+ if (global)
+ bnx2x_clear_reset_global(bp);
+
+exit_leader_reset2:
+ /* unload "fake driver" if it was loaded */
+ if (!global && !BP_NOMCP(bp)) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+ return rc;
+}
+
+static void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+ netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+
+ /*
+ * Block ifup for all function on this engine until "process kill"
+ * or power cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Shut down the power */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ u32 error_recovered, error_unrecovered;
+ bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ if (vf)
+ vf->state = VF_LOST;
+ }
+#endif
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ is_parity = bnx2x_chk_parity_attn(bp, &global, false);
+ WARN_ON(!is_parity);
+
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ bnx2x_set_reset_in_progress(bp);
+ /*
+ * Check if there is a global attention and if
+ * there was a global attention, set the global
+ * reset bit.
+ */
+
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ bp->is_leader = 1;
+ }
+
+ /* Stop the driver */
+ /* If the interface has been removed - return */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+
+ /* Ensure "is_leader", MCP command sequence and
+ * "recovery_state" update values are seen on other
+ * CPUs.
+ */
+ smp_mb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ bool other_load_status =
+ bnx2x_get_load_status(bp, other_engine);
+ bool load_status =
+ bnx2x_get_load_status(bp, BP_PATH(bp));
+ global = bnx2x_reset_is_global(bp);
+
+ /*
+ * In case of a parity in a global block, let
+ * the first leader that performs a
+ * leader_reset() reset the global blocks in
+ * order to clear global attentions. Otherwise
+ * the gates will remain closed for that
+ * engine.
+ */
+ if (load_status ||
+ (global && other_load_status)) {
+ /* Wait until all other functions get
+ * down.
+ */
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions got down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp)) {
+ bnx2x_recovery_failed(bp);
+ return;
+ }
+
+ /* If we are here, it means that the
+ * leader has succeeded and doesn't
+ * want to be a leader any more. Try
+ * to continue as a non-leader.
+ */
+ break;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
+ /* Try to get a LEADER_LOCK HW lock, as
+ * a former leader may have been
+ * unloaded by the user or released
+ * leadership for some other reason.
+ */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+
+ } else {
+ /*
+ * If there was a global attention, wait
+ * for it to be cleared.
+ */
+ if (bnx2x_reset_is_global(bp)) {
+ schedule_delayed_work(
+ &bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ }
+
+ error_recovered =
+ bp->eth_stats.recoverable_error;
+ error_unrecovered =
+ bp->eth_stats.unrecoverable_error;
+ bp->recovery_state =
+ BNX2X_RECOVERY_NIC_LOADING;
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ error_unrecovered++;
+ netdev_err(bp->dev,
+ "Recovery failed. Power cycle needed\n");
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+ /* Shut down the power */
+ bnx2x_set_power_state(
+ bp, PCI_D3hot);
+ smp_mb();
+ } else {
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ error_recovered++;
+ smp_mb();
+ }
+ bp->eth_stats.recoverable_error =
+ error_recovered;
+ bp->eth_stats.unrecoverable_error =
+ error_unrecovered;
+
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
+
+static int bnx2x_udp_port_update(struct bnx2x *bp)
+{
+ struct bnx2x_func_switch_update_params *switch_update_params;
+ struct bnx2x_func_state_params func_params = {NULL};
+ u16 vxlan_port = 0, geneve_port = 0;
+ int rc;
+
+ switch_update_params = &func_params.params.switch_update;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Function parameters */
+ __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ &switch_update_params->changes);
+
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
+ geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
+ switch_update_params->geneve_dst_port = geneve_port;
+ }
+
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
+ vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
+ switch_update_params->vxlan_dst_port = vxlan_port;
+ }
+
+ /* Re-enable inner-rss for the offloaded UDP tunnels */
+ __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+ &switch_update_params->changes);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc)
+ BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
+ vxlan_port, geneve_port, rc);
+ else
+ DP(BNX2X_MSG_SP,
+ "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
+ vxlan_port, geneve_port);
+
+ return rc;
+}
+
+static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ struct udp_tunnel_info ti;
+
+ udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+ bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
+
+ return bnx2x_udp_port_update(bp);
+}
+
+static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
+ .sync_table = bnx2x_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
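+
+/* Note: the table order above (VXLAN first, then GENEVE) is assumed to
+ * match the BNX2X_UDP_PORT_VXLAN / BNX2X_UDP_PORT_GENEVE indices used as
+ * 'table' in bnx2x_udp_tunnel_sync() above.
+ */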
+
+static int bnx2x_close(struct net_device *dev);
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
+ * scheduled on a generic workqueue in order to prevent a deadlock.
+ */
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+
+ rtnl_lock();
+
+ if (!netif_running(bp->dev)) {
+ rtnl_unlock();
+ return;
+ }
+
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
+#endif
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ bnx2x_parity_recover(bp);
+
+ rtnl_unlock();
+ return;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
+#endif
+
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ /* Immediately indicate link as down */
+ bp->link_vars.link_up = 0;
+ bp->force_link_down = true;
+ netif_carrier_off(bp->dev);
+ BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ /* If the load fails due to an allocation failure, the NIC
+ * is rebooted once more. If open still fails, print an error
+ * message to notify the user.
+ */
+ if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ BNX2X_ERR("Open the NIC fails again!\n");
+ }
+ rtnl_unlock();
+ return;
+ }
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
+ if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
+ bnx2x_after_function_update(bp);
+ /*
+ * In case of a fan failure we need to unload the driver even if the
+ * "stop on error" debug flag is set, since we are trying to prevent
+ * permanent overheating damage
+ */
+ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+ DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
+ netif_device_detach(bp->dev);
+ bnx2x_close(bp->dev);
+ rtnl_unlock();
+ return;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set mcast vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_set_mcast(bp->dev);
+ }
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ &bp->sp_rtnl_state)){
+ if (netif_carrier_ok(bp->dev)) {
+ bnx2x_tx_disable(bp);
+ BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
+ }
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+ bnx2x_set_rx_mode_inner(bp);
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state))
+ bnx2x_pf_set_vfs_vlan(bp);
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
+ bnx2x_dcbx_stop_hw_tx(bp);
+ bnx2x_dcbx_resume_hw_tx(bp);
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+ &bp->sp_rtnl_state))
+ bnx2x_update_mng_version(bp);
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+ bnx2x_handle_update_svid_cmd(bp);
+
+ /* Work below needs the rtnl lock not taken (it takes the lock itself
+ * and can be called from other contexts as well)
+ */
+ rtnl_unlock();
+
+ /* enable SR-IOV if applicable */
+ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
+ &bp->sp_rtnl_state)) {
+ bnx2x_disable_sriov(bp);
+ bnx2x_enable_sriov(bp);
+ }
+}
+
+static void bnx2x_period_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+ if (!netif_running(bp->dev))
+ goto period_task_exit;
+
+ if (CHIP_REV_IS_SLOW(bp)) {
+ BNX2X_ERR("period task called on emulation, ignoring\n");
+ goto period_task_exit;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ /*
+ * The barrier is needed to ensure the ordering between the writing to
+ * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+ * the reading here.
+ */
+ smp_mb();
+ if (bp->port.pmf) {
+ bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+ /* Re-queue task in 1 sec */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+ }
+
+ bnx2x_release_phy_lock(bp);
+period_task_exit:
+ return;
+}
+
+/*
+ * Init service functions
+ */
+
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+{
+ u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
+ return base + (BP_ABS_FUNC(bp)) * stride;
+}
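+
+/* Example: assuming the pretend registers are equally spaced (which the
+ * stride computation above relies on), absolute function 2 resolves to
+ * PXP2_REG_PGL_PRETEND_FUNC_F0 + 2 * (F1 - F0), i.e. the F2 register.
+ */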
+
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+ u8 port, u32 reset_reg,
+ struct bnx2x_mac_vals *vals)
+{
+ u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+ u32 base_addr;
+
+ if (!(mask & reset_reg))
+ return false;
+
+ BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+ base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+ REG_WR(bp, vals->umac_addr[port], 0);
+
+ return true;
+}
+
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ struct bnx2x_mac_vals *vals)
+{
+ u32 val, base_addr, offset, mask, reset_reg;
+ bool mac_stopped = false;
+ u8 port = BP_PORT(bp);
+
+ /* reset addresses as they also mark which values were changed */
+ memset(vals, 0, sizeof(*vals));
+
+ reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
+
+ if (!CHIP_IS_E3(bp)) {
+ val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+ mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
+ if ((mask & reset_reg) && val) {
+ u32 wb_data[2];
+ BNX2X_DEV_INFO("Disable bmac Rx\n");
+ base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
+ : NIG_REG_INGRESS_BMAC0_MEM;
+ offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
+ : BIGMAC_REGISTER_BMAC_CONTROL;
+
+ /*
+ * use rd/wr since we cannot use dmae. This is safe
+ * since MCP won't access the bus due to the request
+ * to unload, and no function on the path can be
+ * loaded at this time.
+ */
+ wb_data[0] = REG_RD(bp, base_addr + offset);
+ wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+ vals->bmac_addr = base_addr + offset;
+ vals->bmac_val[0] = wb_data[0];
+ vals->bmac_val[1] = wb_data[1];
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR(bp, vals->bmac_addr, wb_data[0]);
+ REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
+ }
+ BNX2X_DEV_INFO("Disable emac Rx\n");
+ vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+ vals->emac_val = REG_RD(bp, vals->emac_addr);
+ REG_WR(bp, vals->emac_addr, 0);
+ mac_stopped = true;
+ } else {
+ if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
+ BNX2X_DEV_INFO("Disable xmac Rx\n");
+ base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+ val & ~(1 << 1));
+ REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
+ val | (1 << 1));
+ vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+ vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+ REG_WR(bp, vals->xmac_addr, 0);
+ mac_stopped = true;
+ }
+
+ mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+ reset_reg, vals);
+ mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+ reset_reg, vals);
+ }
+
+ if (mac_stopped)
+ msleep(20);
+}
+
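+/* The UNDI producer register packs two 16-bit producers into one 32-bit
+ * word: the RCQ producer in the low half and the BD producer in the high
+ * half, as encoded/decoded by the macros below.
+ */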
+#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
+ 0x1848 + ((f) << 4))
+#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
+#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
+#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+
+#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
+#define BCM_5710_UNDI_FW_MF_VERS (0x05)
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+ /* UNDI marks its presence in DORQ -
+	 * it initializes the CID offset for the normal doorbell to 0x7
+ */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+ return false;
+
+ if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+ BNX2X_DEV_INFO("UNDI previously loaded\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
+{
+ u16 rcq, bd;
+ u32 addr, tmp_reg;
+
+ if (BP_FUNC(bp) < 2)
+ addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
+ else
+ addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
+
+ tmp_reg = REG_RD(bp, addr);
+ rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
+ bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
+
+ tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
+ REG_WR(bp, addr, tmp_reg);
+
+ BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+ BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
+}
+
+static int bnx2x_prev_mcp_done(struct bnx2x *bp)
+{
+ u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
+ DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
+ if (!rc) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static struct bnx2x_prev_path_list *
+ bnx2x_prev_path_get_entry(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+
+ list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
+ if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
+ bp->pdev->bus->number == tmp_list->bus &&
+ BP_PATH(bp) == tmp_list->path)
+ return tmp_list;
+
+ return NULL;
+}
+
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ tmp_list->aer = 1;
+ rc = 0;
+ } else {
+ BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+ BP_PATH(bp));
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
+static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ bool rc = false;
+
+ if (down_trylock(&bnx2x_prev_sem))
+ return false;
+
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (tmp_list->aer) {
+ DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+ BP_PATH(bp));
+ } else {
+ rc = true;
+ BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
+ BP_PATH(bp));
+ }
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
+bool bnx2x_port_after_undi(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *entry;
+ bool val;
+
+ down(&bnx2x_prev_sem);
+
+ entry = bnx2x_prev_path_get_entry(bp);
+ val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
+
+ up(&bnx2x_prev_sem);
+
+ return val;
+}
+
+static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ /* Check whether the entry for this path already exists */
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (!tmp_list->aer) {
+ BNX2X_ERR("Re-Marking the path.\n");
+ } else {
+ DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+ BP_PATH(bp));
+ tmp_list->aer = 0;
+ }
+ up(&bnx2x_prev_sem);
+ return 0;
+ }
+ up(&bnx2x_prev_sem);
+
+ /* Create an entry for this path and add it */
+ tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
+ if (!tmp_list) {
+ BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
+ return -ENOMEM;
+ }
+
+ tmp_list->bus = bp->pdev->bus->number;
+ tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
+ tmp_list->path = BP_PATH(bp);
+ tmp_list->aer = 0;
+ tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ kfree(tmp_list);
+ } else {
+ DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+ BP_PATH(bp));
+ list_add(&tmp_list->list, &bnx2x_prev_list);
+ up(&bnx2x_prev_sem);
+ }
+
+ return rc;
+}
+
+static int bnx2x_do_flr(struct bnx2x *bp)
+{
+ struct pci_dev *dev = bp->pdev;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
+ return -EINVAL;
+ }
+
+ /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
+ if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+ BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+ bp->common.bc_ver);
+ return -EINVAL;
+ }
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+ BNX2X_DEV_INFO("Initiating FLR\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
+
+ return 0;
+}
+
+static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
+{
+ int rc;
+
+ BNX2X_DEV_INFO("Uncommon unload Flow\n");
+
+ /* Test if previous unload process was already finished for this path */
+ if (bnx2x_prev_is_path_marked(bp))
+ return bnx2x_prev_mcp_done(bp);
+
+ BNX2X_DEV_INFO("Path is unmarked\n");
+
+ /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+ if (bnx2x_prev_is_after_undi(bp))
+ goto out;
+
+ /* If function has FLR capabilities, and existing FW version matches
+ * the one required, then FLR will be sufficient to clean any residue
+ * left by previous driver
+ */
+ rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
+
+ if (!rc) {
+ /* fw version is good */
+ BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
+ rc = bnx2x_do_flr(bp);
+ }
+
+ if (!rc) {
+ /* FLR was performed */
+ BNX2X_DEV_INFO("FLR successful\n");
+ return 0;
+ }
+
+ BNX2X_DEV_INFO("Could not FLR\n");
+
+out:
+	/* Close the MCP request, return failure */
+ rc = bnx2x_prev_mcp_done(bp);
+ if (!rc)
+ rc = BNX2X_PREV_WAIT_NEEDED;
+
+ return rc;
+}
+
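+/* Common unload flow: close the MACs and LLH filters so the BRB stops
+ * filling, clear any UNDI leftovers, drain the BRB, reset the common
+ * block and finally restore the saved MAC register values before
+ * signalling the MCP that unload is done.
+ */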
+static int bnx2x_prev_unload_common(struct bnx2x *bp)
+{
+ u32 reset_reg, tmp_reg = 0, rc;
+ bool prev_undi = false;
+ struct bnx2x_mac_vals mac_vals;
+
+	/* It is possible that a previous function received the 'common'
+	 * answer but has not loaded yet, creating a scenario where multiple
+	 * functions receive 'common' on the same path.
+ */
+ BNX2X_DEV_INFO("Common unload Flow\n");
+
+ memset(&mac_vals, 0, sizeof(mac_vals));
+
+ if (bnx2x_prev_is_path_marked(bp))
+ return bnx2x_prev_mcp_done(bp);
+
+ reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
+
+ /* Reset should be performed after BRB is emptied */
+ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
+ u32 timer_count = 1000;
+
+ /* Close the MAC Rx to prevent BRB from filling up */
+ bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+ /* close LLH filters for both ports towards the BRB */
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+ bp->link_params.port ^= 1;
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+ bp->link_params.port ^= 1;
+
+ /* Check if the UNDI driver was previously loaded */
+ if (bnx2x_prev_is_after_undi(bp)) {
+ prev_undi = true;
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
+ }
+ if (!CHIP_IS_E1x(bp))
+ /* block FW from writing to host */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
+ /* wait until BRB is empty */
+ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+ while (timer_count) {
+ u32 prev_brb = tmp_reg;
+
+ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
+ if (!tmp_reg)
+ break;
+
+ BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
+
+ /* reset timer as long as BRB actually gets emptied */
+ if (prev_brb > tmp_reg)
+ timer_count = 1000;
+ else
+ timer_count--;
+
+ /* If UNDI resides in memory, manually increment it */
+ if (prev_undi)
+ bnx2x_prev_unload_undi_inc(bp, 1);
+
+ udelay(10);
+ }
+
+ if (!timer_count)
+ BNX2X_ERR("Failed to empty BRB, hope for the best\n");
+ }
+
+ /* No packets are in the pipeline, path is ready for reset */
+ bnx2x_reset_common(bp);
+
+ if (mac_vals.xmac_addr)
+ REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+ if (mac_vals.umac_addr[0])
+ REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+ if (mac_vals.umac_addr[1])
+ REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
+ if (mac_vals.emac_addr)
+ REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+ if (mac_vals.bmac_addr) {
+ REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+ REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+ }
+
+ rc = bnx2x_prev_mark_path(bp, prev_undi);
+ if (rc) {
+ bnx2x_prev_mcp_done(bp);
+ return rc;
+ }
+
+ return bnx2x_prev_mcp_done(bp);
+}
+
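+/* Previous-unload entry point: release stale HW/NVRAM locks, then loop
+ * issuing unload requests to the MCP. A 'common' reply (or an AER-marked
+ * path) takes the common flow; any other reply takes the uncommon flow,
+ * retrying up to ten times while BNX2X_PREV_WAIT_NEEDED is returned.
+ */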
+static int bnx2x_prev_unload(struct bnx2x *bp)
+{
+ int time_counter = 10;
+ u32 rc, fw, hw_lock_reg, hw_lock_val;
+ BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
+
+	/* Clear HW errors which may have resulted from an interrupted
+	 * DMAE transaction.
+ */
+ bnx2x_clean_pglue_errors(bp);
+
+ /* Release previously held locks */
+ hw_lock_reg = (BP_FUNC(bp) <= 5) ?
+ (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
+ (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
+
+ hw_lock_val = REG_RD(bp, hw_lock_reg);
+ if (hw_lock_val) {
+ if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+ BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
+ }
+
+ BNX2X_DEV_INFO("Release Previously held hw lock\n");
+ REG_WR(bp, hw_lock_reg, 0xffffffff);
+ } else
+ BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
+
+ if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
+ BNX2X_DEV_INFO("Release previously held alr\n");
+ bnx2x_release_alr(bp);
+ }
+
+ do {
+ int aer = 0;
+ /* Lock MCP using an unload request */
+ fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
+ if (!fw) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ break;
+ }
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+ rc);
+ } else {
+ /* If Path is marked by EEH, ignore unload status */
+ aer = !!(bnx2x_prev_path_get_entry(bp) &&
+ bnx2x_prev_path_get_entry(bp)->aer);
+ up(&bnx2x_prev_sem);
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
+ rc = bnx2x_prev_unload_common(bp);
+ break;
+ }
+
+ /* non-common reply from MCP might require looping */
+ rc = bnx2x_prev_unload_uncommon(bp);
+ if (rc != BNX2X_PREV_WAIT_NEEDED)
+ break;
+
+ msleep(20);
+ } while (--time_counter);
+
+ if (!time_counter || rc) {
+ BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
+ rc = -EPROBE_DEFER;
+ }
+
+ /* Mark function if its port was used to boot from SAN */
+ if (bnx2x_port_after_undi(bp))
+ bp->link_params.feature_config_flags |=
+ FEATURE_CONFIG_BOOT_FROM_SAN;
+
+ BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
+
+ return rc;
+}
+
+static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2, val3, val4, id, boot_mode;
+ u16 pmc;
+
+ /* Get the chip revision id and number. */
+ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+ val = REG_RD(bp, MISC_REG_CHIP_NUM);
+ id = ((val & 0xffff) << 16);
+ val = REG_RD(bp, MISC_REG_CHIP_REV);
+ id |= ((val & 0xf) << 12);
+
+	/* Metal is read from PCI regs, but registers at offsets >= 0x400
+	 * cannot be accessed through configuration space (so we need to reg_rd)
+ */
+ val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ id |= (((val >> 24) & 0xf) << 4);
+ val = REG_RD(bp, MISC_REG_BOND_ID);
+ id |= (val & 0xf);
+ bp->common.chip_id = id;
+
+ /* force 57811 according to MISC register */
+ if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
+ if (CHIP_IS_57810(bp))
+ bp->common.chip_id = (CHIP_NUM_57811 << 16) |
+ (bp->common.chip_id & 0x0000FFFF);
+ else if (CHIP_IS_57810_MF(bp))
+ bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
+ (bp->common.chip_id & 0x0000FFFF);
+ bp->common.chip_id |= 0x1;
+ }
+
+ /* Set doorbell size */
+ bp->db_size = (1 << BNX2X_DB_SHIFT);
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if ((val & 1) == 0)
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
+ else
+ val = (val >> 1) & 1;
+ BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
+ "2_PORT_MODE");
+ bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
+ CHIP_2_PORT_MODE;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bp->pfid = (bp->pf_num >> 1); /* 0..3 */
+ else
+ bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
+ } else {
+ bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
+ bp->pfid = bp->pf_num; /* 0..7 */
+ }
+
+ BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
+
+ bp->link_params.chip_id = bp->common.chip_id;
+ BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+
+ val = (REG_RD(bp, 0x2874) & 0x55);
+ if ((bp->common.chip_id & 0x1) ||
+ (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+ bp->flags |= ONE_PORT_FLAG;
+ BNX2X_DEV_INFO("single port device\n");
+ }
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+ bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
+ (val & MCPR_NVM_CFG4_FLASH_SIZE));
+ BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+ bp->common.flash_size, bp->common.flash_size);
+
+ bnx2x_init_shmem(bp);
+
+ bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
+ MISC_REG_GENERIC_CR_1 :
+ MISC_REG_GENERIC_CR_0));
+
+ bp->link_params.shmem_base = bp->common.shmem_base;
+ bp->link_params.shmem2_base = bp->common.shmem2_base;
+ if (SHMEM2_RD(bp, size) >
+ (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+ bp->link_params.lfa_base =
+ REG_RD(bp, bp->common.shmem2_base +
+ (u32)offsetof(struct shmem2_region,
+ lfa_host_addr[BP_PORT(bp)]));
+ else
+ bp->link_params.lfa_base = 0;
+ BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
+ bp->common.shmem_base, bp->common.shmem2_base);
+
+ if (!bp->common.shmem_base) {
+ BNX2X_DEV_INFO("MCP not active\n");
+ bp->flags |= NO_MCP_FLAG;
+ return;
+ }
+
+ bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+ BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+
+ bp->link_params.hw_led_mode = ((bp->common.hw_config &
+ SHARED_HW_CFG_LED_MODE_MASK) >>
+ SHARED_HW_CFG_LED_MODE_SHIFT);
+
+ bp->link_params.feature_config_flags = 0;
+ val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+ if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+ bp->link_params.feature_config_flags |=
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+ else
+ bp->link_params.feature_config_flags &=
+ ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+
+ val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+ bp->common.bc_ver = val;
+ BNX2X_DEV_INFO("bc_ver %X\n", val);
+ if (val < BNX2X_BC_VER) {
+		/* For now only warn; later we might need to enforce this. */
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
+ BNX2X_BC_VER, val);
+ }
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+ FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
+ FEATURE_CONFIG_MT_SUPPORT : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+ BC_SUPPORTS_PFC_STATS : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+ BC_SUPPORTS_FCOE_FEATURES : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+ BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+ BC_SUPPORTS_RMMOD_CMD : 0;
+
+ boot_mode = SHMEM_RD(bp,
+ dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+ PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+ switch (boot_mode) {
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+ break;
+ }
+
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
+ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+
+ BNX2X_DEV_INFO("%sWoL capable\n",
+ (bp->flags & NO_WOL_FLAG) ? "not " : "");
+
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+ val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+ val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+ val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
+}
+
+#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
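+/* Scan the IGU CAM for this PF: each valid entry maps an IGU status
+ * block to a function id and vector. Vector 0 is the default status
+ * block; any other vector contributes to the non-default SB base/count.
+ */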
+static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
+{
+ int pfid = BP_FUNC(bp);
+ int igu_sb_id;
+ u32 val;
+ u8 fid, igu_sb_cnt = 0;
+
+ bp->igu_base_sb = 0xff;
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ int vn = BP_VN(bp);
+ igu_sb_cnt = bp->igu_sb_cnt;
+ bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
+ FP_SB_MAX_E1x;
+
+ bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
+ (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
+
+ return 0;
+ }
+
+ /* IGU in normal mode - read CAM */
+ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
+ igu_sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = IGU_FID(val);
+ if ((fid & IGU_FID_ENCODE_IS_PF)) {
+ if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
+ continue;
+ if (IGU_VEC(val) == 0)
+ /* default status block */
+ bp->igu_dsb_id = igu_sb_id;
+ else {
+ if (bp->igu_base_sb == 0xff)
+ bp->igu_base_sb = igu_sb_id;
+ igu_sb_cnt++;
+ }
+ }
+ }
+
+#ifdef CONFIG_PCI_MSI
+	/* Due to new PF resource allocation by MFW T7.4 and above, the number
+	 * of CAM entries may not be equal to the value advertised in PCI.
+	 * The driver should use the minimum of the two as the actual status
+	 * block count.
+ */
+ bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
+#endif
+
+ if (igu_sb_cnt == 0) {
+ BNX2X_ERR("CAM configuration error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
+{
+ int cfg_size = 0, idx, port = BP_PORT(bp);
+
+ /* Aggregation of supported attributes of all external phys */
+ bp->port.supported[0] = 0;
+ bp->port.supported[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+ cfg_size = 1;
+ break;
+ case 2:
+ bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+ cfg_size = 1;
+ break;
+ case 3:
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ } else {
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ }
+ cfg_size = 2;
+ break;
+ }
+
+ if (!(bp->port.supported[0] || bp->port.supported[1])) {
+ BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config),
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config2));
+ return;
+ }
+
+ if (CHIP_IS_E3(bp))
+ bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+ else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+ break;
+ case SWITCH_CFG_10G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+ break;
+ default:
+ BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+ bp->port.link_config[0]);
+ return;
+ }
+ }
+ BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+ /* mask what we support according to speed_cap_mask per configuration */
+ for (idx = 0; idx < cfg_size; idx++) {
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+ bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+ bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+ bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
+ }
+
+ BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+ bp->port.supported[1]);
+}
+
+static void bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+ u32 link_config, idx, cfg_size = 0;
+ bp->port.advertising[0] = 0;
+ bp->port.advertising[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ case 2:
+ cfg_size = 1;
+ break;
+ case 3:
+ cfg_size = 2;
+ break;
+ }
+ for (idx = 0; idx < cfg_size; idx++) {
+ bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+ link_config = bp->port.link_config[idx];
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_AUTO:
+ if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] |=
+ bp->port.supported[idx];
+ if (bp->link_params.phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bp->port.advertising[idx] |=
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+ } else {
+ /* force 10G, no AN */
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ continue;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_1G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_1000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ } else if (bp->port.supported[idx] &
+ SUPPORTED_1000baseKX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ ADVERTISED_1000baseKX_Full;
+ } else {
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_2500baseX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_2500;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_2500baseX_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ if (bp->port.supported[idx] &
+ SUPPORTED_10000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else if (bp->port.supported[idx] &
+ SUPPORTED_10000baseKR_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseKR_Full |
+ ADVERTISED_FIBRE);
+ } else {
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+ case PORT_FEATURE_LINK_SPEED_20G:
+ bp->link_params.req_line_speed[idx] = SPEED_20000;
+
+ break;
+ default:
+ BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
+ link_config);
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] =
+ bp->port.supported[idx];
+ break;
+ }
+
+ bp->link_params.req_flow_ctrl[idx] = (link_config &
+ PORT_FEATURE_FLOW_CONTROL_MASK);
+ if (bp->link_params.req_flow_ctrl[idx] ==
+ BNX2X_FLOW_CTRL_AUTO) {
+ if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
+ bp->link_params.req_flow_ctrl[idx] =
+ BNX2X_FLOW_CTRL_NONE;
+ else
+ bnx2x_set_requested_fc(bp);
+ }
+
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
+ bp->link_params.req_line_speed[idx],
+ bp->link_params.req_duplex[idx],
+ bp->link_params.req_flow_ctrl[idx],
+ bp->port.advertising[idx]);
+ }
+}
+
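+/* Compose a 6-byte MAC address from its shmem representation: the upper
+ * 16 bits followed by the lower 32 bits, both stored big-endian.
+ */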
+static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
+ __be16 mac_hi_be = cpu_to_be16(mac_hi);
+ __be32 mac_lo_be = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
+ memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
+}
+
+static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 config;
+ u32 ext_phy_type, ext_phy_config, eee_mode;
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = port;
+
+ bp->link_params.lane_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+
+ bp->link_params.speed_cap_mask[0] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
+ bp->link_params.speed_cap_mask[1] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask2) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
+ bp->port.link_config[0] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+ bp->port.link_config[1] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+
+ bp->link_params.multi_phy_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
+ /* If the device is capable of WoL, set the default state according
+ * to the HW
+ */
+ config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+ bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+ (config & PORT_FEATURE_WOL_ENABLED));
+
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
+ bp->flags |= NO_ISCSI_FLAG;
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
+ bp->flags |= NO_FCOE_FLAG;
+
+ BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
+ bp->link_params.lane_config,
+ bp->link_params.speed_cap_mask[0],
+ bp->port.link_config[0]);
+
+ bp->link_params.switch_cfg = (bp->port.link_config[0] &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ bnx2x_phy_probe(&bp->link_params);
+ bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+
+ bnx2x_link_settings_requested(bp);
+
+ /*
+	 * If connected directly, work with the internal PHY; otherwise work
+ * with the external PHY
+ */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bp->mdio.prtad = bp->port.phy_addr;
+
+ else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ bp->mdio.prtad =
+ XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+ /* Configure link feature according to nvram value */
+ eee_mode = (((SHMEM_RD(bp, dev_info.
+ port_feature_config[port].eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+ if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+ bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+ EEE_MODE_ENABLE_LPI |
+ EEE_MODE_OUTPUT_TIME;
+ } else {
+ bp->link_params.eee_mode = 0;
+ }
+}
+
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
+{
+ u32 no_flags = NO_ISCSI_FLAG;
+ int port = BP_PORT(bp);
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_iscsi_conn);
+
+ if (!CNIC_SUPPORT(bp)) {
+ bp->flags |= no_flags;
+ return;
+ }
+
+ /* Get the number of maximum allowed iSCSI connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn);
+
+ /*
+	 * If the maximum allowed number of connections is zero,
+	 * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= no_flags;
+}
+
+static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
+{
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
+}
+
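+/* Count the functions on this path that may run FCoE so the caller can
+ * split the per-engine FCoE task budget between them: in MF mode walk
+ * the function configurations, in SF mode check each port's license.
+ */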
+static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
+{
+ u8 count = 0;
+
+ if (IS_MF(bp)) {
+ u8 fid;
+
+ /* iterate over absolute function ids for this path: */
+ for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
+ if (IS_MF_SD(bp)) {
+ u32 cfg = MF_CFG_RD(bp,
+ func_mf_config[fid].config);
+
+ if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
+ ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
+ FUNC_MF_CFG_PROTOCOL_FCOE))
+ count++;
+ } else {
+ u32 cfg = MF_CFG_RD(bp,
+ func_ext_config[fid].
+ func_cfg);
+
+ if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
+ (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
+ count++;
+ }
+ }
+ } else { /* SF */
+ int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
+
+ for (port = 0; port < port_cnt; port++) {
+ u32 lic = SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn) ^
+ FW_ENCODE_32BIT_PATTERN;
+ if (lic)
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static void bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+ u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
+
+ if (!CNIC_SUPPORT(bp)) {
+ bp->flags |= NO_FCOE_FLAG;
+ return;
+ }
+
+ /* Get the number of maximum allowed FCoE connections */
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ /* Calculate the number of maximum allowed FCoE tasks */
+ bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+
+ /* check if FCoE resources must be shared between different functions */
+ if (num_fcoe_func)
+ bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
+
+ /* Read the WWN: */
+ if (!IS_MF(bp)) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_lower);
+ } else if (!IS_MF_SD(bp)) {
+ /* Read the WWN info only if the FCoE feature is enabled for
+ * this function.
+ */
+ if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
+ bnx2x_get_ext_wwn_info(bp, func);
+ } else {
+ if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
+ bnx2x_get_ext_wwn_info(bp, func);
+ }
+
+ BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
+
+ /*
+	 * If the maximum allowed number of connections is zero,
+	 * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_fcoe_conn) {
+ bp->flags |= NO_FCOE_FLAG;
+ eth_zero_addr(bp->fip_mac);
+ }
+}
+
+static void bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ /*
+	 * iSCSI may be dynamically disabled, but by reading the
+	 * info here the driver can decrease its memory usage
+	 * if the feature is disabled for good
+ */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_get_fcoe_info(bp);
+}
+
+static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+
+ if (IS_MF(bp)) {
+		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or FCoE
+		 * MAC is missing, the corresponding feature should be disabled.
+		 * In non-SD mode, feature configuration comes from struct
+ * func_ext_config.
+ */
+ if (!IS_MF_SD(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+ if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+ BNX2X_DEV_INFO
+ ("Read iSCSI MAC: %pM\n", iscsi_mac);
+ } else {
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+ }
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+ BNX2X_DEV_INFO
+ ("Read FCoE L2 MAC: %pM\n", fip_mac);
+ } else {
+ bp->flags |= NO_FCOE_FLAG;
+ }
+
+ bp->mf_ext_config = cfg;
+
+ } else { /* SD MODE */
+ if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO
+ ("Read iSCSI MAC: %pM\n", iscsi_mac);
+ } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+ /* use primary mac as fip mac */
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+ BNX2X_DEV_INFO("SD FCoE MODE\n");
+ BNX2X_DEV_INFO
+ ("Read FIP MAC: %pM\n", fip_mac);
+ }
+ }
+
+ /* If this is a storage-only interface, use SAN mac as
+ * primary MAC. Notice that for SD this is already the case,
+ * as the SAN mac was copied from the primary MAC.
+ */
+ if (IS_MF_FCOE_AFEX(bp))
+ eth_hw_addr_set(bp->dev, fip_mac);
+ } else {
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+ }
+
+ /* Disable iSCSI OOO if MAC configuration is invalid. */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+ eth_zero_addr(iscsi_mac);
+ }
+
+ /* Disable FCoE if MAC configuration is invalid. */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ eth_zero_addr(bp->fip_mac);
+ }
+}
+
+static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+ u8 addr[ETH_ALEN] = {};
+
+ /* Zero primary MAC configuration */
+ eth_hw_addr_set(bp->dev, addr);
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ eth_hw_addr_random(bp->dev);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
+ }
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_get_cnic_mac_hwinfo(bp);
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_get_cnic_mac_hwinfo(bp);
+ }
+
+ if (!BP_NOMCP(bp)) {
+ /* Read physical port identifier from shmem */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
+ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+
+ if (!is_valid_ether_addr(bp->dev->dev_addr))
+ dev_err(&bp->pdev->dev,
+ "bad Ethernet MAC address configuration: %pM\n"
+ "change it manually before bringing up the appropriate network interface\n",
+ bp->dev->dev_addr);
+}
+
+static bool bnx2x_get_dropless_info(struct bnx2x *bp)
+{
+ int tmp;
+ u32 cfg;
+
+ if (IS_VF(bp))
+ return false;
+
+ if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
+ /* Take function: tmp = func */
+ tmp = BP_ABS_FUNC(bp);
+ cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
+ cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
+ } else {
+ /* Take port: tmp = port */
+ tmp = BP_PORT(bp);
+ cfg = SHMEM_RD(bp,
+ dev_info.port_hw_config[tmp].generic_features);
+ cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
+ }
+ return cfg;
+}
+
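+/* An upper-MAC value of 0xffff (FUNC_MF_CFG_UPPERMAC_DEFAULT) is treated
+ * as an illegal MAC, in which case switch-independent mode is not set.
+ */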
+static void validate_set_si_mode(struct bnx2x *bp)
+{
+ u8 func = BP_ABS_FUNC(bp);
+ u32 val;
+
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+
+ /* check for legal mac (upper bytes) */
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[BP_VN(bp)] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal MAC address for SI\n");
+}
+
+static int bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+ int /*abs*/func = BP_ABS_FUNC(bp);
+ int vn;
+ u32 val = 0, val2 = 0;
+ int rc = 0;
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
+ dev_err(&bp->pdev->dev,
+ "Chip read returns all Fs. Preventing probe from continuing\n");
+ return -EINVAL;
+ }
+
+ bnx2x_get_common_hwinfo(bp);
+
+ /*
+ * initialize IGU parameters
+ */
+ if (CHIP_IS_E1x(bp)) {
+ bp->common.int_block = INT_BLOCK_HC;
+
+ bp->igu_dsb_id = DEF_SB_IGU_ID;
+ bp->igu_base_sb = 0;
+ } else {
+ bp->common.int_block = INT_BLOCK_IGU;
+
+ /* do not allow device reset during IGU info processing */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ int tout = 5000;
+
+ BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+ val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
+ while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ tout--;
+ usleep_range(1000, 2000);
+ }
+
+ if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ dev_err(&bp->pdev->dev,
+ "FORCING Normal Mode failed!!!\n");
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESET);
+ return -EPERM;
+ }
+ }
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
+ bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
+ } else
+ BNX2X_DEV_INFO("IGU Normal Mode\n");
+
+ rc = bnx2x_get_igu_cam_info(bp);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ if (rc)
+ return rc;
+ }
+
+ /*
+	 * Set the base FW non-default (fast path) status block id; this value
+	 * is used to initialize the fw_sb_id saved on the fp/queue structure,
+	 * which determines the id used by the FW.
+ */
+ if (CHIP_IS_E1x(bp))
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+ else /*
+ * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+ * the same queue are indicated on the same IGU SB). So we prefer
+ * FW and IGU SBs to be the same value.
+ */
+ bp->base_fw_ndsb = bp->igu_base_sb;
+
+ BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
+ "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+ bp->igu_sb_cnt, bp->base_fw_ndsb);
+
+ /*
+ * Initialize MF configuration
+ */
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->mf_sub_mode = 0;
+ vn = BP_VN(bp);
+
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+ bp->common.shmem2_base, SHMEM2_RD(bp, size),
+ (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
+ if (SHMEM2_HAS(bp, mf_cfg_addr))
+ bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+ else
+ bp->common.mf_cfg_base = bp->common.shmem_base +
+ offsetof(struct shmem_region, func_mb) +
+ E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+ /*
+ * get mf configuration:
+ * 1. Existence of MF configuration
+ * 2. MAC address must be legal (check only upper bytes)
+ * for Switch-Independent mode;
+ * OVLAN must be legal for Switch-Dependent mode
+ * 3. SF_MODE configures specific MF mode
+ */
+ if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ /* get mf configuration */
+ val = SHMEM_RD(bp,
+ dev_info.shared_feature_config.config);
+ val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+ switch (val) {
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+ validate_set_si_mode(bp);
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
+ if ((!CHIP_IS_E1x(bp)) &&
+ (MF_CFG_RD(bp, func_mf_config[func].
+ mac_upper) != 0xffff) &&
+ (SHMEM2_HAS(bp,
+ afex_driver_support))) {
+ bp->mf_mode = MULTI_FUNCTION_AFEX;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else {
+ BNX2X_DEV_INFO("can not configure afex mode\n");
+ }
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+ /* get OV configuration */
+ val = MF_CFG_RD(bp,
+ func_mf_config[FUNC_0].e1hov_tag);
+ val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal OV for SD\n");
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_sub_mode = SUB_MF_MODE_BD;
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp,
+ func_mf_config[func].config);
+
+ if (SHMEM2_HAS(bp, mtu_size)) {
+ int mtu_idx = BP_FW_MB_IDX(bp);
+ u16 mtu_size;
+ u32 mtu;
+
+ mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+ mtu_size = (u16)mtu;
+ DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+ mtu_size, mtu);
+
+ /* if valid: update device mtu */
+ if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
+ (mtu_size <=
+ ETH_MAX_JUMBO_PACKET_SIZE))
+ bp->dev->mtu = mtu_size;
+ }
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_sub_mode = SUB_MF_MODE_UFP;
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ bp->mf_config[vn] = 0;
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
+ val2 = SHMEM_RD(bp,
+ dev_info.shared_hw_config.config_3);
+ val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
+ switch (val2) {
+ case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
+ validate_set_si_mode(bp);
+ bp->mf_sub_mode =
+ SUB_MF_MODE_NPAR1_DOT_5;
+ break;
+ default:
+ /* Unknown configuration */
+ bp->mf_config[vn] = 0;
+ BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
+ val);
+ }
+ break;
+ default:
+ /* Unknown configuration: reset mf_config */
+ bp->mf_config[vn] = 0;
+ BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
+ }
+ }
+
+ BNX2X_DEV_INFO("%s function mode\n",
+ IS_MF(bp) ? "multi" : "single");
+
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_ov = val;
+ bp->path_has_ovlan = true;
+
+ BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
+ func, bp->mf_ov, bp->mf_ov);
+ } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+ (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
+ dev_err(&bp->pdev->dev,
+ "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
+ func);
+ bp->path_has_ovlan = true;
+ } else {
+ dev_err(&bp->pdev->dev,
+ "No valid MF OV for func %d, aborting\n",
+ func);
+ return -EPERM;
+ }
+ break;
+ case MULTI_FUNCTION_AFEX:
+ BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
+ break;
+ case MULTI_FUNCTION_SI:
+ BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
+ func);
+ break;
+ default:
+ if (vn) {
+ dev_err(&bp->pdev->dev,
+ "VN %d is in a single function mode, aborting\n",
+ vn);
+ return -EPERM;
+ }
+ break;
+ }
+
+		/* Check if the other port on the path needs ovlan:
+		 * since the MF configuration is shared between ports,
+		 * the only possible mixed modes are
+		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
+ */
+ if (CHIP_MODE_IS_4_PORT(bp) &&
+ !bp->path_has_ovlan &&
+ !IS_MF(bp) &&
+ bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ u8 other_port = !BP_PORT(bp);
+ u8 other_func = BP_PATH(bp) + 2*other_port;
+ val = MF_CFG_RD(bp,
+ func_mf_config[other_func].e1hov_tag);
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+ bp->path_has_ovlan = true;
+ }
+ }
+
+ /* adjust igu_sb_cnt to MF for E1H */
+ if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
+
+ /* port info */
+ bnx2x_get_port_hwinfo(bp);
+
+ /* Get MAC addresses */
+ bnx2x_get_mac_hwinfo(bp);
+
+ bnx2x_get_cnic_info(bp);
+
+ return rc;
+}
+
+static void bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ char str_id[VENDOR_ID_LEN + 1];
+ unsigned int vpd_len, kw_len;
+ u8 *vpd_data;
+ int rodi;
+
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
+ if (IS_ERR(vpd_data))
+ return;
+
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
+ if (rodi < 0 || kw_len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ /* vendor specific info */
+ snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &kw_len);
+ if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
+ bp->fw_ver[kw_len] = ' ';
+ }
+ }
+out_not_found:
+ kfree(vpd_data);
+}
+
+static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+ u32 flags = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ SET_FLAGS(flags, MODE_FPGA);
+ else if (CHIP_REV_IS_EMUL(bp))
+ SET_FLAGS(flags, MODE_EMUL);
+ else
+ SET_FLAGS(flags, MODE_ASIC);
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ SET_FLAGS(flags, MODE_PORT4);
+ else
+ SET_FLAGS(flags, MODE_PORT2);
+
+ if (CHIP_IS_E2(bp))
+ SET_FLAGS(flags, MODE_E2);
+ else if (CHIP_IS_E3(bp)) {
+ SET_FLAGS(flags, MODE_E3);
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ SET_FLAGS(flags, MODE_E3_A0);
+ else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+ SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+ }
+
+ if (IS_MF(bp)) {
+ SET_FLAGS(flags, MODE_MF);
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ SET_FLAGS(flags, MODE_MF_SD);
+ break;
+ case MULTI_FUNCTION_SI:
+ SET_FLAGS(flags, MODE_MF_SI);
+ break;
+ case MULTI_FUNCTION_AFEX:
+ SET_FLAGS(flags, MODE_MF_AFEX);
+ break;
+ }
+ } else
+ SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+ SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+ SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+ INIT_MODE_FLAGS(bp) = flags;
+}
+
+static int bnx2x_init_bp(struct bnx2x *bp)
+{
+ int func;
+ int rc;
+
+ mutex_init(&bp->port.phy_mutex);
+ mutex_init(&bp->fw_mb_mutex);
+ mutex_init(&bp->drv_info_mutex);
+ sema_init(&bp->stats_lock, 1);
+ bp->drv_info_mng_owner = false;
+ INIT_LIST_HEAD(&bp->vlan_reg);
+
+ INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+ INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+ INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+ INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
+ if (IS_PF(bp)) {
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+ } else {
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(bp->dev, zero_addr);
+ }
+
+ bnx2x_set_modes_bitmap(bp);
+
+ rc = bnx2x_alloc_mem_bp(bp);
+ if (rc)
+ return rc;
+
+ bnx2x_read_fwinfo(bp);
+
+ func = BP_FUNC(bp);
+
+ /* need to reset chip if undi was active */
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
+ /* init fw_seq */
+ bp->fw_seq =
+ SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ rc = bnx2x_prev_unload(bp);
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
+ }
+
+ if (CHIP_REV_IS_FPGA(bp))
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
+
+ if (BP_NOMCP(bp) && (func == 0))
+ dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
+
+ bp->disable_tpa = disable_tpa;
+ bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
+ /* Reduce memory usage in kdump environment by disabling TPA */
+ bp->disable_tpa |= is_kdump_kernel();
+
+ /* Set TPA flags */
+ if (bp->disable_tpa) {
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ }
+
+ if (CHIP_IS_E1(bp))
+ bp->dropless_fc = false;
+ else
+ bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
+
+ bp->mrrs = mrrs;
+
+ bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
+ if (IS_VF(bp))
+ bp->rx_ring_size = MAX_RX_AVAIL;
+
+ /* make sure that the numbers are in the right granularity */
+ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
+ bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
+
+ bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
+
+ timer_setup(&bp->timer, bnx2x_timer, 0);
+ bp->timer.expires = jiffies + bp->current_interval;
+
+ if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
+ SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
+ SHMEM2_HAS(bp, dcbx_en) &&
+ SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
+ SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
+ SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
+ bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+ bnx2x_dcbx_init_params(bp);
+ } else {
+ bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
+ }
+
+ if (CHIP_IS_E1x(bp))
+ bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+ else
+ bp->cnic_base_cl_id = FP_SB_MAX_E2;
+
+ /* multiple tx priority */
+ if (IS_VF(bp))
+ bp->max_cos = 1;
+ else if (CHIP_IS_E1x(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+ else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+ else if (CHIP_IS_E3B0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ else
+ BNX2X_ERR("unknown chip %x revision %x\n",
+ CHIP_NUM(bp), CHIP_REV(bp));
+ BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
+
+ /* We need at least one default status block for slow-path events,
+	 * a second status block for the L2 queue, and a third status block for
+ * CNIC if supported.
+ */
+ if (IS_VF(bp))
+ bp->min_msix_vec_cnt = 1;
+ else if (CNIC_SUPPORT(bp))
+ bp->min_msix_vec_cnt = 3;
+ else /* PF w/o cnic */
+ bp->min_msix_vec_cnt = 2;
+ BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
+
+ bp->dump_preset_idx = 1;
+
+ return rc;
+}
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/*
+ * net_device service functions
+ */
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc;
+
+ bp->stats_init = true;
+
+ netif_carrier_off(dev);
+
+ bnx2x_set_power_state(bp, PCI_D0);
+
+	/* If a parity error happened during the unload, then attentions
+	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+	 * want the first function loaded on the current engine to
+	 * complete the recovery.
+	 * Parity recovery is only relevant for the PF driver.
+ */
+ if (IS_PF(bp)) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ bool other_load_status, load_status;
+ bool global = false;
+
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true)) {
+ do {
+ /* If there are attentions and they are in a
+ * global blocks, set the GLOBAL_RESET bit
+ * regardless whether it will be this function
+ * that will complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ /* Only the first function on the current
+ * engine should try to recover in open. In case
+ * of attentions in global blocks only the first
+ * in the chip should try to recover.
+ */
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev,
+ "Recovered in open\n");
+ break;
+ }
+
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
+
+ return -EAGAIN;
+ } while (0);
+ }
+ }
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Unload the driver, release IRQs */
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
+
+ return 0;
+}
+
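+/* Multicast list elements are carved out of whole pages: each page holds
+ * a group header followed by MCAST_ELEMS_PER_PG elements, and the groups
+ * are chained through mcast_group_link for later freeing.
+ */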
+struct bnx2x_mcast_list_elem_group
+{
+ struct list_head mcast_group_link;
+ struct bnx2x_mcast_list_elem mcast_elems[];
+};
+
+#define MCAST_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
+ sizeof(struct bnx2x_mcast_list_elem))
+
+static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_list_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_list_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
+ }
+}
+
+static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_list_elem *mc_mac;
+ struct netdev_hw_addr *ha;
+ struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
+ int mc_count = netdev_mc_count(bp->dev);
+ int offset = 0;
+
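+	/* Pack the elements into page-sized groups while also linking each
+	 * element into the flat p->mcast_list that the ramrod code consumes.
+	 */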
+ INIT_LIST_HEAD(&p->mcast_list);
+ netdev_for_each_mc_addr(ha, bp->dev) {
+ if (!offset) {
+ current_mcast_group =
+ (struct bnx2x_mcast_list_elem_group *)
+ __get_free_page(GFP_ATOMIC);
+ if (!current_mcast_group) {
+ bnx2x_free_mcast_macs_list(mcast_group_list);
+ BNX2X_ERR("Failed to allocate mc MAC list\n");
+ return -ENOMEM;
+ }
+ list_add(&current_mcast_group->mcast_group_link,
+ mcast_group_list);
+ }
+ mc_mac = &current_mcast_group->mcast_elems[offset];
+ mc_mac->mac = bnx2x_mc_addr(ha);
+ list_add_tail(&mc_mac->link, &p->mcast_list);
+ offset++;
+ if (offset == MCAST_ELEMS_PER_PG)
+ offset = 0;
+ }
+ p->mcast_list_len = mc_count;
+ return 0;
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new unicast MACs list.
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
+ unsigned long ramrod_flags = 0;
+
+	/* First schedule a cleanup of the old configuration */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+ return rc;
+ }
+
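+	/* Schedule an ADD for every address on the netdev UC list; the
+	 * scheduled commands are executed together by the RAMROD_CONT call
+	 * below.
+	 */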
+ netdev_for_each_uc_addr(ha, dev) {
+ rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+ if (rc == -EEXIST) {
+ DP(BNX2X_MSG_SP,
+ "Failed to schedule ADD operations: %d\n", rc);
+ /* do not treat adding same MAC as error */
+ rc = 0;
+
+ } else if (rc < 0) {
+
+ BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Execute the pending commands */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+}
+
+static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
+{
+ LIST_HEAD(mcast_group_list);
+ struct net_device *dev = bp->dev;
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ int rc = 0;
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ /* first, clear all configured multicast MACs */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
+ return rc;
+ }
+
+ /* then, configure a new MACs list */
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
+ if (rc)
+ return rc;
+
+ /* Now add the new MACs */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_ADD);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+ rc);
+
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
+ }
+
+ return rc;
+}
+
+static int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ LIST_HEAD(mcast_group_list);
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ struct net_device *dev = bp->dev;
+ int rc = 0;
+
+ /* On older adapters, we need to flush and re-add filters */
+ if (CHIP_IS_E1x(bp))
+ return bnx2x_set_mc_list_e1x(bp);
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
+ if (rc)
+ return rc;
+
+		/* Override the currently configured set of mc filters */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_SET);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+ rc);
+
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
+ } else {
+ /* If no mc addresses are required, flush the configuration */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to clear multicast configuration %d\n",
+ rc);
+ }
+
+ return rc;
+}
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
+static void bnx2x_set_rx_mode(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->state != BNX2X_STATE_OPEN) {
+		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+		return;
+	}
+
+	/* Schedule an SP task to handle the rest of the change */
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, NETIF_MSG_IFUP);
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
+
+ netif_addr_lock_bh(bp->dev);
+
+ if (bp->dev->flags & IFF_PROMISC) {
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp))) {
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ } else {
+ if (IS_PF(bp)) {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+ /* release bh lock, as bnx2x_set_uc_list might sleep */
+ netif_addr_unlock_bh(bp->dev);
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ netif_addr_lock_bh(bp->dev);
+ } else {
+			/* Configuring multicast for a VF involves sleeping
+			 * (while we wait for the PF's response).
+			 */
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_VFPF_MCAST, 0);
+ }
+ }
+
+ bp->rx_mode = rx_mode;
+ /* handle ISCSI SD mode */
+ if (IS_MF_ISCSI_ONLY(bp))
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ netif_addr_unlock_bh(bp->dev);
+ return;
+ }
+
+ if (IS_PF(bp)) {
+ bnx2x_set_storm_rx_mode(bp);
+ netif_addr_unlock_bh(bp->dev);
+ } else {
+ /* VF will need to request the PF to make this change, and so
+ * the VF needs to release the bottom-half lock prior to the
+ * request (as it will likely require sleep on the VF side)
+ */
+ netif_addr_unlock_bh(bp->dev);
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
+ int devad, u16 addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 value;
+ int rc;
+
+ DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
+ prtad, devad, addr);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
+ bnx2x_release_phy_lock(bp);
+ DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
+
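+	/* On success, return the value read; otherwise propagate the error */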
+ if (!rc)
+ rc = value;
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
+ u16 addr, u16 value)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ int rc;
+
+ DP(NETIF_MSG_LINK,
+ "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
+ prtad, devad, addr, value);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct mii_ioctl_data *mdio = if_mii(ifr);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bnx2x_hwtstamp_ioctl(bp, ifr);
+ default:
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+ }
+}
+
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* query the bulletin board for mac address configured by the PF */
+ if (IS_VF(bp))
+ bnx2x_sample_bulletin(bp);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+		BNX2X_ERR("Invalid Ethernet address\n");
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(bp->phys_port_id);
+ memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ /*
+ * A skb with gso_size + header length > 9700 will cause a
+ * firmware panic. Drop GSO support.
+ *
+ * Eventually the upper layer should not pass these packets down.
+ *
+ * For speed, if the gso_size is <= 9000, assume there will
+ * not be 700 bytes of headers and pass it through. Only do a
+ * full (slow) validation if the gso_size is > 9000.
+ *
+ * (Due to the way SKB_BY_FRAGS works this will also do a full
+ * validation in that case.)
+ */
+ if (unlikely(skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_size > 9000) &&
+ !skb_gso_validate_mac_len(skb, 9700)))
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+ int rc;
+
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+ add, &ramrod_flags);
+ } else {
+ rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+ }
+
+ return rc;
+}
+
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+ int rc = 0;
+
+ /* Configure all non-configured entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link) {
+ if (vlan->hw)
+ continue;
+
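+		/* Stop once the HW VLAN filter credits are exhausted; the
+		 * caller will then fall back to accept-any-VLAN mode.
+		 */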
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
+
+ rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+ if (rc) {
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
+ }
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
+ }
+
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+
+ DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+ vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
+
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
+
+ return 0;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+ bool found = false;
+ int rc = 0;
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ if (vlan->vid == vid) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+ return -EINVAL;
+ }
+
+ if (netif_running(dev) && vlan->hw) {
+ rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
+
+ list_del(&vlan->link);
+ kfree(vlan);
+
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+ return rc;
+}
+
+static const struct net_device_ops bnx2x_netdev_ops = {
+ .ndo_open = bnx2x_open,
+ .ndo_stop = bnx2x_close,
+ .ndo_start_xmit = bnx2x_start_xmit,
+ .ndo_select_queue = bnx2x_select_queue,
+ .ndo_set_rx_mode = bnx2x_set_rx_mode,
+ .ndo_set_mac_address = bnx2x_change_mac_addr,
+ .ndo_validate_addr = bnx2x_validate_addr,
+ .ndo_eth_ioctl = bnx2x_ioctl,
+ .ndo_change_mtu = bnx2x_change_mtu,
+ .ndo_fix_features = bnx2x_fix_features,
+ .ndo_set_features = bnx2x_set_features,
+ .ndo_tx_timeout = bnx2x_tx_timeout,
+ .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
+ .ndo_setup_tc = __bnx2x_setup_tc,
+#ifdef CONFIG_BNX2X_SRIOV
+ .ndo_set_vf_mac = bnx2x_set_vf_mac,
+ .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
+ .ndo_get_vf_config = bnx2x_get_vf_config,
+ .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
+#endif
+#ifdef NETDEV_FCOE_WWNN
+ .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
+#endif
+
+ .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
+ .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
+ .ndo_features_check = bnx2x_features_check,
+};
+
+static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ struct net_device *dev, unsigned long board_type)
+{
+ int rc;
+ u32 pci_cfg_dword;
+ bool chip_is_e1x = (board_type == BCM57710 ||
+ board_type == BCM57711 ||
+ board_type == BCM57711E);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
+ goto err_out;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
+ if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
+ PCICFG_REVESION_ID_ERROR_VAL) {
+ pr_err("PCI device error, probably due to fan failure, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ if (IS_PF(bp)) {
+ if (!pdev->pm_cap) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
+ goto err_out_release;
+ }
+
+ dev->mem_start = pci_resource_start(pdev, 0);
+ dev->base_addr = dev->mem_start;
+ dev->mem_end = pci_resource_end(pdev, 0);
+
+ dev->irq = pdev->irq;
+
+ bp->regview = pci_ioremap_bar(pdev, 0);
+ if (!bp->regview) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err_out_release;
+ }
+
+	/* In E1/E1H use the PCI device function given by the kernel.
+	 * In E2/E3 read the physical function from the ME register, since
+	 * these chips support Physical Device Assignment where the kernel
+	 * BDF may be arbitrary (depending on the hypervisor).
+ */
+ if (chip_is_e1x) {
+ bp->pf_num = PCI_FUNC(pdev->devfn);
+ } else {
+ /* chip is E2/3*/
+ pci_read_config_dword(bp->pdev,
+ PCICFG_ME_REGISTER, &pci_cfg_dword);
+ bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ }
+ BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
+
+ /* clean indirect addresses */
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+
+ /* Set PCIe reset type to fundamental for EEH recovery */
+ pdev->needs_freset = 1;
+
+ /*
+	 * Clean the following indirect addresses for all functions since
+	 * they are not used by the driver.
+ */
+ if (IS_PF(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (chip_is_e1x) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
+
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!chip_is_e1x)
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+ }
+
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->netdev_ops = &bnx2x_netdev_ops;
+ bnx2x_set_ethtool_ops(bp, dev);
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
+ NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
+ if (!chip_is_e1x) {
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+
+ dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (IS_PF(bp))
+ dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels;
+ }
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+
+ if (IS_PF(bp)) {
+ if (chip_is_e1x)
+ bp->accept_any_vlan = true;
+ else
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+	/* For a VF we'll know whether to enable VLAN filtering after
+	 * getting a response to CHANNEL_TLV_ACQUIRE from the PF.
+	 */
+
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_LRO)
+ dev->features &= ~NETIF_F_GRO_HW;
+
+ /* Add Loopback capability to the device */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
+#ifdef BCM_DCBNL
+ dev->dcbnl_ops = &bnx2x_dcbnl_ops;
+#endif
+
+ /* MTU range, 46 - 9600 */
+ dev->min_mtu = ETH_MIN_PACKET_SIZE;
+ dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
+
+ /* get_port_hwinfo() will set prtad and mmds properly */
+ bp->mdio.prtad = MDIO_PRTAD_NONE;
+ bp->mdio.mmds = 0;
+ bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ bp->mdio.dev = dev;
+ bp->mdio.mdio_read = bnx2x_mdio_read;
+ bp->mdio.mdio_write = bnx2x_mdio_write;
+
+ return 0;
+
+err_out_release:
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+
+err_out:
+ return rc;
+}
+
+static int bnx2x_check_firmware(struct bnx2x *bp)
+{
+ const struct firmware *firmware = bp->firmware;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ struct bnx2x_fw_file_section *sections;
+ u32 offset, len, num_ops;
+ __be16 *ops_offsets;
+ int i;
+ const u8 *fw_ver;
+
+ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
+ BNX2X_ERR("Wrong FW size\n");
+ return -EINVAL;
+ }
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+ sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+ /* Make sure none of the offsets and sizes make us read beyond
+ * the end of the firmware data */
+ for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+ offset = be32_to_cpu(sections[i].offset);
+ len = be32_to_cpu(sections[i].len);
+ if (offset + len > firmware->size) {
+ BNX2X_ERR("Section %d length is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Likewise for the init_ops offsets */
+ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+ ops_offsets = (__force __be16 *)(firmware->data + offset);
+ num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
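+	/* Each 16-bit offset indexes into the init_ops array, so it must not
+	 * point past the number of ops in the file.
+	 */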
+ for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+ if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+ BNX2X_ERR("Section offset %d is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Check FW version */
+ offset = be32_to_cpu(fw_hdr->fw_version.offset);
+ fw_ver = firmware->data + offset;
+ if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+ fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
+ BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
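+/* Byte-swap an array of big-endian 32-bit words into host order;
+ * n is the length in bytes.
+ */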
+static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ u32 *target = (u32 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/4; i++)
+ target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+ Ops array is stored in the following format:
+ {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
+static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct raw_op *target = (struct raw_op *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/8; i++, j += 2) {
+ tmp = be32_to_cpu(source[j]);
+ target[i].op = (tmp >> 24) & 0xff;
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
+ }
+}
+
+/* IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
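+ * Each entry thus occupies three big-endian 32-bit words in the file.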
+ */
+static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct iro *target = (struct iro *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+ target[i].base = be32_to_cpu(source[j]);
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m1 = (tmp >> 16) & 0xffff;
+ target[i].m2 = tmp & 0xffff;
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m3 = (tmp >> 16) & 0xffff;
+ target[i].size = tmp & 0xffff;
+ j++;
+ }
+}
+
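+/* Byte-swap an array of big-endian 16-bit words into host order;
+ * n is the length in bytes.
+ */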
+static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be16 *source = (const __be16 *)_source;
+ u16 *target = (u16 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/2; i++)
+ target[i] = be16_to_cpu(source[i]);
+}
+
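+/* Allocate bp->arr and fill it from the named section of the firmware
+ * file, converting from big-endian with the given helper; jumps to 'lbl'
+ * on allocation failure.
+ */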
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+do { \
+ u32 len = be32_to_cpu(fw_hdr->arr.len); \
+ bp->arr = kmalloc(len, GFP_KERNEL); \
+ if (!bp->arr) \
+ goto lbl; \
+ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+ (u8 *)bp->arr, len); \
+} while (0)
+
+static int bnx2x_init_firmware(struct bnx2x *bp)
+{
+ const char *fw_file_name, *fw_file_name_v15;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ int rc;
+
+ if (bp->firmware)
+ return 0;
+
+ if (CHIP_IS_E1(bp)) {
+ fw_file_name = FW_FILE_NAME_E1;
+ fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+ } else if (CHIP_IS_E1H(bp)) {
+ fw_file_name = FW_FILE_NAME_E1H;
+ fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+ } else if (!CHIP_IS_E1x(bp)) {
+ fw_file_name = FW_FILE_NAME_E2;
+ fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+ } else {
+ BNX2X_ERR("Unsupported chip revision\n");
+ return -EINVAL;
+ }
+
+ BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+
+ rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
+ if (rc) {
+ BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+ /* try to load prev version */
+ rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+ if (rc)
+ goto request_firmware_exit;
+
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+ } else {
+ bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
+ }
+
+ bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+ bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+ bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
+ rc = bnx2x_check_firmware(bp);
+ if (rc) {
+ BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+ /* Initialize the pointers to the init arrays */
+ /* Blob */
+ rc = -ENOMEM;
+ BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+ /* Opcodes */
+ BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+ /* Offsets */
+ BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
+ be16_to_cpu_n);
+
+ /* STORMs firmware */
+ INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+ INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+ INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+ INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_pram_data.offset);
+ INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+ INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+ INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+ INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_pram_data.offset);
+ /* IRO */
+ BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
+
+ return 0;
+
+iro_alloc_err:
+ kfree(bp->init_ops_offsets);
+init_offsets_alloc_err:
+ kfree(bp->init_ops);
+init_ops_alloc_err:
+ kfree(bp->init_data);
+request_firmware_exit:
+ release_firmware(bp->firmware);
+ bp->firmware = NULL;
+
+ return rc;
+}
+
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
+ bp->firmware = NULL;
+}
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+ .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+ .init_hw_cmn = bnx2x_init_hw_common,
+ .init_hw_port = bnx2x_init_hw_port,
+ .init_hw_func = bnx2x_init_hw_func,
+
+ .reset_hw_cmn = bnx2x_reset_common,
+ .reset_hw_port = bnx2x_reset_port,
+ .reset_hw_func = bnx2x_reset_func,
+
+ .gunzip_init = bnx2x_gunzip_init,
+ .gunzip_end = bnx2x_gunzip_end,
+
+ .init_fw = bnx2x_init_firmware,
+ .release_fw = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+ /* Prepare DMAE related driver resources */
+ bnx2x_setup_dmae(bp);
+
+ bnx2x_init_func_obj(bp, &bp->func_obj,
+ bnx2x_sp(bp, func_rdata),
+ bnx2x_sp_mapping(bp, func_rdata),
+ bnx2x_sp(bp, func_afex_rdata),
+ bnx2x_sp_mapping(bp, func_afex_rdata),
+ &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
+static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+ int cid_count = BNX2X_L2_MAX_CID(bp);
+
+ if (IS_SRIOV(bp))
+ cid_count += BNX2X_VF_CIDS;
+
+ if (CNIC_SUPPORT(bp))
+ cid_count += CNIC_CID_MAX;
+
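+	/* Round the count up to the QM allocation granularity */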
+ return roundup(cid_count, QM_CID_ROUND);
+}
+
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ * @pdev: pci device
+ * @cnic_cnt: count
+ *
+ */
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
+{
+ int index;
+ u16 control = 0;
+
+ /*
+	 * If MSI-X is not supported, return the number of SBs needed to
+	 * support one fast-path queue: one FP queue + an SB for CNIC.
+ */
+ if (!pdev->msix_cap) {
+ dev_info(&pdev->dev, "no msix capability found\n");
+ return 1 + cnic_cnt;
+ }
+ dev_info(&pdev->dev, "msix capability found\n");
+
+ /*
+ * The value in the PCI configuration space is the index of the last
+ * entry, namely one less than the actual size of the table, which is
+	 * exactly what we want to return from this function: the number of
+	 * all SBs without the default SB.
+	 * For VFs there is no default SB, so we return (index + 1).
+ */
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
+
+ index = control & PCI_MSIX_FLAGS_QSIZE;
+
+ return index;
+}
+
+static int set_max_cos_est(int chip_id)
+{
+ switch (chip_id) {
+ case BCM57710:
+ case BCM57711:
+ case BCM57711E:
+ return BNX2X_MULTI_TX_COS_E1X;
+ case BCM57712:
+ case BCM57712_MF:
+ return BNX2X_MULTI_TX_COS_E2_E3A0;
+ case BCM57800:
+ case BCM57800_MF:
+ case BCM57810:
+ case BCM57810_MF:
+ case BCM57840_4_10:
+ case BCM57840_2_20:
+ case BCM57840_O:
+ case BCM57840_MFO:
+ case BCM57840_MF:
+ case BCM57811:
+ case BCM57811_MF:
+ return BNX2X_MULTI_TX_COS_E3B0;
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return 1;
+ default:
+ pr_err("Unknown board_type (%d), aborting\n", chip_id);
+ return -ENODEV;
+ }
+}
+
+static int set_is_vf(int chip_id)
+{
+ switch (chip_id) {
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* nig_tsgen registers relative address */
+#define tsgen_ctrl 0x0
+#define tsgen_freecount 0x10
+#define tsgen_synctime_t0 0x20
+#define tsgen_offset_t0 0x28
+#define tsgen_drift_t0 0x30
+#define tsgen_synctime_t1 0x58
+#define tsgen_offset_t1 0x60
+#define tsgen_drift_t1 0x68
+
+/* FW workaround for setting drift */
+static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
+ int best_val, int best_period)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &func_params.params.set_timesync;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+ /* Function parameters */
+ set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
+ set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+ set_timesync_params->add_sub_drift_adjust_value =
+ drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
+ set_timesync_params->drift_adjust_value = best_val;
+ set_timesync_params->drift_adjust_period = best_period;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ int rc;
+ int drift_dir = 1;
+ int val, period, period1, period2, dif, dif1, dif2;
+ int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb);
+
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP adjfine called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ drift_dir = 0;
+ }
+
+ if (ppb == 0) {
+ best_val = 1;
+ best_period = 0x1FFFFFF;
+ } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
+ best_val = 31;
+ best_period = 1;
+ } else {
+		/* Do not allow val = 8, 16, 24 as these values are not
+		 * supported by the workaround.
+		 */
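+		/* Per the arithmetic below, the achieved adjustment is
+		 * val * 1000000 / period ppb; scan the valid 'val' values
+		 * and keep the (val, period) pair whose adjustment is
+		 * closest to the requested ppb.
+		 */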
+ for (val = 0; val <= 31; val++) {
+ if ((val & 0x7) == 0)
+ continue;
+ period1 = val * 1000000 / ppb;
+ period2 = period1 + 1;
+ if (period1 != 0)
+ dif1 = ppb - (val * 1000000 / period1);
+ else
+ dif1 = BNX2X_MAX_PHC_DRIFT;
+ if (dif1 < 0)
+ dif1 = -dif1;
+ dif2 = ppb - (val * 1000000 / period2);
+ if (dif2 < 0)
+ dif2 = -dif2;
+ dif = (dif1 < dif2) ? dif1 : dif2;
+ period = (dif1 < dif2) ? period1 : period2;
+ if (dif < best_dif) {
+ best_dif = dif;
+ best_val = val;
+ best_period = period;
+ }
+ }
+ }
+
+ rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
+ best_period);
+ if (rc) {
+ BNX2X_ERR("Failed to set drift\n");
+ return -EFAULT;
+ }
+
+ DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
+ best_period);
+
+ return 0;
+}
+
+static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP adjtime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
+ DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
+
+ timecounter_adjtime(&bp->timecounter, delta);
+
+ return 0;
+}
+
+static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 ns;
+
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP gettime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
+ ns = timecounter_read(&bp->timecounter);
+
+ DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 ns;
+
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP settime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
+ ns = timespec64_to_ns(ts);
+
+ DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
+
+ /* Re-init the timecounter */
+ timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
+
+ return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+
+ BNX2X_ERR("PHC ancillary features are not supported\n");
+ return -ENOTSUPP;
+}
+
+void bnx2x_register_phc(struct bnx2x *bp)
+{
+ /* Fill the ptp_clock_info struct and register PTP clock*/
+ bp->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
+ bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
+ bp->ptp_clock_info.n_alarm = 0;
+ bp->ptp_clock_info.n_ext_ts = 0;
+ bp->ptp_clock_info.n_per_out = 0;
+ bp->ptp_clock_info.pps = 0;
+ bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine;
+ bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
+ bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
+ bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
+ bp->ptp_clock_info.enable = bnx2x_ptp_enable;
+
+ bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
+ if (IS_ERR(bp->ptp_clock)) {
+ bp->ptp_clock = NULL;
+ BNX2X_ERR("PTP clock registration failed\n");
+ }
+}
+
+static int bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count, doorbell_size;
+ int max_cos_est;
+ bool is_vf;
+ int cnic_cnt;
+
+ /* Management FW 'remembers' living interfaces. Allow it some time
+	 * to forget previously living interfaces, allowing a proper reload.
+ */
+ if (is_kdump_kernel()) {
+ ktime_t now = ktime_get_boottime();
+ ktime_t fw_ready_time = ktime_set(5, 0);
+
+ if (ktime_before(now, fw_ready_time))
+ msleep(ktime_ms_delta(fw_ready_time, now));
+ }
+
+	/* Roughly estimate the maximum number of CoSes this chip may
+	 * support, according to the chip version, in order to minimize the
+	 * memory allocated for Tx netdev_queues. The accurate value of
+	 * bp->max_cos is calculated, based on the chip version AND chip
+	 * revision, in bnx2x_init_bp().
+ */
+ max_cos_est = set_max_cos_est(ent->driver_data);
+ if (max_cos_est < 0)
+ return max_cos_est;
+ is_vf = set_is_vf(ent->driver_data);
+ cnic_cnt = is_vf ? 0 : 1;
+
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+
+ /* add another SB for VF as it has no default SB */
+ max_non_def_sbs += is_vf ? 1 : 0;
+
+ /* Maximum number of RSS queues: one IGU SB goes to CNIC */
+ rss_count = max_non_def_sbs - cnic_cnt;
+
+ if (rss_count < 1)
+ return -EINVAL;
+
+ /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+ rx_count = rss_count + cnic_cnt;
+
+ /* Maximum number of netdev Tx queues:
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ */
+ tx_count = rss_count * max_cos_est + cnic_cnt;
+
+ /* dev zeroed in init_etherdev */
+ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
+ if (!dev)
+ return -ENOMEM;
+
+ bp = netdev_priv(dev);
+
+ bp->flags = 0;
+ if (is_vf)
+ bp->flags |= IS_VF_FLAG;
+
+ bp->igu_sb_cnt = max_non_def_sbs;
+ bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
+ bp->msg_enable = debug;
+ bp->cnic_support = cnic_cnt;
+ bp->cnic_probe = bnx2x_cnic_probe;
+
+ pci_set_drvdata(pdev, dev);
+
+ rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
+ if (rc < 0) {
+ free_netdev(dev);
+ return rc;
+ }
+
+ BNX2X_DEV_INFO("This is a %s function\n",
+ IS_PF(bp) ? "physical" : "virtual");
+ BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
+ BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
+ rc = bnx2x_init_bp(bp);
+ if (rc)
+ goto init_one_exit;
+
+ /* Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp() to determine the number of
+ * l2 connections.
+ */
+ if (IS_VF(bp)) {
+ bp->doorbells = bnx2x_vf_doorbells(bp);
+ rc = bnx2x_vf_pci_alloc(bp);
+ if (rc)
+ goto init_one_freemem;
+ } else {
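+		/* Each L2 CID owns a (1 << BNX2X_DB_SHIFT) byte doorbell
+		 * cell, so the mapping must cover all of them.
+		 */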
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_freemem;
+ }
+ bp->doorbells = ioremap(pci_resource_start(pdev, 2),
+ doorbell_size);
+ }
+ if (!bp->doorbells) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_freemem;
+ }
+
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
+ if (rc)
+ goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+		/* A VF with an old hypervisor or an old PF does not support filtering */
+ if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+#endif
+ }
+
+ /* Enable SRIOV if capability found in configuration space */
+ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
+ if (rc)
+ goto init_one_freemem;
+
+ /* calc qm_cid_count */
+ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+ BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
+
+	/* disable FCoE L2 queue for E1x */
+ if (CHIP_IS_E1x(bp))
+ bp->flags |= NO_FCOE_FLAG;
+
+	/* Set bp->num_queues for MSI-X mode */
+ bnx2x_set_num_queues(bp);
+
+ /* Configure interrupt mode: try to enable MSI-X/MSI if
+ * needed.
+ */
+ rc = bnx2x_set_int_mode(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot set interrupts\n");
+ goto init_one_freemem;
+ }
+ BNX2X_DEV_INFO("set interrupts successfully\n");
+
+ /* register the net device */
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto init_one_freemem;
+ }
+ BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
+
+ if (!NO_FCOE(bp)) {
+ /* Add storage MAC address */
+ rtnl_lock();
+ dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ pcie_print_link_status(bp->pdev);
+
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
+ return 0;
+
+init_one_freemem:
+ bnx2x_free_mem_bp(bp);
+
+init_one_exit:
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ if (IS_PF(bp) && bp->doorbells)
+ iounmap(bp->doorbells);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+static void __bnx2x_remove(struct pci_dev *pdev,
+ struct net_device *dev,
+ struct bnx2x *bp,
+ bool remove_netdev)
+{
+ /* Delete storage MAC address */
+ if (!NO_FCOE(bp)) {
+ rtnl_lock();
+ dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+
+#ifdef BCM_DCBNL
+ /* Delete app tlvs from dcbnl */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp) &&
+ (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
+ /* Close the interface - either directly or implicitly */
+ if (remove_netdev) {
+ unregister_netdev(dev);
+ } else {
+ rtnl_lock();
+ dev_close(dev);
+ rtnl_unlock();
+ }
+
+ bnx2x_iov_remove_one(bp);
+
+ /* Power on: we can't let PCI layer write to us while we are in D3 */
+ if (IS_PF(bp)) {
+ bnx2x_set_power_state(bp, PCI_D0);
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
+
+		/* Set endianness registers to reset values in case the next
+		 * driver boots in a different-endianness environment.
+ */
+ bnx2x_reset_endianity(bp);
+ }
+
+ /* Disable MSI/MSI-X */
+ bnx2x_disable_msi(bp);
+
+ /* Power off */
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->sp_rtnl_task);
+
+ /* send message via vfpf channel to release the resources of this vf */
+ if (IS_VF(bp))
+ bnx2x_vfpf_release(bp);
+
+ /* Assumes no further PCIe PM changes will occur */
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, bp->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ if (remove_netdev) {
+ if (bp->regview)
+ iounmap(bp->regview);
+
+		/* For VFs, doorbells are part of the regview and were unmapped
+		 * along with it. FW is only loaded by the PF.
+ */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+
+ bnx2x_release_firmware(bp);
+ } else {
+ bnx2x_vf_pci_dealloc(bp);
+ }
+ bnx2x_free_mem_bp(bp);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ }
+}
+
+static void bnx2x_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return;
+ }
+ bp = netdev_priv(dev);
+
+ __bnx2x_remove(pdev, dev, bp, true);
+}
+
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+ if (CNIC_LOADED(bp))
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+ netdev_reset_tc(bp->dev);
+
+ del_timer_sync(&bp->timer);
+ cancel_delayed_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->period_task);
+
+ if (!down_timeout(&bp->stats_lock, HZ / 10)) {
+ bp->stats_state = STATS_STATE_DISABLED;
+ up(&bp->stats_lock);
+ }
+
+ bnx2x_save_statistics(bp);
+
+ netif_carrier_off(bp->dev);
+
+ return 0;
+}
+
+/**
+ * bnx2x_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ BNX2X_ERR("IO error detected\n");
+
+ netif_device_detach(dev);
+
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (netif_running(dev))
+ bnx2x_eeh_nic_unload(bp);
+
+ bnx2x_prev_path_mark_eeh(bp);
+
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2x_io_slot_reset - called after the PCI bus has been reset
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+ int i;
+
+ rtnl_lock();
+ BNX2X_ERR("IO slot reset initializing...\n");
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset\n");
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (netif_running(dev))
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ if (netif_running(dev)) {
+ BNX2X_ERR("IO slot reset --> driver unload\n");
+
+		/* MCP should have been reset; need to wait for validity */
+ if (bnx2x_init_shmem(bp)) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 v;
+
+ v = SHMEM2_RD(bp,
+ drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+ bnx2x_drain_tx_queues(bp);
+ bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+ if (!bp->nic_stopped) {
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, true);
+
+ bp->sp_state = 0;
+ bp->port.pmf = 0;
+
+ bnx2x_prev_unload(bp);
+
+		/* We should have reset the engine, so it's fair to assume
+		 * the FW will no longer write to the bnx2x driver.
+		 */
+ bnx2x_squeeze_objects(bp);
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+ }
+
+ rtnl_unlock();
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2x_io_resume - called when traffic can start flowing again
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2x_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
+ return;
+ }
+
+ rtnl_lock();
+
+ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ if (netif_running(dev)) {
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
+ goto done;
+ }
+ }
+
+ netif_device_attach(dev);
+
+done:
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers bnx2x_err_handler = {
+ .error_detected = bnx2x_io_error_detected,
+ .slot_reset = bnx2x_io_slot_reset,
+ .resume = bnx2x_io_resume,
+};
+
+static void bnx2x_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev)
+ return;
+
+ bp = netdev_priv(dev);
+ if (!bp)
+ return;
+
+ rtnl_lock();
+ netif_device_detach(dev);
+ rtnl_unlock();
+
+ /* Don't remove the netdevice, as there are scenarios which will cause
+ * the kernel to hang, e.g., when trying to remove bnx2i while the
+ * rootfs is mounted from SAN.
+ */
+ __bnx2x_remove(pdev, dev, bp, false);
+}
+
+static struct pci_driver bnx2x_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnx2x_pci_tbl,
+ .probe = bnx2x_init_one,
+ .remove = bnx2x_remove_one,
+ .driver.pm = &bnx2x_pm_ops,
+ .err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+ .sriov_configure = bnx2x_sriov_configure,
+#endif
+ .shutdown = bnx2x_shutdown,
+};
+
+static int __init bnx2x_init(void)
+{
+ int ret;
+
+ bnx2x_wq = create_singlethread_workqueue("bnx2x");
+ if (bnx2x_wq == NULL) {
+ pr_err("Cannot create workqueue\n");
+ return -ENOMEM;
+ }
+ bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+ if (!bnx2x_iov_wq) {
+ pr_err("Cannot create iov workqueue\n");
+ destroy_workqueue(bnx2x_wq);
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&bnx2x_pci_driver);
+ if (ret) {
+ pr_err("Cannot register driver\n");
+ destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
+ }
+ return ret;
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+ struct list_head *pos, *q;
+
+ pci_unregister_driver(&bnx2x_pci_driver);
+
+ destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
+
+ /* Free globally allocated resources */
+ list_for_each_safe(pos, q, &bnx2x_prev_list) {
+ struct bnx2x_prev_path_list *tmp =
+ list_entry(pos, struct bnx2x_prev_path_list, list);
+ list_del(pos);
+ kfree(tmp);
+ }
+}
+
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ * @bp: driver handle
+ *
+ * This function waits until the ramrod completes.
+ * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
+ */
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+ &bp->iscsi_l2_mac_obj, true,
+ BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+ struct eth_spe *spe;
+ int cxt_index, cxt_offset;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+ BUG_ON(bp->cnic_spq_pending < count);
+ bp->cnic_spq_pending -= count;
+
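+	/* Drain queued kwqes into the SPQ for as long as credits allow */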
+ for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
+ u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
+ & SPE_HDR_CONN_TYPE) >>
+ SPE_HDR_CONN_TYPE_SHIFT;
+ u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+ >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
+
+ /* Set validation for iSCSI L2 client before sending SETUP
+ * ramrod
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+ cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+ ILT_PAGE_CIDS;
+ cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+ (cxt_index * ILT_PAGE_CIDS);
+ bnx2x_set_ctx_validation(bp,
+ &bp->context[cxt_index].
+ vcxt[cxt_offset].eth,
+ BNX2X_ISCSI_ETH_CID(bp));
+ }
+ }
+
+ /*
+		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
+		 * in the air. We also check that the number of outstanding
+		 * COMMON ramrods is not more than the EQ and SPQ can
+ * accommodate.
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->cq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->cq_spq_left);
+ } else if (type == NONE_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->eq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->eq_spq_left);
+ } else if ((type == ISCSI_CONNECTION_TYPE) ||
+ (type == FCOE_CONNECTION_TYPE)) {
+ if (bp->cnic_spq_pending >=
+ bp->cnic_eth_dev.max_kwqe_pending)
+ break;
+ else
+ bp->cnic_spq_pending++;
+ } else {
+ BNX2X_ERR("Unknown SPE type: %d\n", type);
+ bnx2x_panic();
+ break;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+ *spe = *bp->cnic_kwq_cons;
+
+ DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
+ bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+ if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_cons++;
+ }
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+ struct kwqe_16 *kwqes[], u32 count)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't post to SP queue while panic\n");
+ return -EIO;
+ }
+#endif
+
+ if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
+ (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+ BNX2X_ERR("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ spin_lock_bh(&bp->spq_lock);
+
+ for (i = 0; i < count; i++) {
+ struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+ if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+ break;
+
+ *bp->cnic_kwq_prod = *spe;
+
+ bp->cnic_kwq_pending++;
+
+ DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
+ spe->hdr.conn_and_cmd_data, spe->hdr.type,
+ spe->data.update_data_addr.hi,
+ spe->data.update_data_addr.lo,
+ bp->cnic_kwq_pending);
+
+ if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_prod++;
+ }
+
+ spin_unlock_bh(&bp->spq_lock);
+
+ if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+ bnx2x_cnic_sp_post(bp, 0);
+
+ return i;
+}
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ mutex_lock(&bp->cnic_mutex);
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ mutex_unlock(&bp->cnic_mutex);
+
+ return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ ctl.cmd = cmd;
+
+ return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ /* first we tell CNIC and only then we count this as a completion */
+ ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+ ctl.data.comp.cid = cid;
+ ctl.data.comp.error = err;
+
+ bnx2x_cnic_ctl_send_bh(bp, &ctl);
+ bnx2x_cnic_sp_post(bp, 0);
+}
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+ unsigned long accept_flags = 0, ramrod_flags = 0;
+ u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+ if (start) {
+		/* Start accepting on the iSCSI L2 ring. Accept all
+		 * multicasts because it's the only way for the UIO Queue to
+		 * accept multicasts (in non-promiscuous mode only one Queue
+		 * per function will receive multicast packets: the leading
+		 * one in our case).
+ */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ /* Clear STOP_PENDING bit if START is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+ sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+ } else
+ /* Clear START_PENDING bit if STOP is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
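+	/* If an rx_mode change is already pending, just record which command
+	 * should run once it completes; otherwise issue it now.
+	 */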
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(sched_state, &bp->sp_state);
+ else {
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+ ramrod_flags);
+ }
+}
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (ctl->cmd) {
+ case DRV_CTL_CTXTBL_WR_CMD: {
+ u32 index = ctl->data.io.offset;
+ dma_addr_t addr = ctl->data.io.dma_addr;
+
+ bnx2x_ilt_wr(bp, index, addr);
+ break;
+ }
+
+ case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ bnx2x_cnic_sp_post(bp, count);
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_START_L2_CMD: {
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ unsigned long sp_bits = 0;
+
+ /* Configure the iSCSI classification object */
+ bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+ cp->iscsi_l2_client_id,
+ cp->iscsi_l2_cid, BP_FUNC(bp),
+ bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+ &bp->macs_pool);
+
+ /* Set iSCSI MAC address */
+ rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+ if (rc)
+ break;
+
+ barrier();
+
+ /* Start accepting on iSCSI L2 ring */
+
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_STOP_L2_CMD: {
+ unsigned long sp_bits = 0;
+
+ /* Stop accepting on iSCSI L2 ring */
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ barrier();
+
+ /* Unset iSCSI L2 MAC */
+ rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+ BNX2X_ISCSI_ETH_MAC, true);
+ break;
+ }
+ case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ smp_mb__before_atomic();
+ atomic_add(count, &bp->cq_spq_left);
+ smp_mb__after_atomic();
+ break;
+ }
+ case DRV_CTL_ULP_REGISTER_CMD: {
+ int ulp_type = ctl->data.register_data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ int path = BP_PATH(bp);
+ int port = BP_PORT(bp);
+ int i;
+ u32 scratch_offset;
+ u32 *host_addr;
+
+ /* first write capability to shmem2 */
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+ if ((ulp_type != CNIC_ULP_FCOE) ||
+ (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+ (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
+ break;
+
+ /* if reached here - should write fcoe capabilities */
+ scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+ if (!scratch_offset)
+ break;
+ scratch_offset += offsetof(struct glob_ncsi_oem_data,
+ fcoe_features[path][port]);
+ host_addr = (u32 *) &(ctl->data.register_data.
+ fcoe_features);
+ for (i = 0; i < sizeof(struct fcoe_capabilities);
+ i += 4)
+ REG_WR(bp, scratch_offset + i,
+ *(host_addr + i/4));
+ }
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+ break;
+ }
+
+ case DRV_CTL_ULP_UNREGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+ break;
+ }
+
+ default:
+ BNX2X_ERR("unknown command %x\n", ctl->cmd);
+ rc = -EINVAL;
+ }
+
+ /* For storage-only interfaces, change driver state */
+ if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+ switch (ctl->drv_state) {
+ case DRV_NOP:
+ break;
+ case DRV_ACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_ACTIVE);
+ break;
+ case DRV_INACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_DISABLED);
+ break;
+ case DRV_UNLOADED:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_NOT_LOADED);
+ break;
+ default:
+ BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
+ }
+ }
+
+ return rc;
+}
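+
+/* Illustrative only (not part of the driver): a CNIC-side caller could
+ * return L2 SPQ credits through the drv_ctl hook along these lines, where
+ * "cp" is the cnic_eth_dev returned by bnx2x_cnic_probe():
+ *
+ *	struct drv_ctl_info info = { .cmd = DRV_CTL_RET_L2_SPQ_CREDIT_CMD };
+ *
+ *	info.data.credit.credit_count = count;
+ *	info.drv_state = DRV_NOP;
+ *	cp->drv_ctl(dev, &info);
+ */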
+
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+ struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bdn_fc_npiv_tbl *tbl = NULL;
+ u32 offset, entries;
+ int rc = -EINVAL;
+ int i;
+
+ if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+ goto out;
+
+ DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl) {
+ BNX2X_ERR("Failed to allocate fc_npiv table\n");
+ goto out;
+ }
+
+ offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+ if (!offset) {
+ DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
+ goto out;
+ }
+ DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+ /* Read the table contents from nvram */
+ if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+ BNX2X_ERR("Failed to read FC-NPIV table\n");
+ goto out;
+ }
+
+ /* Since bnx2x_nvram_read() returns data in be32, we need to convert
+ * the number of entries back to cpu endianness.
+ */
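+ /* For example, on a little-endian host the NVRAM byte stream
+ * 00 00 00 02 reads back as the raw u32 0x02000000; be32_to_cpu()
+ * restores the intended value 2.
+ */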
+ entries = tbl->fc_npiv_cfg.num_of_npiv;
+ entries = (__force u32)be32_to_cpu((__force __be32)entries);
+ tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+ if (!tbl->fc_npiv_cfg.num_of_npiv) {
+ DP(BNX2X_MSG_MCP,
+ "No FC-NPIV table [valid, simply not present]\n");
+ goto out;
+ } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+ BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ goto out;
+ } else {
+ DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ }
+
+ /* Copy the data into cnic-provided struct */
+ cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+ for (i = 0; i < cnic_tbl->count; i++) {
+ memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+ memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+ }
+
+ rc = 0;
+out:
+ kfree(tbl);
+ return rc;
+}
+
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
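+ /* MSI-X slot 0 carries the default status block and slot 1 is
+ * reserved for CNIC (see bnx2x_enable_msix()), hence the
+ * msix_table[1] vector below.
+ */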
+ if (bp->flags & USING_MSIX_FLAG) {
+ cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+ cp->irq_arr[0].vector = bp->msix_table[1].vector;
+ } else {
+ cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+ }
+ if (!CHIP_IS_E1x(bp))
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
+ else
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+
+ cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
+ cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
+ cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+ cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
+
+ cp->num_irq = 2;
+}
+
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+ DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+ BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+ cp->iscsi_l2_cid);
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+ void *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ int rc;
+
+ DP(NETIF_MSG_IFUP, "Register_cnic called\n");
+
+ if (ops == NULL) {
+ BNX2X_ERR("NULL ops received\n");
+ return -EINVAL;
+ }
+
+ if (!CNIC_SUPPORT(bp)) {
+ BNX2X_ERR("Can't register CNIC when not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!CNIC_LOADED(bp)) {
+ rc = bnx2x_load_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("CNIC-related load failed\n");
+ return rc;
+ }
+ }
+
+ bp->cnic_enabled = true;
+
+ bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!bp->cnic_kwq)
+ return -ENOMEM;
+
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+ bp->cnic_spq_pending = 0;
+ bp->cnic_kwq_pending = 0;
+
+ bp->cnic_data = data;
+
+ cp->num_irq = 0;
+ cp->drv_state |= CNIC_DRV_STATE_REGD;
+ cp->iro_arr = bp->iro_arr;
+
+ bnx2x_setup_cnic_irq_info(bp);
+
+ rcu_assign_pointer(bp->cnic_ops, ops);
+
+ /* Schedule driver to read CNIC driver versions */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
+ return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ mutex_lock(&bp->cnic_mutex);
+ cp->drv_state = 0;
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
+ mutex_unlock(&bp->cnic_mutex);
+ synchronize_rcu();
+ bp->cnic_enabled = false;
+ kfree(bp->cnic_kwq);
+ bp->cnic_kwq = NULL;
+
+ return 0;
+}
+
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ /* If both iSCSI and FCoE are disabled - return NULL in
+ * order to indicate to CNIC that it should not try to work
+ * with this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
+ cp->drv_owner = THIS_MODULE;
+ cp->chip_id = CHIP_ID(bp);
+ cp->pdev = bp->pdev;
+ cp->io_base = bp->regview;
+ cp->io_base2 = bp->doorbells;
+ cp->max_kwqe_pending = 8;
+ cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->ctx_tbl_len = CNIC_ILT_LINES;
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+ cp->drv_ctl = bnx2x_drv_ctl;
+ cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
+ cp->drv_register_cnic = bnx2x_register_cnic;
+ cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+ cp->iscsi_l2_client_id =
+ bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
+ BNX2X_DEV_INFO(
+ "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
+ cp->ctx_blk_size,
+ cp->ctx_tbl_offset,
+ cp->ctx_tbl_len,
+ cp->starting_cid);
+ return cp;
+}
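+
+/* Illustrative consumer flow (a sketch, not the cnic module's actual code):
+ * probe for the device and register through the returned ops table;
+ * "my_cnic_ops" and "my_data" are hypothetical:
+ *
+ *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(dev);
+ *
+ *	if (cp && !cp->drv_register_cnic(dev, my_cnic_ops, my_data)) {
+ *		... use the device; eventually:
+ *		cp->drv_unregister_cnic(dev);
+ *	}
+ */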
+
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ u32 offset = BAR_USTRORM_INTMEM;
+
+ if (IS_VF(bp))
+ return bnx2x_vf_ustorm_prods_offset(bp, fp);
+ else if (!CHIP_IS_E1x(bp))
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+
+ return offset;
+}
+
+/* Called only on E1H or E2.
+ * When pretending to be a PF, the pretend value is the function number 0..7.
+ * When pretending to be a VF, the pretend value is the
+ * PF-num:VF-valid:ABS-VFID combination.
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+ u32 pretend_reg;
+
+ if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
+ return -1;
+
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(bp);
+ REG_WR(bp, pretend_reg, pretend_func_val);
+ REG_RD(bp, pretend_reg);
+ return 0;
+}
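+
+/* Illustrative usage when impersonating a VF (a sketch): the SR-IOV code
+ * builds the pretend value with the HW_VF_HANDLE() helper from
+ * bnx2x_sriov.h and restores the PF view afterwards:
+ *
+ *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+ *	... access registers as seen by the VF ...
+ *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ */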
+
+static void bnx2x_ptp_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
+ int port = BP_PORT(bp);
+ u32 val_seq;
+ u64 timestamp, ns;
+ struct skb_shared_hwtstamps shhwtstamps;
+ bool bail = true;
+ int i;
+
+ /* FW may take a while to complete timestamping; poll for a while and
+ * if it's still not complete, that may indicate an error state - bail
+ * out then.
+ */
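+ /* Worst case the loop below sleeps 1 + 2 + ... + 512 ms, i.e. roughly
+ * one second in total, before giving up.
+ */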
+ for (i = 0; i < 10; i++) {
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ bail = false;
+ break;
+ }
+ msleep(1 << i);
+ }
+
+ if (!bail) {
+ /* There is a valid timestamp value */
+ timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
+ NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
+ timestamp <<= 32;
+ timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
+ NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
+ /* Reset timestamp register to allow new timestamp */
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+ ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
+
+ DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+ } else {
+ DP(BNX2X_MSG_PTP,
+ "Tx timestamp is not recorded (register read=%u)\n",
+ val_seq);
+ bp->eth_stats.ptp_skip_tx_ts++;
+ }
+
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
+}
+
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
+{
+ int port = BP_PORT(bp);
+ u64 timestamp, ns;
+
+ timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
+ timestamp <<= 32;
+ timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
+
+ /* Reset timestamp register to allow new timestamp */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+
+ ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+}
+
+/* Read the PHC */
+static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
+ int port = BP_PORT(bp);
+ u32 wb_data[2];
+ u64 phc_cycles;
+
+ REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
+ NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
+ phc_cycles = wb_data[1];
+ phc_cycles = (phc_cycles << 32) + wb_data[0];
+
+ DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
+
+ return phc_cycles;
+}
+
+static void bnx2x_init_cyclecounter(struct bnx2x *bp)
+{
+ memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
+ bp->cyclecounter.read = bnx2x_cyclecounter_read;
+ bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
+ bp->cyclecounter.shift = 0;
+ bp->cyclecounter.mult = 1;
+}
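+
+/* With mult == 1 and shift == 0 the generic timecounter conversion,
+ * ns = (cycles * mult) >> shift, degenerates to ns = cycles: the PHC
+ * free-running counter is treated as counting nanoseconds directly.
+ */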
+
+static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &func_params.params.set_timesync;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+ /* Function parameters */
+ set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
+ set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
+{
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
+
+ /* send queue update ramrod to enable PTP packets */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+ &q_params.params.update.update_flags);
+ __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to enable PTP packets\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
+#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
+#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
+#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
+#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
+#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
+#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
+#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
+#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
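+
+/* The per-mode masks above are the common P2P detection masks with a few
+ * bits knocked out; e.g. the effective V1-over-L4 values are
+ * 0x5F5 & 0x7EE = 0x5E4 (param) and 0x3DBB & 0x3FFE = 0x3DBA (rule).
+ */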
+
+int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 param, rule;
+ int rc;
+
+ if (!bp->hwtstamp_ioctl_called)
+ return 0;
+
+ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK;
+ switch (bp->tx_type) {
+ case HWTSTAMP_TX_ON:
+ bp->flags |= TX_TIMESTAMPING_EN;
+ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ BNX2X_ERR("One-step timestamping is not supported\n");
+ return -ERANGE;
+ }
+
+ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK;
+ switch (bp->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ bp->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 events */
+ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ /* Initialize PTP detection for L2 events */
+ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
+
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ /* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
+ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
+ break;
+ }
+
+ /* Indicate to FW that this PF expects recorded PTP packets */
+ rc = bnx2x_enable_ptp_packets(bp);
+ if (rc)
+ return rc;
+
+ /* Enable sending PTP packets to host */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
+
+ return 0;
+}
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int rc;
+
+ DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
+ config.tx_type, config.rx_filter);
+
+ bp->hwtstamp_ioctl_called = true;
+ bp->tx_type = config.tx_type;
+ bp->rx_filter = config.rx_filter;
+
+ rc = bnx2x_configure_ptp_filters(bp);
+ if (rc)
+ return rc;
+
+ config.rx_filter = bp->rx_filter;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
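+
+/* A minimal user-space sketch of exercising the handler above through the
+ * standard SIOCSHWTSTAMP ioctl ("eth0" and sock_fd are hypothetical; note
+ * that the driver may coerce rx_filter and copies the result back):
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ *	};
+ *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ */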
+
+/* Configures HW for PTP */
+static int bnx2x_configure_ptp(struct bnx2x *bp)
+{
+ int rc, port = BP_PORT(bp);
+ u32 wb_data[2];
+
+ /* Reset PTP event detection rules - will be configured in the IOCTL */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+ /* Disable PTP packets to host - will be configured in the IOCTL */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+ /* Enable the PTP feature */
+ REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+ NIG_REG_P0_PTP_EN, 0x3F);
+
+ /* Enable the free-running counter */
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
+
+ /* Reset drift register (offset register is not reset) */
+ rc = bnx2x_send_reset_timesync_ramrod(bp);
+ if (rc) {
+ BNX2X_ERR("Failed to reset PHC drift register\n");
+ return -EFAULT;
+ }
+
+ /* Reset possibly old timestamps */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+
+ return 0;
+}
+
+/* Called during load, to initialize PTP-related state */
+void bnx2x_init_ptp(struct bnx2x *bp)
+{
+ int rc;
+
+ /* Configure PTP in HW */
+ rc = bnx2x_configure_ptp(bp);
+ if (rc) {
+ BNX2X_ERR("Stopping PTP initialization\n");
+ return;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
+
+ /* Init cyclecounter and timecounter. This is done only on the first
+ * load. If it were done on every load, a running PTP application
+ * would fail across an unload/load cycle (e.g. an MTU change).
+ */
+ if (!bp->timecounter_init_done) {
+ bnx2x_init_cyclecounter(bp);
+ timecounter_init(&bp->timecounter, &bp->cyclecounter,
+ ktime_to_ns(ktime_get_real()));
+ bp->timecounter_init_done = true;
+ }
+
+ DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 0000000000..a91ccbf363
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,170 @@
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_MFW_REQ_H
+#define BNX2X_MFW_REQ_H
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ u32 capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ u32 capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ u32 capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ u32 capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
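+
+/* Example field extraction (illustrative): the maximum number of I/Os per
+ * connection advertised in capability1 is
+ *	(capability1 & FCOE_IOS_PER_CONNECTION_MASK) >>
+ *		FCOE_IOS_PER_CONNECTION_SHIFT
+ */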
+
+struct glob_ncsi_oem_data {
+ u32 driver_version;
+ u32 unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version, padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note: Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
new file mode 100644
index 0000000000..4e9215bce4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -0,0 +1,7776 @@
+/* bnx2x_reg.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Each register description starts with the register access type, followed
+ * by its size in bits, e.g. [RW 32]. The access types are:
+ * R - Read only
+ * RC - Clear on read
+ * RW - Read/Write
+ * ST - Statistics register (clear on read)
+ * W - Write only
+ * WB - Wide bus register - the size is over 32 bits and it should be
+ * read/written in consecutive 32-bit accesses
+ * WR - Write Clear (write 1 to clear the bit)
+ *
+ */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
+
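+/* Usage note (illustrative): the offsets below are passed to the driver's
+ * REG_RD()/REG_WR() accessors; e.g. reading an RC register returns its
+ * bits and clears them in one access:
+ *
+ *	u32 atc_int = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+ */
+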
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
+/* [RW 1] Initiate the ATC array - reset all the valid bits */
+#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE 0x1100bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK 0x1101d8
+/* [R 5] Parity register #0 read */
+#define ATC_REG_ATC_PRTY_STS 0x1101cc
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK 0x60128
+/* [R 19] Interrupt register #0 read */
+#define BRB1_REG_BRB1_INT_STS 0x6011c
+/* [RW 4] Parity mask register #0 read/write */
+#define BRB1_REG_BRB1_PRTY_MASK 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
+/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
+ * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
+ * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
+ * following reset the first rbc access to this reg must be a write; there
+ * can be no more rbc writes after the first one; there can be any number of
+ * rbc reads following the first write; rbc accesses not following these
+ * rules will result in a hang condition. */
+#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
+/* [RW 10] The number of free blocks below which the full signal to class 0
+ * is asserted */
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
+ * is de-asserted */
+#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
+ * is asserted */
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
+ * is de-asserted */
+#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
+ * port is asserted */
+#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
+/* [RW 10] The number of free blocks above which the full signal to the LB
+ * port is de-asserted */
+#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
+/* [RW 10] The number of free blocks above which the High_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
+/* [RW 10] The number of free blocks below which the High_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
+/* [RW 11] The number of blocks guaranteed for the LB port */
+#define BRB1_REG_LB_GUARANTIED 0x601ec
+/* [RW 11] The hysteresis on the guaranteed buffer space for the LB port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM 0x61000
+/* [RW 10] The number of free blocks above which the Low_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
+/* [RW 10] The number of free blocks below which the Low_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260
+/* [RW 11] The number of blocks guaranteed for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
+#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1 0x60240
+/* [R 24] The number of full blocks. */
+#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
+/* [ST 32] The number of cycles that the write_full signal towards MAC #0
+ was asserted. */
+#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
+#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
+#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
+/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
+ asserted. */
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
+/* [RW 10] The number of free blocks below which the pause signal to class 0
+ * is asserted */
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
+ * is de-asserted */
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
+ * is asserted */
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
+ * is de-asserted */
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
+/* [RW 10] Write client 0: Assert pause threshold. */
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
+/* [RW 1] Indicates whether to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268
+/* [R 24] The number of full blocks occupied by the port. */
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
+/* [RW 1] Reset the design by software. */
+#define BRB1_REG_SOFT_RESET 0x600dc
+/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+#define CCM_REG_CAM_OCCUP 0xd0188
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CFC_IFEN 0xd003c
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CQM_IFEN 0xd000c
+/* [RW 1] If set, the Q index received from the QM is inserted into the
+ event ID; otherwise 0 is inserted. */
+#define CCM_REG_CCM_CQM_USE_Q 0xd00c0
+/* [RW 11] Interrupt mask register #0 read/write */
+#define CCM_REG_CCM_INT_MASK 0xd01e4
+/* [R 11] Interrupt register #0 read */
+#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
+/* [R 27] Parity register #0 read */
+#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the input message Reg1WbFlg isn't set. */
+#define CCM_REG_CCM_REG0_SZ 0xd00c4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM0_IFEN 0xd0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM1_IFEN 0xd0008
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_AG_RD_IFEN 0xd0030
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define CCM_REG_CDU_AG_WR_IFEN 0xd002c
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_SM_RD_IFEN 0xd0038
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define CCM_REG_CDU_SM_WR_IFEN 0xd0034
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define CCM_REG_CFC_INIT_CRD 0xd0204
+/* [RW 2] Auxiliary counter flag Q number 1. */
+#define CCM_REG_CNT_AUX1_Q 0xd00c8
+/* [RW 2] Auxiliary counter flag Q number 2. */
+#define CCM_REG_CNT_AUX2_Q 0xd00cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define CCM_REG_CQM_CCM_HDR_P 0xd008c
+/* [RW 28] The CM header value for QM request (secondary). */
+#define CCM_REG_CQM_CCM_HDR_S 0xd0090
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CQM_CCM_IFEN 0xd0014
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define CCM_REG_CQM_INIT_CRD 0xd020c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_P_WEIGHT 0xd00b8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_S_WEIGHT 0xd00bc
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CSDM_IFEN 0xd0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CSDM_WEIGHT 0xd00b4
+/* [RW 28] The CM header for QM formatting in case of an error in the QM
+ inputs. */
+#define CCM_REG_ERR_CCM_HDR 0xd0094
+/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
+#define CCM_REG_ERR_EVNT_ID 0xd0098
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC0_INIT_CRD 0xd0210
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC1_INIT_CRD 0xd0214
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
+ ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
+ outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
+#define CCM_REG_GR_ARB_TYPE 0xd015c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is assumed that the Store channel priority is
+ the complement to 4 of the rest priorities - Aggregation channel; Load
+ (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD0_PR 0xd0164
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is assumed that the Store channel priority is
+ the complement to 4 of the rest priorities - Aggregation channel; Load
+ (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD1_PR 0xd0168
+/* [RW 2] General flags index. */
+#define CCM_REG_INV_DONE_Q 0xd0108
+/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM
+ context and sent to STORM; for a specific connection type. The double
+ REG-pairs are used in order to align to STORM context row size of 128
+ bits. The offset of these data in the STORM context is always 0. Index
+ _(0..15) stands for the connection type (one of 16). */
+#define CCM_REG_N_SM_CTX_LD_0 0xd004c
+#define CCM_REG_N_SM_CTX_LD_1 0xd0050
+#define CCM_REG_N_SM_CTX_LD_2 0xd0054
+#define CCM_REG_N_SM_CTX_LD_3 0xd0058
+#define CCM_REG_N_SM_CTX_LD_4 0xd005c
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_PBF_IFEN 0xd0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the pbf interface is detected. */
+#define CCM_REG_PBF_LENGTH_MIS 0xd0180
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_PBF_WEIGHT 0xd00ac
+#define CCM_REG_PHYS_QNUM1_0 0xd0134
+#define CCM_REG_PHYS_QNUM1_1 0xd0138
+#define CCM_REG_PHYS_QNUM2_0 0xd013c
+#define CCM_REG_PHYS_QNUM2_1 0xd0140
+#define CCM_REG_PHYS_QNUM3_0 0xd0144
+#define CCM_REG_PHYS_QNUM3_1 0xd0148
+#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
+#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
+#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
+#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
+#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
+#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
+#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
+#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_STORM_CCM_IFEN 0xd0010
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define CCM_REG_STORM_LENGTH_MIS 0xd016c
+/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
+ mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
+ weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
+ etc. */
+#define CCM_REG_STORM_WEIGHT 0xd009c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_TSEM_IFEN 0xd001c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_TSEM_WEIGHT 0xd00a0
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_USEM_IFEN 0xd0024
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+ the usem interface is detected. */
+#define CCM_REG_USEM_LENGTH_MIS 0xd017c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_USEM_WEIGHT 0xd00a8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_XSEM_IFEN 0xd0020
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface is detected. */
+#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_XSEM_WEIGHT 0xd00a4
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [12:6] - message
+ pointer; [18:13] - next pointer. */
+#define CCM_REG_XX_DESCR_TABLE 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE 24
+/* [R 7] Used to read the value of XX protection Free counter. */
+#define CCM_REG_XX_FREE 0xd0184
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 127. Write writes the initial credit
+ value; read returns the current value of the credit counter. Must be
+ initialized to maximum XX protected message size - 2 at start-up. */
+#define CCM_REG_XX_INIT_CRD 0xd0220
+/* [RW 7] The maximum number of pending messages; which may be stored in XX
+ protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
+ At write comprises the start value of the ~ccm_registers_xx_free.xx_free
+ counter. */
+#define CCM_REG_XX_MSG_NUM 0xd0224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
+/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
+ header pointer. */
+#define CCM_REG_XX_TABLE 0xd0280
+#define CDU_REG_CDU_CHK_MASK0 0x101000
+#define CDU_REG_CDU_CHK_MASK1 0x101004
+#define CDU_REG_CDU_CONTROL0 0x101008
+#define CDU_REG_CDU_DEBUG 0x101010
+#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
+/* [RW 7] Interrupt mask register #0 read/write */
+#define CDU_REG_CDU_INT_MASK 0x10103c
+/* [R 7] Interrupt register #0 read */
+#define CDU_REG_CDU_INT_STS 0x101030
+/* [RW 5] Parity mask register #0 read/write */
+#define CDU_REG_CDU_PRTY_MASK 0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
+/* [RC 32] logging of error data in case of a CDU load error:
+ {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
+ active_error; type_error; actual_active; actual_compressed_context}; */
+#define CDU_REG_ERROR_DATA 0x101014
+/* [WB 216] L1TT ram access. Each entry has the following format:
+ {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
+ length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
+#define CDU_REG_L1TT 0x101800
+/* [WB 24] MATT ram access. Each entry has the following
+ format: {RegionLength[11:0]; RegionOffset[11:0]} */
+#define CDU_REG_MATT 0x101100
+/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
+#define CDU_REG_MF_MODE 0x101050
+/* [R 1] indication that initializing the activity counter by the hardware
+ was done. */
+#define CFC_REG_AC_INIT_DONE 0x104078
+/* [RW 13] activity counter ram access */
+#define CFC_REG_ACTIVITY_COUNTER 0x104400
+#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
+/* [R 1] indication that initializing the CAMs by the hardware was done. */
+#define CFC_REG_CAM_INIT_DONE 0x10407c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define CFC_REG_CFC_INT_MASK 0x104108
+/* [R 2] Interrupt register #0 read */
+#define CFC_REG_CFC_INT_STS 0x1040fc
+/* [RC 2] Interrupt register #0 read clear */
+#define CFC_REG_CFC_INT_STS_CLR 0x104100
+/* [RW 4] Parity mask register #0 read/write */
+#define CFC_REG_CFC_PRTY_MASK 0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
+/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
+#define CFC_REG_CID_CAM 0x104800
+#define CFC_REG_CONTROL0 0x104028
+#define CFC_REG_DEBUG0 0x104050
+/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
+ vector) whether the cfc should be disabled upon it */
+#define CFC_REG_DISABLE_ON_ERROR 0x104044
+/* [RC 14] CFC error vector. when the CFC detects an internal error it will
+ set one of these bits. The bit description can be found in CFC
+ specifications */
+#define CFC_REG_ERROR_VECTOR 0x10403c
+/* [WB 93] LCID info ram access */
+#define CFC_REG_INFO_RAM 0x105000
+#define CFC_REG_INFO_RAM_SIZE 1024
+#define CFC_REG_INIT_REG 0x10404c
+#define CFC_REG_INTERFACES 0x104058
+/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
+ field allows changing the priorities of the weighted-round-robin arbiter
+ which selects which CFC load client should be served next */
+#define CFC_REG_LCREQ_WEIGHTS 0x104084
+/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
+#define CFC_REG_LINK_LIST 0x104c00
+#define CFC_REG_LINK_LIST_SIZE 256
+/* [R 1] indication that initializing the link list by the hardware was done. */
+#define CFC_REG_LL_INIT_DONE 0x104074
+/* [R 9] Number of allocated LCIDs which are at empty state */
+#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
+/* [R 9] Number of Arriving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120
+/* [R 9] Number of Leaving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
+#define CFC_REG_WEAK_ENABLE_PF 0x104124
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
+#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
+#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
+#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
+#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
+#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
+#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
+#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
+#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
+#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
+#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
+#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
+#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
+#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
+#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
+#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
+#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
+#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
+#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
+#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
+#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
+#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
+#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
+#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
+#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
+#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSDM_REG_CSDM_INT_MASK_0 0xc229c
+#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
+/* [R 32] Interrupt register #0 read */
+#define CSDM_REG_CSDM_INT_STS_0 0xc2290
+#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
+/* [RW 11] Parity mask register #0 read/write */
+#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
+/* [R 11] Parity register #0 read */
+#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
+#define CSDM_REG_ENABLE_IN1 0xc2238
+#define CSDM_REG_ENABLE_IN2 0xc223c
+#define CSDM_REG_ENABLE_OUT1 0xc2240
+#define CSDM_REG_ENABLE_OUT2 0xc2244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc
+/* [ST 32] The number of ACK after placement messages received */
+#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274
+/* [ST 32] The number of requests received from the pxp async if */
+#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278
+/* [ST 32] The number of commands received in queue 0 */
+#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248
+/* [ST 32] The number of commands received in queue 10 */
+#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c
+/* [ST 32] The number of commands received in queue 11 */
+#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270
+/* [ST 32] The number of commands received in queue 1 */
+#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c
+/* [ST 32] The number of commands received in queue 3 */
+#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250
+/* [ST 32] The number of commands received in queue 4 */
+#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254
+/* [ST 32] The number of commands received in queue 5 */
+#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258
+/* [ST 32] The number of commands received in queue 6 */
+#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c
+/* [ST 32] The number of commands received in queue 7 */
+#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260
+/* [ST 32] The number of commands received in queue 8 */
+#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264
+/* [ST 32] The number of commands received in queue 9 */
+#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define CSDM_REG_TIMER_TICK 0xc2000
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define CSEM_REG_ARB_CYCLE_SIZE 0x200034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define CSEM_REG_ARB_ELEMENT0 0x200020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Must not be equal to register ~csem_registers_arb_element0.arb_element0 */
+#define CSEM_REG_ARB_ELEMENT1 0x200024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Must not be equal to register ~csem_registers_arb_element0.arb_element0
+ and ~csem_registers_arb_element1.arb_element1 */
+#define CSEM_REG_ARB_ELEMENT2 0x200028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2. Must
+ not be equal to register ~csem_registers_arb_element0.arb_element0 and
+ ~csem_registers_arb_element1.arb_element1 and
+ ~csem_registers_arb_element2.arb_element2 */
+#define CSEM_REG_ARB_ELEMENT3 0x20002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Must not be equal to register ~csem_registers_arb_element0.arb_element0
+ and ~csem_registers_arb_element1.arb_element1 and
+ ~csem_registers_arb_element2.arb_element2 and
+ ~csem_registers_arb_element3.arb_element3 */
+#define CSEM_REG_ARB_ELEMENT4 0x200030
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSEM_REG_CSEM_INT_MASK_0 0x200110
+#define CSEM_REG_CSEM_INT_MASK_1 0x200120
+/* [R 32] Interrupt register #0 read */
+#define CSEM_REG_CSEM_INT_STS_0 0x200104
+#define CSEM_REG_CSEM_INT_STS_1 0x200114
+/* [RW 32] Parity mask register #0 read/write */
+#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
+#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
+/* [R 32] Parity register #0 read */
+#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
+#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
+#define CSEM_REG_ENABLE_IN 0x2000a4
+#define CSEM_REG_ENABLE_OUT 0x2000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define CSEM_REG_FAST_MEMORY 0x220000
+/* [RW 1] Disables input messages from FIC0. May be updated during run-time
+ by the microcode */
+#define CSEM_REG_FIC0_DISABLE 0x200224
+/* [RW 1] Disables input messages from FIC1. May be updated during run-time
+ by the microcode */
+#define CSEM_REG_FIC1_DISABLE 0x200234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define CSEM_REG_INT_TABLE 0x200400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define CSEM_REG_MSG_NUM_FIC0 0x200000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define CSEM_REG_MSG_NUM_FIC1 0x200004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define CSEM_REG_MSG_NUM_FOC0 0x200008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define CSEM_REG_MSG_NUM_FOC1 0x20000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define CSEM_REG_MSG_NUM_FOC2 0x200010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define CSEM_REG_MSG_NUM_FOC3 0x200014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run-time by the microcode */
+#define CSEM_REG_PAS_DISABLE 0x20024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define CSEM_REG_PASSIVE_BUFFER 0x202000
+/* [WB 46] pram memory. Bit 45 is parity; bits [44:0] are data. */
+#define CSEM_REG_PRAM 0x240000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define CSEM_REG_THREADS_LIST 0x2002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define CSEM_REG_TS_0_AS 0x200038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define CSEM_REG_TS_10_AS 0x200060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define CSEM_REG_TS_11_AS 0x200064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define CSEM_REG_TS_12_AS 0x200068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define CSEM_REG_TS_13_AS 0x20006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define CSEM_REG_TS_14_AS 0x200070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define CSEM_REG_TS_15_AS 0x200074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define CSEM_REG_TS_16_AS 0x200078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define CSEM_REG_TS_17_AS 0x20007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define CSEM_REG_TS_18_AS 0x200080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define CSEM_REG_TS_1_AS 0x20003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define CSEM_REG_TS_2_AS 0x200040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define CSEM_REG_TS_3_AS 0x200044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define CSEM_REG_TS_4_AS 0x200048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define CSEM_REG_TS_5_AS 0x20004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define CSEM_REG_TS_6_AS 0x200050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define CSEM_REG_TS_7_AS 0x200054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define CSEM_REG_TS_8_AS 0x200058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define CSEM_REG_TS_9_AS 0x20005c
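+/* Editor's note (a sketch, not part of the original register map): the
+ * time_slot arbitration scheme registers above are contiguous, so for the
+ * slots 0-18 defined here the address could be computed as:
+ *
+ *   #define CSEM_REG_TS_AS(slot)	(0x200038 + ((slot) << 2))
+ */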
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for the 64 VFs; values 64-67 reset the error bit for the 4 PFs; values
+ * 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM 0x200380
+/* [RW 1] Parity mask register #0 read/write */
+#define DBG_REG_DBG_PRTY_MASK 0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
+/* [RW 1] When set the DMAE will process the commands as in E1.5: 1. The
+ * function that is used is always SRC-PCI; 2. VF_Valid = 0; 3. VFID = 0;
+ * 4. Completion function = 0; 5. Error handling = 0 */
+#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
+/* [RW 32] Commands memory. The address of command X; row Y is calculated
+ as 14*X+Y. */
+#define DMAE_REG_CMD_MEM 0x102400
+#define DMAE_REG_CMD_MEM_SIZE 224
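+/* Editor's note (a sketch under the stated assumption): each command row is
+ * a 32-bit word, so the byte address of row Y of command X would be
+ *
+ *   #define DMAE_REG_CMD_MEM_ADDR(x, y) \
+ *           (DMAE_REG_CMD_MEM + ((14 * (x) + (y)) << 2))
+ *
+ * assuming a 4-byte register stride. This agrees with
+ * DMAE_REG_CMD_MEM_SIZE = 224 = 16 commands * 14 words. */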
+/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+ initial value is all ones. */
+#define DMAE_REG_CRC16C_INIT 0x10201c
+/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
+ CRC-16 T10 initial value is all ones. */
+#define DMAE_REG_CRC16T10_INIT 0x102020
+/* [RW 2] Interrupt mask register #0 read/write */
+#define DMAE_REG_DMAE_INT_MASK 0x102054
+/* [RW 4] Parity mask register #0 read/write */
+#define DMAE_REG_DMAE_PRTY_MASK 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
+/* [RW 1] Command 0 go. */
+#define DMAE_REG_GO_C0 0x102080
+/* [RW 1] Command 1 go. */
+#define DMAE_REG_GO_C1 0x102084
+/* [RW 1] Command 10 go. */
+#define DMAE_REG_GO_C10 0x102088
+/* [RW 1] Command 11 go. */
+#define DMAE_REG_GO_C11 0x10208c
+/* [RW 1] Command 12 go. */
+#define DMAE_REG_GO_C12 0x102090
+/* [RW 1] Command 13 go. */
+#define DMAE_REG_GO_C13 0x102094
+/* [RW 1] Command 14 go. */
+#define DMAE_REG_GO_C14 0x102098
+/* [RW 1] Command 15 go. */
+#define DMAE_REG_GO_C15 0x10209c
+/* [RW 1] Command 2 go. */
+#define DMAE_REG_GO_C2 0x1020a0
+/* [RW 1] Command 3 go. */
+#define DMAE_REG_GO_C3 0x1020a4
+/* [RW 1] Command 4 go. */
+#define DMAE_REG_GO_C4 0x1020a8
+/* [RW 1] Command 5 go. */
+#define DMAE_REG_GO_C5 0x1020ac
+/* [RW 1] Command 6 go. */
+#define DMAE_REG_GO_C6 0x1020b0
+/* [RW 1] Command 7 go. */
+#define DMAE_REG_GO_C7 0x1020b4
+/* [RW 1] Command 8 go. */
+#define DMAE_REG_GO_C8 0x1020b8
+/* [RW 1] Command 9 go. */
+#define DMAE_REG_GO_C9 0x1020bc
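+/* Editor's note: the GO register addresses above follow the alphabetical
+ * order of the names (DMAE_REG_GO_C2 sits at a higher address than
+ * DMAE_REG_GO_C15), so the address is not linear in the command number. A
+ * sketch of a lookup table a driver might use (illustrative only):
+ *
+ *   static const u32 dmae_reg_go_c[] = {
+ *           DMAE_REG_GO_C0,  DMAE_REG_GO_C1,  DMAE_REG_GO_C2,  DMAE_REG_GO_C3,
+ *           DMAE_REG_GO_C4,  DMAE_REG_GO_C5,  DMAE_REG_GO_C6,  DMAE_REG_GO_C7,
+ *           DMAE_REG_GO_C8,  DMAE_REG_GO_C9,  DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ *           DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+ *   };
+ */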
+/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
+ input is disregarded; valid is deasserted; all other signals are treated
+ as usual; if 1 - normal activity. */
+#define DMAE_REG_GRC_IFEN 0x102008
+/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
+ acknowledge input is disregarded; valid is deasserted; full is asserted;
+ all other signals are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_PCI_IFEN 0x102004
+/* [RW 4] DMAE - PCI Request Interface initial credit. A write loads the
+ initial value into the credit counter related to the address; a read
+ returns the current value of the counter. */
+#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD0 0x170060
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD1 0x170064
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD2 0x170068
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD3 0x17006c
+/* [RW 28] UCM Header. */
+#define DORQ_REG_CMHEAD_RX 0x170050
+/* [RW 32] Doorbell address for RBC doorbells (function 0). */
+#define DORQ_REG_DB_ADDR0 0x17008c
+/* [RW 5] Interrupt mask register #0 read/write */
+#define DORQ_REG_DORQ_INT_MASK 0x170180
+/* [R 5] Interrupt register #0 read */
+#define DORQ_REG_DORQ_INT_STS 0x170174
+/* [RC 5] Interrupt register #0 read clear */
+#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
+/* [RW 2] Parity mask register #0 read/write */
+#define DORQ_REG_DORQ_PRTY_MASK 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
+/* [RW 8] The address to write the DPM CID to STORM. */
+#define DORQ_REG_DPM_CID_ADDR 0x170044
+/* [RW 5] The DPM mode CID extraction offset. */
+#define DORQ_REG_DPM_CID_OFST 0x170030
+/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
+#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
+/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
+#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
+/* [R 13] Current value of the DQ FIFO fill level according to the following
+ pointer. The range is 0 - 256 FIFO rows; each row stands for one
+ doorbell. */
+#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
+/* [R 1] DQ FIFO full status. Set when the FIFO fill level is greater than
+ or equal to the full threshold; reset on full clear. */
+#define DORQ_REG_DQ_FULL_ST 0x1700c0
+/* [RW 28] The value sent to CM header in the case of CFC load error. */
+#define DORQ_REG_ERR_CMHEAD 0x170058
+#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec
+#define DORQ_REG_MODE_ACT 0x170008
+/* [RW 5] The normal mode CID extraction offset. */
+#define DORQ_REG_NORM_CID_OFST 0x17002c
+/* [RW 28] TCM Header when only TCP context is loaded. */
+#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
+/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
+ Interface. */
+#define DORQ_REG_OUTST_REQ 0x17003c
+#define DORQ_REG_PF_USAGE_CNT 0x1701d0
+#define DORQ_REG_REGN 0x170038
+/* [R 4] Current value of response A counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
+/* [R 4] Current value of response B counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+/* [RW 4] The initial credit at the Doorbell Response Interface. The write
+ writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
+ read reads this written value. */
+#define DORQ_REG_RSP_INIT_CRD 0x170048
+#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0
+#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4
+#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4
+#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8
+/* [RW 10] VF type validation mask value */
+#define DORQ_REG_VF_TYPE_MASK_0 0x170218
+/* [RW 17] VF type validation Max MCID value */
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8
+/* [RW 17] VF type validation Min MCID value */
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298
+/* [RW 10] VF type validation comp value */
+#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
+#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+
+/* [RW 4] Initial activity counter value on the load request; when the
+ shortcut is done. */
+#define DORQ_REG_SHRT_ACT_CNT 0x170070
+/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
+#define DORQ_REG_SHRT_CMHEAD 0x170054
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define DORQ_REG_VF_USAGE_CNT 0x170320
+#define HC_REG_AGG_INT_0 0x108050
+#define HC_REG_AGG_INT_1 0x108054
+#define HC_REG_ATTN_BIT 0x108120
+#define HC_REG_ATTN_IDX 0x108100
+#define HC_REG_ATTN_MSG0_ADDR_L 0x108018
+#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
+#define HC_REG_ATTN_NUM_P0 0x108038
+#define HC_REG_ATTN_NUM_P1 0x10803c
+#define HC_REG_COMMAND_REG 0x108180
+#define HC_REG_CONFIG_0 0x108000
+#define HC_REG_CONFIG_1 0x108004
+#define HC_REG_FUNC_NUM_P0 0x1080ac
+#define HC_REG_FUNC_NUM_P1 0x1080b0
+/* [RW 3] Parity mask register #0 read/write */
+#define HC_REG_HC_PRTY_MASK 0x1080a0
+/* [R 3] Parity register #0 read */
+#define HC_REG_HC_PRTY_STS 0x108094
+/* [RC 3] Parity register #0 read clear */
+#define HC_REG_HC_PRTY_STS_CLR 0x108098
+#define HC_REG_INT_MASK 0x108108
+#define HC_REG_LEADING_EDGE_0 0x108040
+#define HC_REG_LEADING_EDGE_1 0x108048
+#define HC_REG_MAIN_MEMORY 0x108800
+#define HC_REG_MAIN_MEMORY_SIZE 152
+#define HC_REG_P0_PROD_CONS 0x108200
+#define HC_REG_P1_PROD_CONS 0x108400
+#define HC_REG_PBA_COMMAND 0x108140
+#define HC_REG_PCI_CONFIG_0 0x108010
+#define HC_REG_PCI_CONFIG_1 0x108014
+#define HC_REG_STATISTIC_COUNTERS 0x109000
+#define HC_REG_TRAILING_EDGE_0 0x108044
+#define HC_REG_TRAILING_EDGE_1 0x10804c
+#define HC_REG_UC_RAM_ADDR_0 0x108028
+#define HC_REG_UC_RAM_ADDR_1 0x108030
+#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
+#define HC_REG_VQID_0 0x108008
+#define HC_REG_VQID_1 0x10800c
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS 0x130108
+/* [R 4] Debug: attn_fsm */
+#define IGU_REG_ATTN_FSM 0x130054
+#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
+#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
+/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
+ * 1-pending). [2:0] = PFID. Pending means the attention message was sent
+ * but write done was not yet received. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
+#define IGU_REG_COMMAND_REG_CTRL 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this register are set and cleared via the producer
+ * command. Data is valid only in addresses 0-4; all the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM 0x130064
+/* [R 1] data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE 136
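+/* Editor's note (illustrative decode of the CAM entry layout described
+ * above; the field names are hypothetical, not from the original header):
+ *
+ *   valid  =  entry       & 0x1;	(bit [0])
+ *   vector = (entry >> 1) & 0x3f;	(bits [6:1])
+ *   fid    = (entry >> 7) & 0x7f;	(bits [13:7])
+ */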
+#define IGU_REG_PBA_STATUS_LSB 0x130138
+#define IGU_REG_PBA_STATUS_MSB 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
+/* [WB_R 32] Each bit represents the pending status for that SB. 0 = no
+ * pending; 1 = pending. Pending means the interrupt was asserted and write
+ * done was not received. Data is valid only in addresses 0-4; all the rest
+ * are zero. */
+#define IGU_REG_PENDING_BITS_STATUS 0x130300
+#define IGU_REG_PF_CONFIGURATION 0x130154
+/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
+ * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
+ * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
+ * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for non default SB; each even line in the
+ * memory holds the U producer and each odd line hold the C producer. The
+ * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
+ * last 20 producers are for the DSB for each PF. Each PF has five segments
+ * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
+#define IGU_REG_PROD_CONS_MEMORY 0x132000
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM 0x130068
+/* [RW 6] Writing one to a bit resets the appropriate memory. When the
+ * memory reset has finished the appropriate bit will be clear. Bit 0 - mapping
+ * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
+ * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES 0x130158
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
+#define IGU_REG_SB_MASK_LSB 0x130164
+#define IGU_REG_SB_MASK_MSB 0x130168
+/* [RW 16] Number of commands that were dropped without causing an interrupt
+ * due to: read access to a WO BAR address; write access to a RO BAR
+ * address; any access to a reserved address; or PCI function error set
+ * while the address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
+/* [RW 32] Number of cycles the timer mask masks the IGU interrupt after a
+ * timer mask command arrives. The value must be greater than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
+#define IGU_REG_VF_CONFIGURATION 0x130170
+/* [WB_R 32] Each bit represents the write done pending status for that SB
+ * (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data is valid only in addresses 0-4; all the rest are
+ * zero. */
+#define IGU_REG_WRITE_DONE_PENDING 0x130480
+#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
+#define MCP_REG_MCPR_ACCESS_LOCK 0x8009c
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
+#define MCP_REG_MCPR_NVM_ADDR 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND 0x86400
+#define MCP_REG_MCPR_NVM_READ 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
+#define MCP_REG_MCPR_NVM_WRITE 0x86408
+#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
+/* [R 32] read first 32 bit after inversion of function 0. mapped as
+ follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
+ [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
+ GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
+ glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
+ [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
+ MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
+ Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
+ interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
+ error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
+ interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
+ Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
+/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434
+/* [R 32] read second 32 bit after inversion of function 0. mapped as
+ follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c
+/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
+ PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
+ [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
+ [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
+ XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440
+/* [R 32] read third 32 bit after inversion of function 0. mapped as
+ follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
+ error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
+ PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448
+/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
+ CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
+ Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
+ Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
+ error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
+ interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
+ MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
+ Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
+ timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
+ func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
+ func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
+ timers attn_4 func1; [30] General attn0; [31] General attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c
+/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
+ follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454
+/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
+ General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
+ [4] General attn6; [5] General attn7; [6] General attn8; [7] General
+ attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
+ General attn13; [12] General attn14; [13] General attn15; [14] General
+ attn16; [15] General attn17; [16] General attn18; [17] General attn19;
+ [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
+ RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
+ RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
+ attention; [27] GRC Latched reserved access attention; [28] MCP Latched
+ rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
+ ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
+/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
+ * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
+/* [W 14] a write to this register clears the corresponding latched
+ signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
+ d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
+ latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
+ GRC Latched reserved access attention; one in d7 clears Latched
+ rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
+ Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
+ ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
+ pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; reads
+ from this register return zero */
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
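+/* Editor's usage sketch: per the bit map above, clearing both GRC latched
+ * attentions (d5 and d6) would be a single write of (1 << 5) | (1 << 6) =
+ * 0x60 to this register (illustrative example only). */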
+/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
+/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
+ 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
+/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
+#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
+/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc
+#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c
+/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080
+/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
+/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
+#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
+/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
+#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
+/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084
+/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
+/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
+#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
+/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
+#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
+/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
+/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
+/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
+#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
+/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
+#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
+/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
+/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
+/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
+ 128 bit vector */
+#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
+#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004
+#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
+#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
+#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
+#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
+#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c
+#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
+#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
+#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
+#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
+#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
+#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
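+/* Editor's note (a sketch, not part of the original register map): the
+ * general attention registers above are contiguous, so for the indices 0-12
+ * defined here the address could be computed as:
+ *
+ *   #define MISC_REG_AEU_GENERAL_ATTN(n)	(0xa000 + ((n) << 2))
+ */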
+#define MISC_REG_AEU_GENERAL_MASK 0xa61c
+/* [RW 32] first 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
+ function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
+ [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
+ [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
+ for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
+ Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
+ interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
+ Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
+ Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
+#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
+/* [RW 32] second 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
+ error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
+ interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
+ Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
+ interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
+#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
+/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
+ [9:8] = reserved. Zero = mask; one = unmask */
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
+/* [RW 1] If set a system kill occurred */
+#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
+/* [RW 32] Represents the status of the input vector to the AEU when a system
+ kill occurred. The register is reset by POR reset. Mapped as follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
+#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
+#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
+#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
+/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
+ Port. */
+#define MISC_REG_BOND_ID 0xa400
+/* [R 16] These bits indicate the part number for the chip. */
+#define MISC_REG_CHIP_NUM 0xa408
+/* [R 4] These bits indicate the base revision of the chip. This value
+ starts at 0x0 for the A0 tape-out and increments by one for each
+ all-layer tape-out. */
+#define MISC_REG_CHIP_REV 0xa40c
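+/* Editor's sketch (hypothetical, not from the original header): a driver
+ * could compose a single chip identifier from the two read-only fields
+ * above, e.g.:
+ *
+ *   chip_id = (REG_RD(MISC_REG_CHIP_NUM) << 16) |
+ *             ((REG_RD(MISC_REG_CHIP_REV) & 0xf) << 12);
+ *
+ * where REG_RD stands for whatever 32-bit register-read accessor the
+ * driver uses. */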
+/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
+ * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
+ * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
+#define MISC_REG_CHIP_TYPE 0xac60
+#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
+/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
+ * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
+ * 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
+/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
+ * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
+/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
+ * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
+ * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
+ * the FW command that all Queues are empty is disabled. When 0 indicates
+ * that the FW command that all Queues are empty is enabled. [2] - FW Early
+ * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
+ * Exit command is disabled. When 0 indicates that the FW Early Exit command
+ * is enabled. This bit is applicable only in the EXIT Events Mask registers.
+ * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
+ * is disabled. When 0 indicates that the PBF Request indication is enabled.
+ * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
+ * Request indication is disabled. When 0 indicates that the Tx Other Than
+ * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
+ * indicates that the RX EEE LPI Status indication is disabled. When 0
+ * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
+ * Events Masks registers; this bit masks the falling edge detect of the LPI
+ * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
+ * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
+ * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
+ * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
+ * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
+ * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
+ * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
+ * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
+ * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
+ * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
+ * CORE is disabled. When 0 indicates that the L1 Status indication
+ * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
+ * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
+ * - P0 E0 EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
+ * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
+ * indicates that the P1 E0 EEE LPI REQ indication is disabled. When =0
+ * indicates that the P1 E0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
+ * LPI REQ Mask. When =1 indicates that the P0 E1 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P0 E1 EEE LPI REQ indication is
+ * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P1 E1 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P1 E1 EEE LPI REQ
+ * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
+ * REQ indication is disabled. When =0 indicates that the L1 indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
+ * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE LPI is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. [17] - L1
+ * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
+ * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
+ * When =0 indicates that the L1 Status Falling Edge Detect indication from
+ * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
+ * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
+/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
+ * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
+ * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
+ * that the FW command that all Queues are empty is disabled. When 0
+ * indicates that the FW command that all Queues are empty is enabled. [2] -
+ * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
+ * Early Exit command is disabled. When 0 indicates that the FW Early Exit
+ * command is enabled. This bit is applicable only in the EXIT Events Mask
+ * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
+ * indication is disabled. When 0 indicates that the PBF Request indication
+ * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
+ * Than PBF Request indication is disabled. When 0 indicates that the Tx
+ * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
+ * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
+ * When 0 indicates that the RX LPI Status indication is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
+ * indicates that the Tx Pause indication is disabled. When 0 indicates that
+ * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
+ * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
+ * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
+ * indicates that the QM IDLE indication is disabled. When 0 indicates that
+ * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
+ * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
+ * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
+ * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
+ * Status indication from the PCIE CORE is disabled. When 0 indicates that
+ * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the L1 status (L1 is on - off). [11] - P0 E0 EEE LPI REQ Mask. When =1
+ * indicates that the P0 E0 EEE LPI REQ indication is disabled. When =0
+ * indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1 E0
+ * EEE LPI REQ Mask. When =1 indicates that the P1 E0 EEE LPI REQ
+ * indication is disabled. When =0 indicates that the P1 E0 EEE LPI REQ
+ * indication is enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates
+ * that the P0 E1 EEE LPI REQ indication is disabled. When =0 indicates
+ * that the P0 E1 EEE LPI REQ indication is enabled. [14] - P1 E1 EEE LPI
+ * REQ Mask. When =1 indicates that the P1 E1 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P1 E1 EEE LPI REQ indication is
+ * enabled. [15] - L1 REQ Mask. When =1 indicates that the L1 REQ
+ * indication is disabled. When =0 indicates that the L1 REQ indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled
+ * (Rx EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE
+ * LPI is on - off). This bit is applicable only in the EXIT Events Masks
+ * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
+ * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
+ * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
+ * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. Clock 25MHz.
+ * Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
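+/* Editorial usage sketch (illustrative only; not part of the register
+ * spec): the entry and exit mask registers share the 18-bit layout above;
+ * each bit is a disable mask (1 = ignore the event). Assuming the driver's
+ * REG_WR() accessor; masking every LPI entry event except BRB1 Empty
+ * (bit 7) could look like:
+ *
+ *	REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0, 0x3ffff & ~(1 << 7));
+ */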
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 0xa8bc
+/* [RW 32] The following driver registers (1...16) represent 16 drivers and
+ 32 clients. Each client can be controlled by one driver only. A one in a
+ given bit means that this driver controls the corresponding client (Ex:
+ bit 5 set means this driver controls client number 5). addr1 = set;
+ addr0 = clear; a read from either address returns the same result =
+ status. A write to address 1 sets a request to take control of all the
+ clients whose corresponding bit (in the write command) is set. If the
+ client is free (the corresponding bit in all the other drivers is clear)
+ a one will be written to that driver register; if the client isn't free
+ the bit will remain zero. If the corresponding bit is already set (the
+ driver requests control of a client it already controls) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. A write to
+ address 0 sets a request to free all the clients whose corresponding bit
+ (in the write command) is set. If the corresponding bit is clear (the
+ driver requests to free a client it doesn't control) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. */
+#define MISC_REG_DRIVER_CONTROL_1 0xa510
+#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
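+/* Editorial usage sketch (illustrative only): these registers back the
+ * driver's per-function HW resource lock. Assuming the REG_RD()/REG_WR()
+ * accessors from bnx2x.h and a function index func; taking and releasing a
+ * resource bit could look like:
+ *
+ *	u32 ctrl = (func <= 5) ? MISC_REG_DRIVER_CONTROL_1 + func * 8
+ *			       : MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8;
+ *	u32 bit = 1 << resource;
+ *
+ *	REG_WR(bp, ctrl + 4, bit);		(addr1: request control)
+ *	acquired = REG_RD(bp, ctrl) & bit;	(read = status)
+ *	REG_WR(bp, ctrl, bit);			(addr0: release)
+ */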
+/* [RW 1] e1hmf for WOL. If clear; the WOL signal to the PXP will be sent
+ on bit 0 only. */
+#define MISC_REG_E1HMF_MODE 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c
+/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 -
+ the path_swap output is equal to 4 port mode path swap input pin; if it
+ is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754
+/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 -
+ the port_swap output is equal to 4 port mode port swap input pin; if it
+ is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734
+/* [RW 32] Debug only: spare RW register reset by core reset */
+#define MISC_REG_GENERIC_CR_0 0xa460
+#define MISC_REG_GENERIC_CR_1 0xa464
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+ use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+ can not be configured as an output. Each output has its output enable in
+ the MCP register space; but this bit needs to be set to make use of that.
+ Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+ set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+ When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+ the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+ spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG 0xa9a0
+/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
+ these bits is written as a '1'; the corresponding GPIO bit will turn off
+ its drivers and become an input. This is the reset state of all GPIO
+ pins. The read value of these bits will be a '1' if that last command
+ (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+ [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
+ as a '1'; the corresponding GPIO bit will drive low. The read value of
+ these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+ this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET
+ port 0; When any of these bits is written as a '1'; the corresponding
+ GPIO bit will drive high (if it has that capability). The read value of
+ these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+ this bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE
+ port 0; RO; These bits indicate the read value of each of the eight GPIO
+ pins. This is the result value of the pin; not the drive value. Writing
+ these bits will have no effect. */
+#define MISC_REG_GPIO 0xa490
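+/* Editorial usage sketch (illustrative only): per the SET/CLR/FLOAT fields
+ * above; driving port-0 GPIO pin 2 high and then floating it again could
+ * look like:
+ *
+ *	REG_WR(bp, MISC_REG_GPIO, 1 << (8 + 2));	(SET port 0; pin 2)
+ *	REG_WR(bp, MISC_REG_GPIO, 1 << (24 + 2));	(FLOAT port 0; pin 2)
+ *
+ * The driver wraps this pattern (including port swap handling) in
+ * bnx2x_set_gpio(). */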
+/* [RW 8] These bits enable the GPIO_INTs to signal events to the
+ IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
+ p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
+ [7] p1_gpio_3; */
+#define MISC_REG_GPIO_EVENT_EN 0xa2bc
+/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing
+ a '1' to these bits clears the corresponding bit in the #OLD_VALUE
+ register. This will acknowledge an interrupt on the falling edge of the
+ corresponding GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16]
+ OLD_SET port0; Writing a '1' to these bits sets the corresponding bit in
+ the #OLD_VALUE register. This will acknowledge an interrupt on the rising
+ edge of the corresponding GPIO input (reset value 0). [15-12] OLD_VALUE
+ port1; [11-8] OLD_VALUE port0; RO; These bits indicate the old value of
+ the GPIO input value. When the ~INT_STATE bit is set; this bit indicates
+ the OLD value of the pin such that if ~INT_STATE is set and this bit is
+ '0'; then the interrupt is due to a low to high edge. If ~INT_STATE is
+ set and this bit is '1'; then the interrupt is due to a high to low edge
+ (reset value 0). [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These
+ bits indicate the current GPIO interrupt state for each GPIO pin. This
+ bit is cleared when the appropriate #OLD_SET or #OLD_CLR command bit is
+ written. This bit is set when the GPIO input does not match the current
+ value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_GPIO_INT 0xa494
+/* [R 28] this field holds the last information that caused reserved
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_RSV_ATTN 0xa3c0
+/* [R 28] this field holds the last information that caused timeout
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
+/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
+ access that does not finish within
+ ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
+ cleared; this timeout is disabled. If this timeout occurs; the GRC shall
+ assert its attention output. */
+#define MISC_REG_GRC_TIMEOUT_EN 0xa280
+/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
+ the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
+ 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
+ (reset value 001) Charge pump current control; 111 for 720u; 011 for
+ 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
+ Global bias control; When bit 7 is high bias current will be 10 0gh; When
+ bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
+ Pll_observe (reset value 010) Bits to control observability. bit 10 is
+ for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
+ (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
+ and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
+ sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
+ internally). [14] reserved (reset value 0) Reset for VCO sequencer is
+ connected to RESET input directly. [15] capRetry_en (reset value 0)
+ enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
+ value 0) bit to continuously monitor vco freq (inverted). [17]
+ freqDetRestart_en (reset value 0) bit to enable restart when not freq
+ locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
+ retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
+ 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
+ pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
+ (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
+ 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
+ bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
+ enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
+ capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
+ restart. [27] capSelectM_en (reset value 0) bit to enable cap select
+ register bits. */
+#define MISC_REG_LCPLL_CTRL_1 0xa2a4
+#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low. Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c
+/* [RW 4] Interrupt mask register #0 read/write */
+#define MISC_REG_MISC_INT_MASK 0xa388
+/* [RW 1] Parity mask register #0 read/write */
+#define MISC_REG_MISC_PRTY_MASK 0xa398
+/* [R 1] Parity register #0 read */
+#define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
+#define MISC_REG_NIG_WOL_P0 0xa270
+#define MISC_REG_NIG_WOL_P1 0xa274
+/* [R 1] If set; indicates that the pcie_rst_b was asserted without perst
+ assertion */
+#define MISC_REG_PCIE_HOT_RESET 0xa618
+/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911.
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
+ divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
+ divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
+ divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
+ divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
+ freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
+ (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
+ 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
+ Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
+ value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
+ 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
+ [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
+ Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
+ testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
+ testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
+ testa_en (reset value 0); */
+#define MISC_REG_PLL_STORM_CTRL_1 0xa294
+#define MISC_REG_PLL_STORM_CTRL_2 0xa298
+#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
+#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
+/* [R 1] Status of 4 port mode enable input pin. */
+#define MISC_REG_PORT4MODE_EN 0xa750
+/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
+ * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
+ * the port4mode_en output is equal to bit[1] of this register; [1] -
+ * Overwrite value. If bit[0] of this register is 1 this is the value that
+ * receives the port4mode_en output . */
+#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
+/* [RW 32] reset reg#2; write/read one = the specific block is out of reset;
+ write/read zero = the specific block is in reset; addr 0-wr- the write
+ value will be written to the register; addr 1-set - one will be written
+ to all the bits that have the value of one in the data written (bits that
+ have the value of zero will not be changed); addr 2-clear - zero will be
+ written to all the bits that have the value of one in the data written
+ (bits that have the value of zero will not be changed); addr 3-ignore;
+ read ignore from all addr except addr 00; inside order of the bits is:
+ [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
+ [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7]
+ rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
+ [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
+ Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core;
+ [16] rst_pxp_rq_rd_wr; [31:17] reserved */
+#define MISC_REG_RESET_REG_1 0xa580
+#define MISC_REG_RESET_REG_2 0xa590
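+/* Editorial usage sketch (illustrative only): per the addressing scheme
+ * above; each reset register decodes four consecutive dword addresses
+ * (wr/set/clear/ignore); so individual blocks can be taken into or out of
+ * reset without a read-modify-write:
+ *
+ *	REG_WR(bp, MISC_REG_RESET_REG_2 + 4, mask);	(addr 1: out of reset)
+ *	REG_WR(bp, MISC_REG_RESET_REG_2 + 8, mask);	(addr 2: into reset)
+ */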
+/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
+ shared with the driver resides */
+#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+ the corresponding SPIO bit will turn off its drivers and become an
+ input. This is the reset state of all SPIO pins. The read value of these
+ bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+ bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
+ is written as a '1'; the corresponding SPIO bit will drive low. The read
+ value of these bits will be a '1' if that last command (#SET; #CLR; or
+ #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+ these bits is written as a '1'; the corresponding SPIO bit will drive
+ high (if it has that capability). The read value of these bits will be a
+ '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+ (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+ each of the eight SPIO pins. This is the result value of the pin; not the
+ drive value. Writing these bits will have no effect. Each 8 bits field
+ is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
+ from VAUX. (This is an output pin only; the FLOAT field is not applicable
+ for this pin); [1] VAUX Disable; when pulsed low; disables supply from
+ VAUX. (This is an output pin only; FLOAT field is not applicable for this
+ pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+ select VAUX supply. (This is an output pin only; it is not controlled by
+ the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+ field is not applicable for this pin; only the VALUE field is relevant -
+ it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
+ Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+ device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO 0xa4fc
+/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP
+ according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
+ [7:6] reserved */
+#define MISC_REG_SPIO_EVENT_EN 0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
+ corresponding bit in the #OLD_VALUE register. This will acknowledge an
+ interrupt on the falling edge of the corresponding SPIO input (reset value
+ 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit
+ in the #OLD_VALUE register. This will acknowledge an interrupt on the
+ rising edge of the corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
+ RO; These bits indicate the old value of the SPIO input value. When the
+ ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
+ that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
+ to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
+ interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
+ RO; These bits indicate the current SPIO interrupt state for each SPIO
+ pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
+ command bit is written. This bit is set when the SPIO input does not
+ match the current value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_SPIO_INT 0xa500
+/* [RW 32] reload value for counter 4 if reload; the value will be reloaded
+ if the counter reached zero and the reload bit
+ (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
+#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
+/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
+ timer 8 */
+#define MISC_REG_SW_TIMER_VAL 0xa5c0
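+/* Editorial usage sketch (illustrative only): the eight timer values are
+ * consecutive dwords; so the current value of SW timer N (1-based; as in
+ * the comment above) is read with simple address arithmetic:
+ *
+ *	u32 val = REG_RD(bp, MISC_REG_SW_TIMER_VAL + 4 * (N - 1));
+ */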
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758
+/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the
+ path_swap output is equal to 2 port mode path swap input pin; if it is 1
+ - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+ loaded; 0-prepare; 1-unprepare */
+#define MISC_REG_UNPREPARED 0xa424
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+#define MISC_REG_WC0_RESET 0xac30
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+ side. This should be less than or equal to phy_port_mode; if some of the
+ ports are not used. This enables reduction of frequency on the core side.
+ 00 - Single Port Mode; 01 - Dual Port Mode; 10 - Tri Port Mode; 11 - Quad
+ Port Mode. This is a strap input for the XMAC_MP core; and should be
+ changed only while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+ Core. 00 - Single Port Mode; 01 - Dual Port Mode; 1x - Quad Port Mode.
+ This is a strap input for the XMAC_MP core; and should be changed only
+ while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960
+/* [RW 32] 1 [47] Packet Size = 64. Writes to this register write bits 31:0.
+ * Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count. Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
+/* [RW 1] Input enable for RX_BMAC0 IF */
+#define NIG_REG_BMAC0_IN_EN 0x100ac
+/* [RW 1] output enable for TX_BMAC0 IF */
+#define NIG_REG_BMAC0_OUT_EN 0x100e0
+/* [RW 1] output enable for TX BMAC pause port 0 IF */
+#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
+/* [RW 1] output enable for RX_BMAC0_REGS IF */
+#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
+/* [RW 1] output enable for RX BRB1 port0 IF */
+#define NIG_REG_BRB0_OUT_EN 0x100f8
+/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
+#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
+/* [RW 1] output enable for RX BRB1 port1 IF */
+#define NIG_REG_BRB1_OUT_EN 0x100fc
+/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
+#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
+/* [RW 1] output enable for RX BRB1 LP IF */
+#define NIG_REG_BRB_LB_OUT_EN 0x10100
+/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
+ error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
+ [73:72] vnic_num; [81:74] sideband_info */
+#define NIG_REG_DEBUG_PACKET_LB 0x10800
+/* [RW 1] Input enable for TX Debug packet */
+#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
+/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
+ packets from the PBF are not forwarded to the MAC and are just deleted
+ from the FIFO. The first packet may be deleted from the middle; and the
+ last packet will always be deleted to the end. */
+#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
+/* [RW 1] Output enable to EMAC0 */
+#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
+/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
+ to the EMAC for port0; otherwise to the BMAC for port0 */
+#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
+/* [RW 1] Input enable for TX PBF user packet port0 IF */
+#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
+/* [RW 1] Input enable for TX PBF user packet port1 IF */
+#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
+/* [RW 1] Input enable for TX UMP management packet port0 IF */
+#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
+/* [RW 1] Input enable for RX_EMAC0 IF */
+#define NIG_REG_EMAC0_IN_EN 0x100a4
+/* [RW 1] output enable for TX EMAC pause port 0 IF */
+#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
+/* [R 1] status from emac0. This bit is set when MDINT from either the
+ EXT_MDINT pin or from the Copper PHY is driven low. This condition must
+ be cleared in the attached PHY device that is driving the MDINT pin. */
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
+/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC0 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
+/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC1 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
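+/* Editorial usage sketch (illustrative only): BMAC registers are 64-bit
+ * wide-bus locations; reached by adding a BIGMAC_REGISTER_* offset to the
+ * memory base. Assuming the driver's wide-bus REG_RD_DMAE() helper:
+ *
+ *	u32 wb_data[2];
+ *	REG_RD_DMAE(bp, NIG_REG_INGRESS_BMAC0_MEM +
+ *		    BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
+ */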
+/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
+#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
+/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
+ packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
+#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
+/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch
+ logic for interrupts must be used. Enable per bit of interrupt of
+ ~latch_status.latch_status */
+#define NIG_REG_LATCH_BC_0 0x16210
+/* [RW 27] Latch for each interrupt from Unicore.b[0]
+ status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
+ b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
+ b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
+ b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
+ b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
+ b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
+ b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
+ b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
+ b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
+ b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
+ b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
+ b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
+#define NIG_REG_LATCH_STATUS_0 0x18000
+/* [RW 1] led 10g for port 0 */
+#define NIG_REG_LED_10G_P0 0x10320
+/* [RW 1] led 10g for port 1 */
+#define NIG_REG_LED_10G_P1 0x10324
+/* [RW 1] Port0: This bit is set to enable the use of the
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+ defined below. If this bit is cleared; then the blink rate will be about
+ 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
+/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
+ Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
+ is reset to 0x080; giving a default blink frequency of approximately 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
+/* [RW 1] Port0: If set along with the
+ ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+ bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+ bit; the Traffic LED will blink with the blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
+/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
+ Traffic LED will then be controlled via bit ~nig_registers_
+ led_control_traffic_p0.led_control_traffic_p0 and bit
+ ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
+ turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
+ set; the LED will blink with blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
+/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8 MAC3;
+ 9-11 PHY7; 12 MAC4; 13-15 PHY10; */
+#define NIG_REG_LED_MODE_P0 0x102f0
+/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1 -
+ tsdm enable; b2 - usdm enable */
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
+/* [RW 1] SAFC enable for port0. This register may be set to 1 only when
+ ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable = 0 for the same
+ port */
+#define NIG_REG_LLFC_ENABLE_0 0x16208
+#define NIG_REG_LLFC_ENABLE_1 0x1620c
+/* [RW 16] classes are high-priority for port0 */
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
+/* [RW 16] classes are low-priority for port0 */
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
+/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
+#define NIG_REG_LLFC_OUT_EN_0 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1 0x160cc
+#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
+#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
+#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
+/* [RW 2] Determine the classification participants. 0: no classification.
+ 1: classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH0_CLS_TYPE 0x16080
+/* [RW 32] cm header for llh0 */
+#define NIG_REG_LLH0_CM_HEADER 0x1007c
+#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
+#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
+/* [RW 16] destination TCP port 0. The LLH will look for this port in
+ all incoming packets. */
+#define NIG_REG_LLH0_DEST_TCP_0 0x10220
+/* [RW 16] destination UDP port 0. The LLH will look for this port in
+ all incoming packets. */
+#define NIG_REG_LLH0_DEST_UDP_0 0x10214
+#define NIG_REG_LLH0_ERROR_MASK 0x1008c
+/* [RW 8] event id for llh0 */
+#define NIG_REG_LLH0_EVENT_ID 0x10084
+#define NIG_REG_LLH0_FUNC_EN 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
+#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
+/* [RW 1] Determine the IP version to look for in
+ ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
+#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
+/* [RW 1] t bit for llh0 */
+#define NIG_REG_LLH0_T_BIT 0x10074
+/* [RW 12] VLAN ID 0. In case of a VLAN packet the LLH will look for this ID. */
+#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
+/* [RW 8] init credit counter for port0 in LLH */
+#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
+#define NIG_REG_LLH0_XCM_MASK 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
+/* [RW 2] Determine the classification participants. 0: no classification.
+ 1: classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH1_CLS_TYPE 0x16084
+/* [RW 32] cm header for llh1 */
+#define NIG_REG_LLH1_CM_HEADER 0x10080
+#define NIG_REG_LLH1_ERROR_MASK 0x10090
+/* [RW 8] event id for llh1 */
+#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_EN 0x16104
+#define NIG_REG_LLH1_FUNC_MEM 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE 0x18614
+/* [RW 8] init credit counter for port1 in LLH */
+#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
+#define NIG_REG_LLH1_XCM_MASK 0x10134
+/* [RW 1] When this bit is set; the LLH will expect all packets to be with
+ e1hov */
+#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ sending it to the BRB or calculating WoL on it. */
+#define NIG_REG_LLH_MF_MODE 0x16024
+#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
+#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
+/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
+#define NIG_REG_NIG_EMAC0_EN 0x1003c
+/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
+#define NIG_REG_NIG_EMAC1_EN 0x10040
+/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+ EMAC0 to strip the CRC from the ingress packets. */
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
+/* [R 32] Interrupt register #0 read */
+#define NIG_REG_NIG_INT_STS_0 0x103b0
+#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [RC 32] Interrupt register #0 read clear */
+#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1 0x183d8
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
+#define NIG_REG_NIG_PRTY_STS 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE 0x18078
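+/* Editorial usage sketch (illustrative only): per the note above; the
+ * other flow-control modes should be off before PFC is enabled; e.g.:
+ *
+ *	REG_WR(bp, NIG_REG_LLFC_ENABLE_0, 0);
+ *	REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, 1);
+ */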
+#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758
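+/* Editorial usage sketch (illustrative only): retrieving a buffered RX PTP
+ * timestamp and re-arming the 1-deep buffer could look like:
+ *
+ *	u64 ts = REG_RD(bp, NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
+ *	ts = (ts << 32) | REG_RD(bp, NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
+ *	REG_WR(bp, NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 1 << 16); (re-arm)
+ */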
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
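+/* Editorial usage sketch (illustrative only): the register packs eight
+ * 4-bit COS values; priority 0 in bits 3:0 up to priority 7 in bits 31:28;
+ * so a prio-to-COS table is packed as:
+ *
+ *	u32 val = 0;
+ *	for (prio = 0; prio < 8; prio++)
+ *		val |= (cos_of[prio] & 0xf) << (prio * 4);
+ *	REG_WR(bp, NIG_REG_P0_PKT_PRIORITY_TO_COS, val);
+ */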
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P0_PTP_EN 0x18788
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 -- management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
+ * for management at priority 0; debug traffic at priorities 1 and 2; COS0
+ * traffic at priority 3; and COS1 traffic at priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
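+/* Editorial usage sketch (illustrative only): each arbitration priority is
+ * a 3-bit client-ID field; so the client assigned to priority p can be
+ * read out of the reset value 0x4688 (15'b100_011_010_001_000) with:
+ *
+ *	client = (0x4688 >> (p * 3)) & 0x7;	(p=0 -> 0 ... p=4 -> 4)
+ */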
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
+#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460a
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter
+ * for BRB LB interface is bypassed and PBF LB traffic is always selected to
+ * send to BRB LB.
+ */
+#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4
+#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
+#define NIG_REG_P1_MAC_IN_EN 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P1_PTP_EN 0x187b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits [3:0]
+ * are for client 0; bits [35:32] are for client 8. For clients that are
+ * not subject to WFQ credit blocking - their specifications here are not
+ * used. This is a new register (with 2_) added in E3 B0 to accommodate the
+ * 9 input clients to the ETS arbiter. The reset default is set for
+ * management and debug to use credit registers 6, 7, and 8, respectively,
+ * and COSes 0-5 to use credit registers 0-5 respectively (0x543210876).
+ * Note that credit registers cannot be shared between clients. Note also
+ * that there are only COS0-2 in port 1 - there is a total of 6 clients in
+ * port 1, so only credit registers 0-5 are valid. This register should be
+ * configured appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits [3:0]
+ * are for client 0; bits [35:32] are for client 8. For clients that are
+ * not subject to WFQ credit blocking - their specifications here are not
+ * used. This is a new register (with 2_) added in E3 B0 to accommodate the
+ * 9 input clients to the ETS arbiter. The reset default is set for
+ * management and debug to use credit registers 6, 7, and 8, respectively,
+ * and COSes 0-5 to use credit registers 0-5 respectively (0x543210876).
+ * Note that credit registers cannot be shared between clients. Note also
+ * that there are only COS0-2 in port 1 - there is a total of 6 clients in
+ * port 1, so only credit registers 0-5 are valid. This register should be
+ * configured appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec
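+/* Illustrative sketch, not part of the register map: the 36-bit client to
+ * credit-register map above is split across the _LSB (bits 31:0) and _MSB
+ * (bits 35:32) registers, 4 bits per client. A minimal helper assuming
+ * REG_WR() from bnx2x.h; the function name is hypothetical.
+ */
+static inline void bnx2x_p1_set_client_credit_map(struct bnx2x *bp,
+						  u64 map36)
+{
+	/* Bits [3:0] of the 36-bit value select the credit register for
+	 * client 0; bits [35:32] (client 8) land in the _MSB register.
+	 */
+	REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+	       (u32)(map36 & 0xffffffff));
+	REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB,
+	       (u32)(map36 >> 32));
+}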
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ two round-robin arbitration slots to avoid starvation. A value of 0 means
+ no strict priority cycles - the strict priority with anti-starvation
+ arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35:32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35:32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ */
+#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+ forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
+/* [RW 1] Pause enable for port0. This register may get 1 only when
+ ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable = 0 for the same
+ port */
+#define NIG_REG_PAUSE_ENABLE_0 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1 0x160c4
+/* [RW 1] Input enable for RX PBF LP IF */
+#define NIG_REG_PBF_LB_IN_EN 0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+ ~nig_registers_strap_override.strap_override = 1 */
+#define NIG_REG_PORT_SWAP 0x10394
+/* [RW 1] PPP enable for port0. This register may get 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0 0x160b0
+#define NIG_REG_PPP_ENABLE_1 0x160b4
+/* [RW 1] output enable for RX parser descriptor IF */
+#define NIG_REG_PRS_EOP_OUT_EN 0x10104
+/* [RW 1] Input enable for RX parser request IF */
+#define NIG_REG_PRS_REQ_IN_EN 0x100b8
+/* [RW 5] control to serdes - CL45 DEVAD */
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
+/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
+/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
+/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+ between 1024 and 1522 bytes for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+ 1523 bytes and above for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port1 */
+#define NIG_REG_STAT1_BRB_DISCARD 0x10628
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+ between 1024 and 1522 bytes for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+ 1523 bytes and above for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
+/* [WB_R 64] Rx statistics : User octets received for LP */
+#define NIG_REG_STAT2_BRB_OCTET 0x107e0
+#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
+#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
+/* [RW 1] port swap mux selection. If this register is equal to 0 then port
+ swap is equal to the SPIO pin that inputs from ifmux_serdes_swap. If 1
+ then port swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE 0x10398
+/* [WB 64] Addresses for TimeSync related registers in the timesync
+ * generator sub-module.
+ */
+#define NIG_REG_TIMESYNC_GEN_REG 0x18800
+/* [RW 1] output enable for RX_XCM0 IF */
+#define NIG_REG_XCM0_OUT_EN 0x100f0
+/* [RW 1] output enable for RX_XCM1 IF */
+#define NIG_REG_XCM1_OUT_EN 0x100f4
+/* [RW 1] control to xgxs - remote PHY in-band MDIO */
+#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
+/* [RW 5] control to xgxs - CL45 DEVAD */
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
+/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
+/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
+/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
+/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
+/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
+#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
+/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
+#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
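+/* Illustrative sketch, not part of the register map: extracting the link
+ * fields from the port 0 status interrupt register with the masks above.
+ * The _SIZE constant is the bit offset of the 4-bit XGXS link field.
+ * Assumes REG_RD() from bnx2x.h; the helper name is hypothetical.
+ */
+static inline void bnx2x_p0_link_bits(struct bnx2x *bp, bool *link10g,
+				      u8 *xgxs_link)
+{
+	u32 sts = REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0);
+
+	*link10g = !!(sts &
+		      NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G);
+	*xgxs_link = (sts &
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS) >>
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE;
+}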
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1 0x140340
+/* [RW 1] Disable processing further tasks from port 0 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
+/* [RW 1] Disable processing further tasks from port 1 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
+/* [RW 1] Disable processing further tasks from port 4 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
+#define PBF_REG_DISABLE_PF 0x1402e8
+#define PBF_REG_DISABLE_VF 0x1402ec
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, which is the one the RR output is connected to
+ * (this is not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, which is the one the RR output is connected to
+ * (this is not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset, strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED 0x15c050
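+/* Illustrative sketch, not part of the register map: a plausible ordering
+ * for bringing up ETS on the port 0 command arbiter with the registers
+ * above - map clients to credit registers, choose strict vs. WFQ, then
+ * enable. The values are caller-supplied placeholders; REG_WR() comes
+ * from bnx2x.h and the helper name is hypothetical.
+ */
+static inline void bnx2x_pbf_ets_setup_p0(struct bnx2x *bp, u32 credit_map,
+					  u32 strict_mask, u32 wfq_mask,
+					  u32 prio_client, u16 nstrict)
+{
+	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, credit_map);
+	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, strict_mask);
+	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, wfq_mask);
+	REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, prio_client);
+	REG_WR(bp, PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, nstrict);
+	/* Enable WFQ arbitration only after the maps above are programmed */
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+}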
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
+#define PBF_REG_IF_ENABLE_REG 0x140044
+/* [RW 1] Init bit. When set the initial credits are copied to the credit
+ registers (except the port credits). Should be set and then reset after
+ the configuration of the block has ended. */
+#define PBF_REG_INIT 0x140000
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1 0x15c234
+/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P0 0x140004
+/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P1 0x140008
+/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P4 0x14000c
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
+/* [RW 1] Enable for mac interface 0. */
+#define PBF_REG_MAC_IF0_ENABLE 0x140030
+/* [RW 1] Enable for mac interface 1. */
+#define PBF_REG_MAC_IF1_ENABLE 0x140034
+/* [RW 1] Enable for the loopback interface. */
+#define PBF_REG_MAC_LB_ENABLE 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
+/* [RW 10] Port 0 threshold used by the arbiter in 16 byte lines, used when
+ pause is not supported. */
+#define PBF_REG_P0_ARB_THRSH 0x1400e4
+/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P0_CREDIT 0x140200
+/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P0_INIT_CRD 0x1400d0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
+#define PBF_REG_P0_TASK_CNT 0x140204
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_CREDIT 0x140208
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_INIT_CRD 0x1400d4
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
+#define PBF_REG_P1_TASK_CNT 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
+/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P4_CREDIT 0x140210
+/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P4_INIT_CRD 0x1400e0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
+#define PBF_REG_P4_TASK_CNT 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
+/* [RW 5] Interrupt mask register #0 read/write */
+#define PBF_REG_PBF_INT_MASK 0x1401d4
+/* [R 5] Interrupt register #0 read */
+#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [R 28] Parity register #0 read */
+#define PBF_REG_PBF_PRTY_STS 0x1401d8
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
+/* [R 13] Number of 8 byte lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
+/* [RW 16] One of 8 values that should be compared to the type field in
+ * Ethernet parsing. If there is a match, the field after Ethernet is the
+ * first VLAN. Reset value is 0x8100, which is the standard VLAN type. Note
+ * that when checking the second VLAN, the type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0 0x15c06c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define PB_REG_PB_INT_MASK 0x28
+/* [R 2] Interrupt register #0 read */
+#define PB_REG_PB_INT_STS 0x1c
+/* [RW 4] Parity mask register #0 read/write */
+#define PB_REG_PB_PRTY_MASK 0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
+ * bits[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
+/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
+ * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
+ * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
+ * arrived with a correctable error. Bit 3 - Configuration RW arrived with
+ * an uncorrectable error. Bit 4 - Completion with Configuration Request
+ * Retry Status. Bit 5 - Expansion ROM access received with a write request.
+ * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
+ * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
+ * and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [W 7] Writing 1 to each bit in this register clears a corresponding error
+ * details register and enables logging new error details. Bit 0 - clears
+ * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears
+ * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS
+ * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32
+ * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 -
+ * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears
+ * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6
+ * - clears TCPL_IN_TWO_RCBS_DETAILS. */
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c
+
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
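+/* Illustrative sketch, not part of the register map: checking the VF
+ * violation causes in the PGLUE_B interrupt status using the bit masks
+ * defined earlier in this file. Assumes REG_RD() from bnx2x.h; the helper
+ * name is hypothetical.
+ */
+static inline bool bnx2x_pglue_b_has_vf_attn(struct bnx2x *bp)
+{
+	u32 sts = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+
+	return !!(sts &
+		  (PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN |
+		   PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN |
+		   PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN));
+}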
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
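+/* Illustrative sketch, not part of the register map: unpacking the field
+ * layout documented above. Assumes REG_RD() from bnx2x.h; the helper and
+ * its output parameters are hypothetical.
+ */
+static inline bool bnx2x_rx_err_details(struct bnx2x *bp, u8 *pfid,
+					bool *vf_valid, u8 *vfid, u8 *err)
+{
+	u32 val = REG_RD(bp, PGLUE_B_REG_RX_ERR_DETAILS);
+
+	/* Bit 12 is the 'valid' flag - a completion error was latched */
+	if (!(val & (1 << 12)))
+		return false;
+	*pfid = val & 0x7;		/* [2:0] - PFID */
+	*vf_valid = !!(val & (1 << 3));	/* [3] - VF_VALID */
+	*vfid = (val >> 4) & 0x3f;	/* [9:4] - VFID */
+	*err = (val >> 10) & 0x3;	/* [11:10] - error code */
+	return true;
+}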
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP. Reset by MCP writing 1 to
+ * sr_iov_disabled_request_clr. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
+/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
+ * completion has not returned yet. 1 - tag is unused. Same functionality
+ * as pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
+#define PGLUE_B_REG_TAGS_63_32 0x9244
+/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
+/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
+/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
+/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
+/* [R 32] Address [31:0] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
+/* [R 32] Address [63:32] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
+/* [R 31] Details of first read request not submitted due to error. [4:0]
+ * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
+ * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
+ * VFID. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
+/* [R 26] Details of first read request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
+/* [R 32] Address [31:0] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
+/* [R 32] Address [63:32] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
+/* [R 31] Details of first write request not submitted due to error. [4:0]
+ * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
+ * - VFID. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
+/* [R 26] Details of first write request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
+/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
+ * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any
+ * value (Byte resolution address). */
+#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
+#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
+#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
+#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
+#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
+#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
+#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
+/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
+/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
+/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
+/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
+/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
+/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
+/* [R 26] Details of first target VF request accessing VF GRC space that
+ * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
+ * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
+ * request accessing VF GRC space that failed permission check since the
+ * last time this register was cleared. Permission checks are: function
+ * permission; R/W permission; address range permission. */
+#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
+/* [R 31] Details of first target VF request with length violation (too many
+ * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
+ * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
+ * valid - indicates if there was a request with length violation since the
+ * last time this register was cleared. Length violations: length of more
+ * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
+ * length is more than 1 DW. */
+#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
+/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
+ * that there was a completion with uncorrectable error for the
+ * corresponding PF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_pf_7_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
+/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
+/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_127_96_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
+/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
+ * writes 1 to a bit in this register in order to clear the corresponding
+ * bit in was_error_vf_127_96 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_31_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
+/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_31_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
+/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_63_32_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
+/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_63_32 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
+/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_95_64_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
+/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_95_64 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
+/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
+/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
+/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
+/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
+#define PRS_REG_A_PRSU_20 0x40134
+/* [R 8] debug only: CFC load request current credit. Transaction based. */
+#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
+/* [R 8] debug only: CFC search request current credit. Transaction based. */
+#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168
+/* [RW 6] The initial credit for the search message to the CFC interface.
+ Credit is transaction based. */
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
+/* [RW 24] CID for port 0 if no match */
+#define PRS_REG_CID_PORT_0 0x400fc
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is reset and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is set and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
+/* [RW 32] The CM header for a match and packet type 1 for loopback port.
+ Used in packet start message to TCM. */
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8
+/* [RW 32] The CM header for a match and packet type 0. Used in packet start
+ message to TCM. */
+#define PRS_REG_CM_HDR_TYPE_0 0x40078
+#define PRS_REG_CM_HDR_TYPE_1 0x4007c
+#define PRS_REG_CM_HDR_TYPE_2 0x40080
+#define PRS_REG_CM_HDR_TYPE_3 0x40084
+#define PRS_REG_CM_HDR_TYPE_4 0x40088
+/* [RW 32] The CM header in case there was not a match on the connection */
+#define PRS_REG_CM_NO_MATCH_HDR 0x400b8
+/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
+#define PRS_REG_E1HOV_MODE 0x401c8
+/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
+ start message to TCM. */
+#define PRS_REG_EVENT_ID_1 0x40054
+#define PRS_REG_EVENT_ID_2 0x40058
+#define PRS_REG_EVENT_ID_3 0x4005c
+/* [RW 16] The Ethernet type value for FCoE */
+#define PRS_REG_FCOE_TYPE 0x401d0
+/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
+ load request message. */
+#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
+#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008
+#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c
+#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010
+#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014
+#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
+#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
+#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PRS_REG_HDRS_AFTER_BASIC 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0
+/* [RW 4] The increment value to send in the CFC load request message */
+#define PRS_REG_INC_VALUE 0x40048
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PRS_REG_MUST_HAVE_HDRS 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac
+#define PRS_REG_NIC_MODE 0x40138
+/* [RW 8] The 8-bit event ID for cases where there is no match on the
+ connection. Used in packet start message to TCM. */
+#define PRS_REG_NO_MATCH_EVENT_ID 0x40070
+/* [ST 24] The number of input CFC flush packets */
+#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128
+/* [ST 32] The number of cycles the Parser halted its operation since it
+ could not allocate the next serial number */
+#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130
+/* [ST 24] The number of input packets */
+#define PRS_REG_NUM_OF_PACKETS 0x40124
+/* [ST 24] The number of input transparent flush packets */
+#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c
+/* [RW 8] Context region for received Ethernet packet with a match and
+ packet type 0. Used in CFC load request message */
+#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
+#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
+#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
+#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
+#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
+#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
+#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
+#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
+/* [R 2] debug only: Number of pending requests for CAC on port 0. */
+#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
+/* [R 2] debug only: Number of pending requests for header parsing. */
+#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
+/* [R 1] Interrupt register #0 read */
+#define PRS_REG_PRS_INT_STS 0x40188
+/* [RW 8] Parity mask register #0 read/write */
+#define PRS_REG_PRS_PRTY_MASK 0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
+/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
+ request message */
+#define PRS_REG_PURE_REGIONS 0x40024
+/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
+/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
+/* [R 4] debug only: SRC current credit. Transaction based. */
+#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0 0x4022c
+/* [R 8] debug only: TCM current credit. Cycle based. */
+#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
+/* [R 8] debug only: TSDM current credit. Transaction based. */
+#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+/* [RW 16] One of 8 values that should be compared to the type field in
+ * Ethernet parsing. If there is a match, the field after Ethernet is the
+ * first VLAN. Reset value is 0x8100, which is the standard VLAN type. Note
+ * that when checking the second VLAN, the type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0 0x401a8
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+/* [R 6] Debug only: Number of used entries in the data FIFO */
+#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
+/* [R 7] Debug only: Number of used entries in the header FIFO */
+#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0 0x120534
+/* [R 32] GRC address for configuration access to PCIE config address 0x88.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_88_F1 0x120544
+#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
+/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
+#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
+/* [R 32] GRC address for configuration access to PCIE config address 0x90.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
+#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+/* [R 32] GRC address for configuration access to PCIE config address 0x94.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_94_F1 0x120550
+#define PXP2_REG_PGL_CONTROL0 0x120490
+#define PXP2_REG_PGL_CONTROL1 0x120514
+#define PXP2_REG_PGL_DEBUG 0x120520
+/* [RW 32] Third dword of data of the expansion rom request. This register
+ is special: reading from it provides a vector of outstanding read
+ requests. If a bit is zero it means that a read request on the
+ corresponding tag did not finish yet (not all completions have arrived
+ for it). */
+#define PXP2_REG_PGL_EXP_ROM2 0x120808
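+/* Illustrative sketch, not part of the register map: since a zero bit in
+ * the register above marks a read tag that has not finished, software can
+ * poll for all-ones to know that every tagged read has completed. Assumes
+ * REG_RD() from bnx2x.h and usleep_range() from linux/delay.h; the helper
+ * name and poll bounds are hypothetical.
+ */
+static inline bool bnx2x_poll_exp_rom_tags_idle(struct bnx2x *bp, int tries)
+{
+	while (tries--) {
+		if (REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) == 0xffffffff)
+			return true;
+		usleep_range(100, 200);
+	}
+	return false;
+}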
+/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
+#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
+#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
+#define PXP2_REG_PGL_INT_CSDM_3 0x120500
+#define PXP2_REG_PGL_INT_CSDM_4 0x120504
+#define PXP2_REG_PGL_INT_CSDM_5 0x120508
+#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
+#define PXP2_REG_PGL_INT_CSDM_7 0x120510
+/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_TSDM_0 0x120494
+#define PXP2_REG_PGL_INT_TSDM_1 0x120498
+#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
+#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
+#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
+#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
+#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
+#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
+/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
+#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
+#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
+#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
+#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
+#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
+#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
+#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
+/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
+#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
+#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
+#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
+#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
+#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
+#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
+#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
+/* [RW 3] This field allows one function to pretend to be another function
+ when accessing any BAR mapped resource within the device. The value of
+ the field is the number of the function that will effectively be
+ accessed. After software writes this field it must read it back in order
+ to know that the new value has taken effect. */
+#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674
+#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678
+#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c
+#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680
+#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684
+#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688
+#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c
+#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
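+/* Illustrative sketch, not part of the register map: per the note above, a
+ * pretend-function update is only guaranteed to have taken effect after
+ * software reads the field back. Assumes REG_RD()/REG_WR() from bnx2x.h;
+ * the helper name is hypothetical and covers function 0's register only.
+ */
+static inline void bnx2x_pretend_func_f0(struct bnx2x *bp, u8 func)
+{
+	REG_WR(bp, PXP2_REG_PGL_PRETEND_FUNC_F0, func);
+	/* Read back to ensure the new pretend value is in effect */
+	REG_RD(bp, PXP2_REG_PGL_PRETEND_FUNC_F0);
+}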
+/* [R 1] This bit indicates that a read request was blocked because
+ bus_master_en was deasserted */
+#define PXP2_REG_PGL_READ_BLOCKED 0x120568
+#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
+/* [R 18] debug only */
+#define PXP2_REG_PGL_TXW_CDTS 0x12052c
+/* [R 1] This bit indicates that a write request was blocked because
+ bus_master_en was deasserted */
+#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564
+#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0
+#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4
+#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8
+#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4
+#define PXP2_REG_PSWRQ_BW_ADD28 0x120228
+#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8
+#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4
+#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8
+#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc
+#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0
+#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c
+#define PXP2_REG_PSWRQ_BW_L1 0x1202b0
+#define PXP2_REG_PSWRQ_BW_L10 0x1202d4
+#define PXP2_REG_PSWRQ_BW_L11 0x1202d8
+#define PXP2_REG_PSWRQ_BW_L2 0x1202b4
+#define PXP2_REG_PSWRQ_BW_L28 0x120318
+#define PXP2_REG_PSWRQ_BW_L3 0x1202b8
+#define PXP2_REG_PSWRQ_BW_L6 0x1202c4
+#define PXP2_REG_PSWRQ_BW_L7 0x1202c8
+#define PXP2_REG_PSWRQ_BW_L8 0x1202cc
+#define PXP2_REG_PSWRQ_BW_L9 0x1202d0
+#define PXP2_REG_PSWRQ_BW_RD 0x120324
+#define PXP2_REG_PSWRQ_BW_UB1 0x120238
+#define PXP2_REG_PSWRQ_BW_UB10 0x12025c
+#define PXP2_REG_PSWRQ_BW_UB11 0x120260
+#define PXP2_REG_PSWRQ_BW_UB2 0x12023c
+#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0
+#define PXP2_REG_PSWRQ_BW_UB3 0x120240
+#define PXP2_REG_PSWRQ_BW_UB6 0x12024c
+#define PXP2_REG_PSWRQ_BW_UB7 0x120250
+#define PXP2_REG_PSWRQ_BW_UB8 0x120254
+#define PXP2_REG_PSWRQ_BW_UB9 0x120258
+#define PXP2_REG_PSWRQ_BW_WR 0x120328
+#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000
+#define PXP2_REG_PSWRQ_QM0_L2P 0x120038
+#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
+#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
+#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP2_REG_PXP2_INT_MASK_0 0x120578
+/* [R 32] Interrupt register #0 read */
+#define PXP2_REG_PXP2_INT_STS_0 0x12056c
+#define PXP2_REG_PXP2_INT_STS_1 0x120608
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
+/* [RW 32] Parity mask register #0 read/write */
+#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
+#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
+/* [R 32] Parity register #0 read */
+#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
+#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
+/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
+ indication about backpressure) */
+#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
+/* [R 8] Debug only: The blocks counter - number of unused block ids */
+#define PXP2_REG_RD_BLK_CNT 0x120418
+/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
+ Must be bigger than 6. Normally should not be changed. */
+#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c
+/* [RW 2] CDU byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404
+/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
+#define PXP2_REG_RD_DISABLE_INPUTS 0x120374
+/* [R 1] PSWRD internal memories initialization is done */
+#define PXP2_REG_RD_INIT_DONE 0x120370
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq10 */
+#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq11 */
+#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq17 */
+#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq18 */
+#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq19 */
+#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq22 */
+#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq25 */
+#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq6 */
+#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq9 */
+#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c
+/* [RW 2] PBF byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4
+/* [R 1] Debug only: Indication if delivery ports are idle */
+#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c
+#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420
+/* [RW 2] QM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8
+/* [R 7] Debug only: The SR counter - number of unused sub request ids */
+#define PXP2_REG_RD_SR_CNT 0x120414
+/* [RW 2] SRC byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400
+/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
+ be bigger than 1. Normally should not be changed. */
+#define PXP2_REG_RD_SR_NUM_CFG 0x120408
+/* [RW 1] Signals the PSWRD block to start initializing internal memories */
+#define PXP2_REG_RD_START_INIT 0x12036c
+/* [RW 2] TM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc
+/* [RW 10] Bandwidth addition to VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc
+/* [RW 10] Bandwidth addition to VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec
+/* [RW 10] Bandwidth addition to VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0
+/* [RW 10] Bandwidth addition to VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4
+/* [RW 10] Bandwidth addition to VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8
+/* [RW 10] Bandwidth addition to VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc
+/* [RW 10] Bandwidth addition to VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD17 0x120200
+/* [RW 10] Bandwidth addition to VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD18 0x120204
+/* [RW 10] Bandwidth addition to VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD19 0x120208
+/* [RW 10] Bandwidth addition to VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c
+/* [RW 10] Bandwidth addition to VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD22 0x120210
+/* [RW 10] Bandwidth addition to VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD23 0x120214
+/* [RW 10] Bandwidth addition to VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD24 0x120218
+/* [RW 10] Bandwidth addition to VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c
+/* [RW 10] Bandwidth addition to VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD26 0x120220
+/* [RW 10] Bandwidth addition to VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD27 0x120224
+/* [RW 10] Bandwidth addition to VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc
+/* [RW 10] Bandwidth addition to VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0
+/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
+#define PXP2_REG_RQ_BW_RD_L0 0x1202ac
+/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
+#define PXP2_REG_RQ_BW_RD_L12 0x1202dc
+/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
+#define PXP2_REG_RQ_BW_RD_L13 0x1202e0
+/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
+#define PXP2_REG_RQ_BW_RD_L14 0x1202e4
+/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
+#define PXP2_REG_RQ_BW_RD_L15 0x1202e8
+/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
+#define PXP2_REG_RQ_BW_RD_L16 0x1202ec
+/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
+#define PXP2_REG_RQ_BW_RD_L17 0x1202f0
+/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
+#define PXP2_REG_RQ_BW_RD_L18 0x1202f4
+/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
+#define PXP2_REG_RQ_BW_RD_L19 0x1202f8
+/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
+#define PXP2_REG_RQ_BW_RD_L20 0x1202fc
+/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
+#define PXP2_REG_RQ_BW_RD_L22 0x120300
+/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
+#define PXP2_REG_RQ_BW_RD_L23 0x120304
+/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
+#define PXP2_REG_RQ_BW_RD_L24 0x120308
+/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
+#define PXP2_REG_RQ_BW_RD_L25 0x12030c
+/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
+#define PXP2_REG_RQ_BW_RD_L26 0x120310
+/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
+#define PXP2_REG_RQ_BW_RD_L27 0x120314
+/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
+#define PXP2_REG_RQ_BW_RD_L4 0x1202bc
+/* [RW 10] Bandwidth Typical L for VQ5 Read - currently not used */
+#define PXP2_REG_RQ_BW_RD_L5 0x1202c0
+/* [RW 7] Bandwidth upper bound for VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234
+/* [RW 7] Bandwidth upper bound for VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264
+/* [RW 7] Bandwidth upper bound for VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268
+/* [RW 7] Bandwidth upper bound for VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c
+/* [RW 7] Bandwidth upper bound for VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270
+/* [RW 7] Bandwidth upper bound for VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274
+/* [RW 7] Bandwidth upper bound for VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278
+/* [RW 7] Bandwidth upper bound for VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c
+/* [RW 7] Bandwidth upper bound for VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280
+/* [RW 7] Bandwidth upper bound for VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284
+/* [RW 7] Bandwidth upper bound for VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288
+/* [RW 7] Bandwidth upper bound for VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c
+/* [RW 7] Bandwidth upper bound for VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290
+/* [RW 7] Bandwidth upper bound for VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294
+/* [RW 7] Bandwidth upper bound for VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298
+/* [RW 7] Bandwidth upper bound for VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c
+/* [RW 7] Bandwidth upper bound for VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244
+/* [RW 7] Bandwidth upper bound for VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248
+/* [RW 10] Bandwidth addition to VQ29 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c
+/* [RW 10] Bandwidth addition to VQ30 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD30 0x120230
+/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
+#define PXP2_REG_RQ_BW_WR_L29 0x12031c
+/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
+#define PXP2_REG_RQ_BW_WR_L30 0x120320
+/* [RW 7] Bandwidth upper bound for VQ29 */
+#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
+/* [RW 7] Bandwidth upper bound for VQ30 */
+#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
+/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
+#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
+/* [RW 2] Endian mode for cdu */
+#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
+#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
+#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
+/* [RW 3] page size in L2P table for CDU module; 0-4k; 1-8k; 2-16k; 3-32k;
+ 4-64k; 5-128k */
+#define PXP2_REG_RQ_CDU_P_SIZE 0x120018
+/* [R 1] '1' indicates that the requester has finished its internal
+ configuration */
+#define PXP2_REG_RQ_CFG_DONE 0x1201b4
+/* [RW 2] Endian mode for debug */
+#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4
+/* [RW 1] When '1'; requests will enter input buffers but won't get out
+ towards the glue */
+#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
+/* [RW 4] Determines alignment of write SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
+/* [RW 4] Determines alignment of read SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
+/* [RW 1] when set the new alignment method (E2) will be applied; when reset
+ * the original alignment method (E1 E1H) will be applied */
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
+/* [RW 1] If 1 an ILT failure will not result in ELT access; an interrupt
+ will be asserted */
+#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
+/* [RW 2] Endian mode for hc */
+#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
+/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for backward
+ compatibility needs; Note that different registers are used per mode */
+#define PXP2_REG_RQ_ILT_MODE 0x1205b4
+/* [WB 53] Onchip address table */
+#define PXP2_REG_RQ_ONCHIP_AT 0x122000
+/* [WB 53] Onchip address table - B0 */
+#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
+/* [RW 13] Pending read limiter threshold; in Dwords */
+#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
+/* [RW 2] Endian mode for qm */
+#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
+#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
+#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
+/* [RW 3] page size in L2P table for QM module; 0-4k; 1-8k; 2-16k; 3-32k;
+ 4-64k; 5-128k */
+#define PXP2_REG_RQ_QM_P_SIZE 0x120050
+/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
+#define PXP2_REG_RQ_RBC_DONE 0x1201b0
+/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
+ 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS0 0x120160
+/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
+ 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS1 0x120168
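+/* Decoding sketch (an assumption drawn from the enumeration above, not a
+ * statement from the hardware docs; the macro name is illustrative): the
+ * 3-bit MBS encoding grows in powers of two from 128B, so the burst size
+ * in bytes is 128 << mbs (0 -> 128B ... 101b -> 4K).
+ */
+#define PXP2_RQ_MBS_TO_BYTES(mbs)	(128U << (mbs))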
+/* [RW 2] Endian mode for src */
+#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
+#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
+#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
+/* [RW 3] page size in L2P table for SRC module; 0-4k; 1-8k; 2-16k; 3-32k;
+ 4-64k; 5-128k */
+#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
+/* [RW 2] Endian mode for tm */
+#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
+#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
+#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
+/* [RW 3] page size in L2P table for TM module; 0-4k; 1-8k; 2-16k; 3-32k;
+ 4-64k; 5-128k */
+#define PXP2_REG_RQ_TM_P_SIZE 0x120034
+/* [R 5] Number of entries in the ufifo; this fifo holds l2p completions */
+#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
+/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
+#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
+/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
+#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
+/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
+#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818
+/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
+#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820
+/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
+#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828
+/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
+#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830
+/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
+#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838
+/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
+#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840
+/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
+#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848
+/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
+#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850
+/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
+#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858
+/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
+#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860
+/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
+#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868
+/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
+#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870
+/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
+#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878
+/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
+#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880
+/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
+#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888
+/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
+#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890
+/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
+#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898
+/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
+#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0
+/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
+#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8
+/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
+#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0
+/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
+#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8
+/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
+#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0
+/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
+#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8
+/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
+#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0
+/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
+#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8
+/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
+#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0
+/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
+#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8
+/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
+#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0
+/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
+#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8
+/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
+#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900
+/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
+#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908
+/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
+ 001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS0 0x12015c
+/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
+ 001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS1 0x120164
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_CDU_MPS 0x1205f0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_CSDM_MPS 0x1205d0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_DBG_MPS 0x1205e8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_DMAE_MPS 0x1205ec
+/* [RW 10] if the number of entries in the dmae fifo is higher than this
+ threshold then the has_payload indication will be asserted; the default
+ value should be > the write MBS size! */
+#define PXP2_REG_WR_DMAE_TH 0x120368
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_HC_MPS 0x1205c8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_QM_MPS 0x1205dc
+/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
+#define PXP2_REG_WR_REV_MODE 0x120670
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_SRC_MPS 0x1205e4
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_TM_MPS 0x1205e0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_TSDM_MPS 0x1205d4
+/* [RW 10] if the number of entries in the usdmdp fifo is higher than this
+ threshold then the has_payload indication will be asserted; the default
+ value should be > the write MBS size! */
+#define PXP2_REG_WR_USDMDP_TH 0x120348
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_USDM_MPS 0x1205cc
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_XSDM_MPS 0x1205d8
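+/* Decoding sketch for the 2-bit MPS fields above (an assumption consistent
+ * with the 0 - 128B ... 3 - 1024B enumeration; the macro name is
+ * illustrative): the has_payload threshold in bytes is 128 << mps.
+ */
+#define PXP2_WR_MPS_TO_BYTES(mps)	(128U << (mps))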
+/* [R 1] debug only: Indication if PSWHST arbiter is idle */
+#define PXP_REG_HST_ARB_IS_IDLE 0x103004
+/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
+ this client is waiting for the arbiter. */
+#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to the doorbell
+ queue block. Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
+/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
+ should be updated according to the 'hst_discard_doorbells' register when
+ the state machine is idle */
+#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+ Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
+/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
+ means this PSWHST is discarding inputs from this client. Each bit should
+ be updated according to the 'hst_discard_internal_writes' register when
+ the state machine is idle. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
+/* [WB 160] Used for initialization of the inbound interrupts memory */
+#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 7] Indirect access to the permission table. The fields are : {Valid;
+ * VFID[5:0]}
+ */
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400
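+/* Encoding sketch for a permission table entry (an assumption: given the
+ * {Valid; VFID[5:0]} layout above, bit 6 carries the Valid flag and bits
+ * 5:0 the VFID; the macro name is illustrative).
+ */
+#define PXP_HST_ZONE_PERM_ENTRY(valid, vfid) \
+	((((valid) & 0x1) << 6) | ((vfid) & 0x3f))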
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP_REG_PXP_INT_MASK_0 0x103074
+#define PXP_REG_PXP_INT_MASK_1 0x103084
+/* [R 32] Interrupt register #0 read */
+#define PXP_REG_PXP_INT_STS_0 0x103068
+#define PXP_REG_PXP_INT_STS_1 0x103078
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
+#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
+/* [RW 27] Parity mask register #0 read/write */
+#define PXP_REG_PXP_PRTY_MASK 0x103094
+/* [R 26] Parity register #0 read */
+#define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
+/* [RW 4] The activity counter initial increment value sent in the load
+ request */
+#define QM_REG_ACTCTRINITVAL_0 0x168040
+#define QM_REG_ACTCTRINITVAL_1 0x168044
+#define QM_REG_ACTCTRINITVAL_2 0x168048
+#define QM_REG_ACTCTRINITVAL_3 0x16804c
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+ index I represents the physical queue number. The 12 lsbs are ignored and
+ considered zero so practically there are only 20 bits in this register;
+ queues 63-0 */
+#define QM_REG_BASEADDR 0x168900
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+ index I represents the physical queue number. The 12 lsbs are ignored and
+ considered zero so practically there are only 20 bits in this register;
+ queues 127-64 */
+#define QM_REG_BASEADDR_EXT_A 0x16e100
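+/* Alignment sketch (follows from the comment above; the macro name is
+ * illustrative): since the 12 lsbs are ignored, only 4KB-aligned base
+ * addresses are meaningful, so a base can be masked before programming.
+ */
+#define QM_BASEADDR_ALIGN(addr)	((addr) & ~0xfffU)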
+/* [RW 16] The byte credit cost for each task. This value is for both ports */
+#define QM_REG_BYTECRDCOST 0x168234
+/* [RW 16] The initial byte credit value for both ports. */
+#define QM_REG_BYTECRDINITVAL 0x168238
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 31-0 */
+#define QM_REG_BYTECRDPORT_LSB 0x168228
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 95-64 */
+#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 63-32 */
+#define QM_REG_BYTECRDPORT_MSB 0x168224
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 127-96 */
+#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
+/* [RW 16] The byte credit value above which the QM is considered almost
+ full */
+#define QM_REG_BYTECREDITAFULLTHR 0x168094
+/* [RW 4] The initial credit for interface */
+#define QM_REG_CMINITCRD_0 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
+#define QM_REG_CMINITCRD_1 0x1680d0
+#define QM_REG_CMINITCRD_2 0x1680d4
+#define QM_REG_CMINITCRD_3 0x1680d8
+#define QM_REG_CMINITCRD_4 0x1680dc
+#define QM_REG_CMINITCRD_5 0x1680e0
+#define QM_REG_CMINITCRD_6 0x1680e4
+#define QM_REG_CMINITCRD_7 0x1680e8
+/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
+ is masked */
+#define QM_REG_CMINTEN 0x1680ec
+/* [RW 12] A bit vector which indicates which of the queues are tied to
+ interface 0 */
+#define QM_REG_CMINTVOQMASK_0 0x1681f4
+#define QM_REG_CMINTVOQMASK_1 0x1681f8
+#define QM_REG_CMINTVOQMASK_2 0x1681fc
+#define QM_REG_CMINTVOQMASK_3 0x168200
+#define QM_REG_CMINTVOQMASK_4 0x168204
+#define QM_REG_CMINTVOQMASK_5 0x168208
+#define QM_REG_CMINTVOQMASK_6 0x16820c
+#define QM_REG_CMINTVOQMASK_7 0x168210
+/* [RW 20] The number of connections divided by 16; this dictates the size
+ of each queue belonging to an even function number. */
+#define QM_REG_CONNNUM_0 0x168020
+/* [R 6] Keep the fill level of the fifo from write client 4 */
+#define QM_REG_CQM_WRC_FIFOLVL 0x168018
+/* [RW 8] The context regions sent in the CFC load request */
+#define QM_REG_CTXREG_0 0x168030
+#define QM_REG_CTXREG_1 0x168034
+#define QM_REG_CTXREG_2 0x168038
+#define QM_REG_CTXREG_3 0x16803c
+/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
+ bypass enable */
+#define QM_REG_ENBYPVOQMASK 0x16823c
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 31-0 */
+#define QM_REG_ENBYTECRD_LSB 0x168220
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 95-64 */
+#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 63-32 */
+#define QM_REG_ENBYTECRD_MSB 0x16821c
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 127-96 */
+#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
+/* [RW 4] If cleared then the secondary interface will not be served by the
+ RR arbiter */
+#define QM_REG_ENSEC 0x1680f0
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_LSB 0x168230
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_MSB 0x16822c
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 31:0 */
+#define QM_REG_HWAEMPTYMASK_LSB 0x168218
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 95-64 */
+#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 63:32 */
+#define QM_REG_HWAEMPTYMASK_MSB 0x168214
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 127-96 */
+#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
+/* [RW 4] The number of outstanding requests to CFC */
+#define QM_REG_OUTLDREQ 0x168804
+/* [RC 1] A flag to indicate that an overflow error occurred in one of the
+ queues. */
+#define QM_REG_OVFERROR 0x16805c
+/* [RC 7] the Q where the overflow occurs */
+#define QM_REG_OVFQNUM 0x168058
+/* [R 16] Pause state for physical queues 15-0 */
+#define QM_REG_PAUSESTATE0 0x168410
+/* [R 16] Pause state for physical queues 31-16 */
+#define QM_REG_PAUSESTATE1 0x168414
+/* [R 16] Pause state for physical queues 47-32 */
+#define QM_REG_PAUSESTATE2 0x16e684
+/* [R 16] Pause state for physical queues 63-48 */
+#define QM_REG_PAUSESTATE3 0x16e688
+/* [R 16] Pause state for physical queues 79-64 */
+#define QM_REG_PAUSESTATE4 0x16e68c
+/* [R 16] Pause state for physical queues 95-80 */
+#define QM_REG_PAUSESTATE5 0x16e690
+/* [R 16] Pause state for physical queues 111-96 */
+#define QM_REG_PAUSESTATE6 0x16e694
+/* [R 16] Pause state for physical queues 127-112 */
+#define QM_REG_PAUSESTATE7 0x16e698
+/* [RW 2] The PCI attributes field used in the PCI request. */
+#define QM_REG_PCIREQAT 0x168054
+#define QM_REG_PF_EN 0x16e70c
+/* [R 24] The number of tasks stored in the QM for the PF. Only even
+ * functions are valid in E2 (odd I registers will be hard wired to 0) */
+#define QM_REG_PF_USG_CNT_0 0x16e040
+/* [R 16] NOT USED */
+#define QM_REG_PORT0BYTECRD 0x168300
+/* [R 16] The byte credit of port 1 */
+#define QM_REG_PORT1BYTECRD 0x168304
+/* [RW 3] pci function number of queues 15-0 */
+#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
+#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
+#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
+#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
+#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
+#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
+#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
+#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
+/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
+ ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
+ bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL 0x168a00
+/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as
+ follows: ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer;
+ ptrtbl[5:4] read bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL_EXT_A 0x16e200
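+/* Field-extraction sketch for the pointer table layout above (an
+ * assumption: one 54-bit entry held in a u64; macro names illustrative).
+ */
+#define QM_PTRTBL_WR_BANK(e)	((u32)((e) & 0x3))		/* [1:0] */
+#define QM_PTRTBL_RD_BANK1(e)	((u32)(((e) >> 2) & 0x3))	/* [3:2] */
+#define QM_PTRTBL_RD_BANK0(e)	((u32)(((e) >> 4) & 0x3))	/* [5:4] */
+#define QM_PTRTBL_WR_PTR(e)	((u32)(((e) >> 6) & 0xffffff))	/* [29:6] */
+#define QM_PTRTBL_RD_PTR(e)	((u32)(((e) >> 30) & 0xffffff))	/* [53:30] */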
+/* [RW 2] Interrupt mask register #0 read/write */
+#define QM_REG_QM_INT_MASK 0x168444
+/* [R 2] Interrupt register #0 read */
+#define QM_REG_QM_INT_STS 0x168438
+/* [RW 12] Parity mask register #0 read/write */
+#define QM_REG_QM_PRTY_MASK 0x168454
+/* [R 12] Parity register #0 read */
+#define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
+/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
+#define QM_REG_QSTATUS_HIGH 0x16802c
+/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
+#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
+/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
+#define QM_REG_QSTATUS_LOW 0x168028
+/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
+#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
+/* [R 24] The number of tasks queued for each queue; queues 63-0 */
+#define QM_REG_QTASKCTR_0 0x168308
+/* [R 24] The number of tasks queued for each queue; queues 127-64 */
+#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
+/* [RW 4] Queue tied to VOQ */
+#define QM_REG_QVOQIDX_0 0x1680f4
+#define QM_REG_QVOQIDX_10 0x16811c
+#define QM_REG_QVOQIDX_100 0x16e49c
+#define QM_REG_QVOQIDX_101 0x16e4a0
+#define QM_REG_QVOQIDX_102 0x16e4a4
+#define QM_REG_QVOQIDX_103 0x16e4a8
+#define QM_REG_QVOQIDX_104 0x16e4ac
+#define QM_REG_QVOQIDX_105 0x16e4b0
+#define QM_REG_QVOQIDX_106 0x16e4b4
+#define QM_REG_QVOQIDX_107 0x16e4b8
+#define QM_REG_QVOQIDX_108 0x16e4bc
+#define QM_REG_QVOQIDX_109 0x16e4c0
+#define QM_REG_QVOQIDX_11 0x168120
+#define QM_REG_QVOQIDX_110 0x16e4c4
+#define QM_REG_QVOQIDX_111 0x16e4c8
+#define QM_REG_QVOQIDX_112 0x16e4cc
+#define QM_REG_QVOQIDX_113 0x16e4d0
+#define QM_REG_QVOQIDX_114 0x16e4d4
+#define QM_REG_QVOQIDX_115 0x16e4d8
+#define QM_REG_QVOQIDX_116 0x16e4dc
+#define QM_REG_QVOQIDX_117 0x16e4e0
+#define QM_REG_QVOQIDX_118 0x16e4e4
+#define QM_REG_QVOQIDX_119 0x16e4e8
+#define QM_REG_QVOQIDX_12 0x168124
+#define QM_REG_QVOQIDX_120 0x16e4ec
+#define QM_REG_QVOQIDX_121 0x16e4f0
+#define QM_REG_QVOQIDX_122 0x16e4f4
+#define QM_REG_QVOQIDX_123 0x16e4f8
+#define QM_REG_QVOQIDX_124 0x16e4fc
+#define QM_REG_QVOQIDX_125 0x16e500
+#define QM_REG_QVOQIDX_126 0x16e504
+#define QM_REG_QVOQIDX_127 0x16e508
+#define QM_REG_QVOQIDX_13 0x168128
+#define QM_REG_QVOQIDX_14 0x16812c
+#define QM_REG_QVOQIDX_15 0x168130
+#define QM_REG_QVOQIDX_16 0x168134
+#define QM_REG_QVOQIDX_17 0x168138
+#define QM_REG_QVOQIDX_21 0x168148
+#define QM_REG_QVOQIDX_22 0x16814c
+#define QM_REG_QVOQIDX_23 0x168150
+#define QM_REG_QVOQIDX_24 0x168154
+#define QM_REG_QVOQIDX_25 0x168158
+#define QM_REG_QVOQIDX_26 0x16815c
+#define QM_REG_QVOQIDX_27 0x168160
+#define QM_REG_QVOQIDX_28 0x168164
+#define QM_REG_QVOQIDX_29 0x168168
+#define QM_REG_QVOQIDX_30 0x16816c
+#define QM_REG_QVOQIDX_31 0x168170
+#define QM_REG_QVOQIDX_32 0x168174
+#define QM_REG_QVOQIDX_33 0x168178
+#define QM_REG_QVOQIDX_34 0x16817c
+#define QM_REG_QVOQIDX_35 0x168180
+#define QM_REG_QVOQIDX_36 0x168184
+#define QM_REG_QVOQIDX_37 0x168188
+#define QM_REG_QVOQIDX_38 0x16818c
+#define QM_REG_QVOQIDX_39 0x168190
+#define QM_REG_QVOQIDX_40 0x168194
+#define QM_REG_QVOQIDX_41 0x168198
+#define QM_REG_QVOQIDX_42 0x16819c
+#define QM_REG_QVOQIDX_43 0x1681a0
+#define QM_REG_QVOQIDX_44 0x1681a4
+#define QM_REG_QVOQIDX_45 0x1681a8
+#define QM_REG_QVOQIDX_46 0x1681ac
+#define QM_REG_QVOQIDX_47 0x1681b0
+#define QM_REG_QVOQIDX_48 0x1681b4
+#define QM_REG_QVOQIDX_49 0x1681b8
+#define QM_REG_QVOQIDX_5 0x168108
+#define QM_REG_QVOQIDX_50 0x1681bc
+#define QM_REG_QVOQIDX_51 0x1681c0
+#define QM_REG_QVOQIDX_52 0x1681c4
+#define QM_REG_QVOQIDX_53 0x1681c8
+#define QM_REG_QVOQIDX_54 0x1681cc
+#define QM_REG_QVOQIDX_55 0x1681d0
+#define QM_REG_QVOQIDX_56 0x1681d4
+#define QM_REG_QVOQIDX_57 0x1681d8
+#define QM_REG_QVOQIDX_58 0x1681dc
+#define QM_REG_QVOQIDX_59 0x1681e0
+#define QM_REG_QVOQIDX_6 0x16810c
+#define QM_REG_QVOQIDX_60 0x1681e4
+#define QM_REG_QVOQIDX_61 0x1681e8
+#define QM_REG_QVOQIDX_62 0x1681ec
+#define QM_REG_QVOQIDX_63 0x1681f0
+#define QM_REG_QVOQIDX_64 0x16e40c
+#define QM_REG_QVOQIDX_65 0x16e410
+#define QM_REG_QVOQIDX_69 0x16e420
+#define QM_REG_QVOQIDX_7 0x168110
+#define QM_REG_QVOQIDX_70 0x16e424
+#define QM_REG_QVOQIDX_71 0x16e428
+#define QM_REG_QVOQIDX_72 0x16e42c
+#define QM_REG_QVOQIDX_73 0x16e430
+#define QM_REG_QVOQIDX_74 0x16e434
+#define QM_REG_QVOQIDX_75 0x16e438
+#define QM_REG_QVOQIDX_76 0x16e43c
+#define QM_REG_QVOQIDX_77 0x16e440
+#define QM_REG_QVOQIDX_78 0x16e444
+#define QM_REG_QVOQIDX_79 0x16e448
+#define QM_REG_QVOQIDX_8 0x168114
+#define QM_REG_QVOQIDX_80 0x16e44c
+#define QM_REG_QVOQIDX_81 0x16e450
+#define QM_REG_QVOQIDX_85 0x16e460
+#define QM_REG_QVOQIDX_86 0x16e464
+#define QM_REG_QVOQIDX_87 0x16e468
+#define QM_REG_QVOQIDX_88 0x16e46c
+#define QM_REG_QVOQIDX_89 0x16e470
+#define QM_REG_QVOQIDX_9 0x168118
+#define QM_REG_QVOQIDX_90 0x16e474
+#define QM_REG_QVOQIDX_91 0x16e478
+#define QM_REG_QVOQIDX_92 0x16e47c
+#define QM_REG_QVOQIDX_93 0x16e480
+#define QM_REG_QVOQIDX_94 0x16e484
+#define QM_REG_QVOQIDX_95 0x16e488
+#define QM_REG_QVOQIDX_96 0x16e48c
+#define QM_REG_QVOQIDX_97 0x16e490
+#define QM_REG_QVOQIDX_98 0x16e494
+#define QM_REG_QVOQIDX_99 0x16e498
+/* [RW 1] Initialization bit command */
+#define QM_REG_SOFT_RESET 0x168428
+/* [RW 8] The credit cost per task in the QM. A value per VOQ */
+#define QM_REG_TASKCRDCOST_0 0x16809c
+#define QM_REG_TASKCRDCOST_1 0x1680a0
+#define QM_REG_TASKCRDCOST_2 0x1680a4
+#define QM_REG_TASKCRDCOST_4 0x1680ac
+#define QM_REG_TASKCRDCOST_5 0x1680b0
+/* [R 6] Keep the fill level of the fifo from write client 3 */
+#define QM_REG_TQM_WRC_FIFOLVL 0x168010
+/* [R 6] Keep the fill level of the fifo from write client 2 */
+#define QM_REG_UQM_WRC_FIFOLVL 0x168008
+/* [RC 32] Credit update error register */
+#define QM_REG_VOQCRDERRREG 0x168408
+/* [R 16] The credit value for each VOQ */
+#define QM_REG_VOQCREDIT_0 0x1682d0
+#define QM_REG_VOQCREDIT_1 0x1682d4
+#define QM_REG_VOQCREDIT_4 0x1682e0
+/* [RW 16] The credit value above which the QM is considered almost full */
+#define QM_REG_VOQCREDITAFULLTHR 0x168090
+/* [RW 16] The init and maximum credit for each VoQ */
+#define QM_REG_VOQINITCREDIT_0 0x168060
+#define QM_REG_VOQINITCREDIT_1 0x168064
+#define QM_REG_VOQINITCREDIT_2 0x168068
+#define QM_REG_VOQINITCREDIT_4 0x168070
+#define QM_REG_VOQINITCREDIT_5 0x168074
+/* [RW 1] The port to which the VOQ belongs */
+#define QM_REG_VOQPORT_0 0x1682a0
+#define QM_REG_VOQPORT_1 0x1682a4
+#define QM_REG_VOQPORT_2 0x1682a8
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_0_LSB 0x168240
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_0_MSB 0x168244
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_10_LSB 0x168290
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_10_MSB 0x168294
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_11_LSB 0x168298
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_11_MSB 0x16829c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_1_LSB 0x168248
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_1_MSB 0x16824c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_2_LSB 0x168250
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_2_MSB 0x168254
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_3_LSB 0x168258
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_4_LSB 0x168260
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_4_MSB 0x168264
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_5_LSB 0x168268
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_5_MSB 0x16826c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_6_LSB 0x168270
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_6_MSB 0x168274
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_7_LSB 0x168278
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_7_MSB 0x16827c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_8_LSB 0x168280
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_8_MSB 0x168284
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_9_LSB 0x168288
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
+/* [RW 32] Wrr weights */
+#define QM_REG_WRRWEIGHTS_0 0x16880c
+#define QM_REG_WRRWEIGHTS_1 0x168810
+#define QM_REG_WRRWEIGHTS_10 0x168814
+#define QM_REG_WRRWEIGHTS_11 0x168818
+#define QM_REG_WRRWEIGHTS_12 0x16881c
+#define QM_REG_WRRWEIGHTS_13 0x168820
+#define QM_REG_WRRWEIGHTS_14 0x168824
+#define QM_REG_WRRWEIGHTS_15 0x168828
+#define QM_REG_WRRWEIGHTS_16 0x16e000
+#define QM_REG_WRRWEIGHTS_17 0x16e004
+#define QM_REG_WRRWEIGHTS_18 0x16e008
+#define QM_REG_WRRWEIGHTS_19 0x16e00c
+#define QM_REG_WRRWEIGHTS_2 0x16882c
+#define QM_REG_WRRWEIGHTS_20 0x16e010
+#define QM_REG_WRRWEIGHTS_21 0x16e014
+#define QM_REG_WRRWEIGHTS_22 0x16e018
+#define QM_REG_WRRWEIGHTS_23 0x16e01c
+#define QM_REG_WRRWEIGHTS_24 0x16e020
+#define QM_REG_WRRWEIGHTS_25 0x16e024
+#define QM_REG_WRRWEIGHTS_26 0x16e028
+#define QM_REG_WRRWEIGHTS_27 0x16e02c
+#define QM_REG_WRRWEIGHTS_28 0x16e030
+#define QM_REG_WRRWEIGHTS_29 0x16e034
+#define QM_REG_WRRWEIGHTS_3 0x168830
+#define QM_REG_WRRWEIGHTS_30 0x16e038
+#define QM_REG_WRRWEIGHTS_31 0x16e03c
+#define QM_REG_WRRWEIGHTS_4 0x168834
+#define QM_REG_WRRWEIGHTS_5 0x168838
+#define QM_REG_WRRWEIGHTS_6 0x16883c
+#define QM_REG_WRRWEIGHTS_7 0x168840
+#define QM_REG_WRRWEIGHTS_8 0x168844
+#define QM_REG_WRRWEIGHTS_9 0x168848
+/* [R 6] Keep the fill level of the fifo from write client 1 */
+#define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] reset the parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
+#define SRC_REG_COUNTFREE0 0x40500
+/* [RW 1] If clear the searcher is compatible with E1 A0 - supporting only
+ two ports. If set the searcher supports 8 functions. */
+#define SRC_REG_E1HMF_ENABLE 0x404cc
+#define SRC_REG_FIRSTFREE0 0x40510
+#define SRC_REG_KEYRSS0_0 0x40408
+#define SRC_REG_KEYRSS0_7 0x40424
+#define SRC_REG_KEYRSS1_9 0x40454
+#define SRC_REG_KEYSEARCH_0 0x40458
+#define SRC_REG_KEYSEARCH_1 0x4045c
+#define SRC_REG_KEYSEARCH_2 0x40460
+#define SRC_REG_KEYSEARCH_3 0x40464
+#define SRC_REG_KEYSEARCH_4 0x40468
+#define SRC_REG_KEYSEARCH_5 0x4046c
+#define SRC_REG_KEYSEARCH_6 0x40470
+#define SRC_REG_KEYSEARCH_7 0x40474
+#define SRC_REG_KEYSEARCH_8 0x40478
+#define SRC_REG_KEYSEARCH_9 0x4047c
+#define SRC_REG_LASTFREE0 0x40530
+#define SRC_REG_NUMBER_HASH_BITS0 0x40400
+/* [RW 1] Reset internal state machines. */
+#define SRC_REG_SOFT_RST 0x4049c
+/* [R 3] Interrupt register #0 read */
+#define SRC_REG_SRC_INT_STS 0x404ac
+/* [RW 3] Parity mask register #0 read/write */
+#define SRC_REG_SRC_PRTY_MASK 0x404c8
+/* [R 3] Parity register #0 read */
+#define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
+/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
+#define TCM_REG_CAM_OCCUP 0x5017c
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_AG_RD_IFEN 0x50034
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define TCM_REG_CDU_AG_WR_IFEN 0x50030
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_SM_RD_IFEN 0x5003c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define TCM_REG_CDU_SM_WR_IFEN 0x50038
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 1 at start-up. */
+#define TCM_REG_CFC_INIT_CRD 0x50204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CP_WEIGHT 0x500c0
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_CSEM_IFEN 0x5002c
+/* [RC 1] Message length mismatch (relative to last indication) at the In#9
+ interface. */
+#define TCM_REG_CSEM_LENGTH_MIS 0x50174
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CSEM_WEIGHT 0x500bc
+/* [RW 8] The Event ID in case ErrorFlg is set in the input message. */
+#define TCM_REG_ERR_EVNT_ID 0x500a0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define TCM_REG_ERR_TCM_HDR 0x5009c
+/* [RW 8] The Event ID for Timers expiration. */
+#define TCM_REG_EXPR_EVNT_ID 0x500a4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC0_INIT_CRD 0x5020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC1_INIT_CRD 0x50210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
+ ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define TCM_REG_GR_ARB_TYPE 0x50114
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+ complement of the other 3 groups. */
+#define TCM_REG_GR_LD0_PR 0x5011c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+ complement of the other 3 groups. */
+#define TCM_REG_GR_LD1_PR 0x50120
+/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used to align to STORM context row size of 128 bits. The offset of these
+ data in the STORM context is always 0. Index _i stands for the connection
+ type (one of 16). */
+#define TCM_REG_N_SM_CTX_LD_0 0x50050
+#define TCM_REG_N_SM_CTX_LD_1 0x50054
+#define TCM_REG_N_SM_CTX_LD_2 0x50058
+#define TCM_REG_N_SM_CTX_LD_3 0x5005c
+#define TCM_REG_N_SM_CTX_LD_4 0x50060
+#define TCM_REG_N_SM_CTX_LD_5 0x50064
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PBF_IFEN 0x50024
+/* [RC 1] Message length mismatch (relative to last indication) at the In#7
+ interface. */
+#define TCM_REG_PBF_LENGTH_MIS 0x5016c
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PBF_WEIGHT 0x500b4
+#define TCM_REG_PHYS_QNUM0_0 0x500e0
+#define TCM_REG_PHYS_QNUM0_1 0x500e4
+#define TCM_REG_PHYS_QNUM1_0 0x500e8
+#define TCM_REG_PHYS_QNUM1_1 0x500ec
+#define TCM_REG_PHYS_QNUM2_0 0x500f0
+#define TCM_REG_PHYS_QNUM2_1 0x500f4
+#define TCM_REG_PHYS_QNUM3_0 0x500f8
+#define TCM_REG_PHYS_QNUM3_1 0x500fc
+/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PRS_IFEN 0x50020
+/* [RC 1] Message length mismatch (relative to last indication) at the In#6
+ interface. */
+#define TCM_REG_PRS_LENGTH_MIS 0x50168
+/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PRS_WEIGHT 0x500b0
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define TCM_REG_STOP_EVNT_ID 0x500a8
+/* [RC 1] Message length mismatch (relative to last indication) at the STORM
+ interface. */
+#define TCM_REG_STORM_LENGTH_MIS 0x50160
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_STORM_TCM_IFEN 0x50010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_STORM_WEIGHT 0x500ac
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_CFC_IFEN 0x50040
+/* [RW 11] Interrupt mask register #0 read/write */
+#define TCM_REG_TCM_INT_MASK 0x501dc
+/* [R 11] Interrupt register #0 read */
+#define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
+/* [R 27] Parity register #0 read */
+#define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ It is used to determine the number of the AG context REG-pairs written
+ back; when the input message Reg1WbFlg isn't set. */
+#define TCM_REG_TCM_REG0_SZ 0x500d8
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM0_IFEN 0x50004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM1_IFEN 0x50008
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_TQM_IFEN 0x5000c
+/* [RW 1] If set the Q index received from the QM is inserted into the
+ event ID. */
+#define TCM_REG_TCM_TQM_USE_Q 0x500d4
+/* [RW 28] The CM header for Timers expiration command. */
+#define TCM_REG_TM_TCM_HDR 0x50098
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_TM_TCM_IFEN 0x5001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TM_WEIGHT 0x500d0
+/* [RW 6] QM output initial credit. Max credit available - 32. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 32 at start-up. */
+#define TCM_REG_TQM_INIT_CRD 0x5021c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_P_WEIGHT 0x500c8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_S_WEIGHT 0x500cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define TCM_REG_TQM_TCM_HDR_P 0x50090
+/* [RW 28] The CM header value for QM request (secondary). */
+#define TCM_REG_TQM_TCM_HDR_S 0x50094
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TQM_TCM_IFEN 0x50014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TSDM_IFEN 0x50018
+/* [RC 1] Message length mismatch (relative to last indication) at the SDM
+ interface. */
+#define TCM_REG_TSDM_LENGTH_MIS 0x50164
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TSDM_WEIGHT 0x500c4
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_USEM_IFEN 0x50028
+/* [RC 1] Message length mismatch (relative to last indication) at the In#8
+ interface. */
+#define TCM_REG_USEM_LENGTH_MIS 0x50170
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_USEM_WEIGHT 0x500b8
+/* [RW 21] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - length of the message; [15:6] - message
+ pointer; [20:16] - next pointer. */
+#define TCM_REG_XX_DESCR_TABLE 0x50280
+#define TCM_REG_XX_DESCR_TABLE_SIZE 29
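+/* Field-extraction sketch for an XX descriptor entry (an assumption based
+ * on the [5:0]/[15:6]/[20:16] layout above; macro names illustrative).
+ */
+#define TCM_XX_DESCR_MSG_LEN(e)		((e) & 0x3f)		/* [5:0] */
+#define TCM_XX_DESCR_MSG_PTR(e)		(((e) >> 6) & 0x3ff)	/* [15:6] */
+#define TCM_XX_DESCR_NEXT_PTR(e)	(((e) >> 16) & 0x1f)	/* [20:16] */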
+/* [R 6] Used to read the value of the XX protection Free counter. */
+#define TCM_REG_XX_FREE 0x50178
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 127. Write writes the initial credit
+ value; read returns the current value of the credit counter. Must be
+ initialized to 19 at start-up. */
+#define TCM_REG_XX_INIT_CRD 0x50220
+/* [RW 6] Maximum link list size (messages locked) per connection in the XX
+ protection. */
+#define TCM_REG_XX_MAX_LL_SZ 0x50044
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~tcm_registers_xx_free.xx_free is read on read. */
+#define TCM_REG_XX_MSG_NUM 0x50224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define TCM_REG_XX_OVFL_EVNT_ID 0x50048
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define TCM_REG_XX_TABLE 0x50240
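+/* Field-extraction sketch for an XX table entry (an assumption based on
+ * the [4:0]/[10:5]/[15:11] layout above; macro names illustrative).
+ */
+#define TCM_XX_TBL_TAIL_PTR(e)	((e) & 0x1f)		/* [4:0] */
+#define TCM_XX_TBL_LL_SIZE(e)	(((e) >> 5) & 0x3f)	/* [10:5] */
+#define TCM_XX_TBL_HDR_PTR(e)	(((e) >> 11) & 0x1f)	/* [15:11] */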
+/* [RW 4] Load value for cfc ac credit cnt. */
+#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
+/* [RW 4] Load value for cfc cld credit cnt. */
+#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
+/* [RW 8] Client0 context region. */
+#define TM_REG_CL0_CONT_REGION 0x164030
+/* [RW 8] Client1 context region. */
+#define TM_REG_CL1_CONT_REGION 0x164034
+/* [RW 8] Client2 context region. */
+#define TM_REG_CL2_CONT_REGION 0x164038
+/* [RW 2] Client in High priority client number. */
+#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024
+/* [RW 4] Load value for clout0 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220
+/* [RW 4] Load value for clout1 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228
+/* [RW 4] Load value for clout2 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230
+/* [RW 1] Enable client0 input. */
+#define TM_REG_EN_CL0_INPUT 0x164008
+/* [RW 1] Enable client1 input. */
+#define TM_REG_EN_CL1_INPUT 0x16400c
+/* [RW 1] Enable client2 input. */
+#define TM_REG_EN_CL2_INPUT 0x164010
+#define TM_REG_EN_LINEAR0_TIMER 0x164014
+/* [RW 1] Enable real time counter. */
+#define TM_REG_EN_REAL_TIME_CNT 0x1640d8
+/* [RW 1] Enable for Timers state machines. */
+#define TM_REG_EN_TIMERS 0x164000
+/* [RW 4] Load value for expiration credit cnt. CFC max number of
+ outstanding load requests for timers (expiration) context loading. */
+#define TM_REG_EXP_CRDCNT_VAL 0x164238
+/* [RW 32] Linear0 logic address. */
+#define TM_REG_LIN0_LOGIC_ADDR 0x164240
+/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
+#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS 0x1640a0
+/* [WB 64] Linear0 phy address. */
+#define TM_REG_LIN0_PHY_ADDR 0x164270
+/* [RW 1] Linear0 physical address valid. */
+#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
+#define TM_REG_LIN0_SCAN_ON 0x1640d0
+/* [RW 24] Linear0 array scan timeout. */
+#define TM_REG_LIN0_SCAN_TIME 0x16403c
+#define TM_REG_LIN0_VNIC_UC 0x164128
+/* [RW 32] Linear1 logic address. */
+#define TM_REG_LIN1_LOGIC_ADDR 0x164250
+/* [WB 64] Linear1 phy address. */
+#define TM_REG_LIN1_PHY_ADDR 0x164280
+/* [RW 1] Linear1 physical address valid. */
+#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
+/* [RW 6] Linear timer set_clear fifo threshold. */
+#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
+/* [RW 2] Load value for pci arbiter credit cnt. */
+#define TM_REG_PCIARB_CRDCNT_VAL 0x164260
+/* [RW 20] The number of hardware cycles for each timer tick. */
+#define TM_REG_TIMER_TICK_SIZE 0x16401c
+/* [RW 8] Timers Context region. */
+#define TM_REG_TM_CONTEXT_REGION 0x164044
+/* [RW 1] Interrupt mask register #0 read/write */
+#define TM_REG_TM_INT_MASK 0x1640fc
+/* [R 1] Interrupt register #0 read */
+#define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [R 7] Parity register #0 read */
+#define TM_REG_TM_PRTY_STS 0x164100
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_EVENT_0 0x42038
+#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
+#define TSDM_REG_AGG_INT_EVENT_2 0x42040
+#define TSDM_REG_AGG_INT_EVENT_3 0x42044
+#define TSDM_REG_AGG_INT_EVENT_4 0x42048
+/* [RW 1] The T bit for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_T_0 0x420b8
+#define TSDM_REG_AGG_INT_T_1 0x420bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c
+#define TSDM_REG_ENABLE_IN1 0x42238
+#define TSDM_REG_ENABLE_IN2 0x4223c
+#define TSDM_REG_ENABLE_OUT1 0x42240
+#define TSDM_REG_ENABLE_OUT2 0x42244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc
+/* [ST 32] The number of ACK after placement messages received */
+#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274
+/* [ST 32] The number of requests received from the pxp async if */
+#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278
+/* [ST 32] The number of commands received in queue 0 */
+#define TSDM_REG_NUM_OF_Q0_CMD 0x42248
+/* [ST 32] The number of commands received in queue 10 */
+#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c
+/* [ST 32] The number of commands received in queue 11 */
+#define TSDM_REG_NUM_OF_Q11_CMD 0x42270
+/* [ST 32] The number of commands received in queue 1 */
+#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c
+/* [ST 32] The number of commands received in queue 3 */
+#define TSDM_REG_NUM_OF_Q3_CMD 0x42250
+/* [ST 32] The number of commands received in queue 4 */
+#define TSDM_REG_NUM_OF_Q4_CMD 0x42254
+/* [ST 32] The number of commands received in queue 5 */
+#define TSDM_REG_NUM_OF_Q5_CMD 0x42258
+/* [ST 32] The number of commands received in queue 6 */
+#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c
+/* [ST 32] The number of commands received in queue 7 */
+#define TSDM_REG_NUM_OF_Q7_CMD 0x42260
+/* [ST 32] The number of commands received in queue 8 */
+#define TSDM_REG_NUM_OF_Q8_CMD 0x42264
+/* [ST 32] The number of commands received in queue 9 */
+#define TSDM_REG_NUM_OF_Q9_CMD 0x42268
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~tsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define TSDM_REG_TIMER_TICK 0x42000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSDM_REG_TSDM_INT_MASK_0 0x4229c
+#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
+/* [R 32] Interrupt register #0 read */
+#define TSDM_REG_TSDM_INT_STS_0 0x42290
+#define TSDM_REG_TSDM_INT_STS_1 0x422a0
+/* [RW 11] Parity mask register #0 read/write */
+#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
+/* [R 11] Parity register #0 read */
+#define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define TSEM_REG_ARB_ELEMENT0 0x180020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */
+#define TSEM_REG_ARB_ELEMENT1 0x180024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+ and ~tsem_registers_arb_element1.arb_element1 */
+#define TSEM_REG_ARB_ELEMENT2 0x180028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+ not be equal to register ~tsem_registers_arb_element0.arb_element0 and
+ ~tsem_registers_arb_element1.arb_element1 and
+ ~tsem_registers_arb_element2.arb_element2 */
+#define TSEM_REG_ARB_ELEMENT3 0x18002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+ and ~tsem_registers_arb_element1.arb_element1 and
+ ~tsem_registers_arb_element2.arb_element2 and
+ ~tsem_registers_arb_element3.arb_element3 */
+#define TSEM_REG_ARB_ELEMENT4 0x180030
+#define TSEM_REG_ENABLE_IN 0x1800a4
+#define TSEM_REG_ENABLE_OUT 0x1800a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in the SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define TSEM_REG_FAST_MEMORY 0x1a0000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define TSEM_REG_FIC0_DISABLE 0x180224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define TSEM_REG_FIC1_DISABLE 0x180234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define TSEM_REG_INT_TABLE 0x180400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define TSEM_REG_MSG_NUM_FIC0 0x180000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define TSEM_REG_MSG_NUM_FIC1 0x180004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define TSEM_REG_MSG_NUM_FOC0 0x180008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define TSEM_REG_MSG_NUM_FOC1 0x18000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define TSEM_REG_MSG_NUM_FOC2 0x180010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define TSEM_REG_MSG_NUM_FOC3 0x180014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define TSEM_REG_PAS_DISABLE 0x18024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define TSEM_REG_PASSIVE_BUFFER 0x181000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define TSEM_REG_PRAM 0x1c0000
+/* [R 8] Valid sleeping threads indication; there is a bit per thread */
+#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
+/* [RW 8] List of free threads. There is a bit per thread. */
+#define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define TSEM_REG_TS_0_AS 0x180038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define TSEM_REG_TS_10_AS 0x180060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define TSEM_REG_TS_11_AS 0x180064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define TSEM_REG_TS_12_AS 0x180068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define TSEM_REG_TS_13_AS 0x18006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define TSEM_REG_TS_14_AS 0x180070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define TSEM_REG_TS_15_AS 0x180074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define TSEM_REG_TS_16_AS 0x180078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define TSEM_REG_TS_17_AS 0x18007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define TSEM_REG_TS_18_AS 0x180080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define TSEM_REG_TS_1_AS 0x18003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define TSEM_REG_TS_2_AS 0x180040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define TSEM_REG_TS_3_AS 0x180044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define TSEM_REG_TS_4_AS 0x180048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define TSEM_REG_TS_5_AS 0x18004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define TSEM_REG_TS_6_AS 0x180050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define TSEM_REG_TS_7_AS 0x180054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define TSEM_REG_TS_8_AS 0x180058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define TSEM_REG_TS_9_AS 0x18005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSEM_REG_TSEM_INT_MASK_0 0x180100
+#define TSEM_REG_TSEM_INT_MASK_1 0x180110
+/* [R 32] Interrupt register #0 read */
+#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
+#define TSEM_REG_TSEM_INT_STS_1 0x180104
+/* [RW 32] Parity mask register #0 read/write */
+#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
+#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
+/* [R 32] Parity register #0 read */
+#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
+#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define TSEM_REG_VFPF_ERR_NUM 0x180380
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [10:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * of LCID100; the RBC address should be 12'ha64. */
+#define UCM_REG_AG_CTX 0xe2000
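+/* Illustrative sketch; not part of the original register file. Forming the
+ * indirect AG-context address described above: the LCID occupies bits [7:0]
+ * and the 32-bit register offset sits above it, which reproduces the
+ * documented example (REG10 of LCID100 -> RBC address 12'ha64). The
+ * dword-to-byte scaling (<< 2) is an assumption based on the stated 32-bit
+ * granularity, not taken from this file:
+ *
+ *	u32 rbc = (reg_idx << 8) | lcid;	// 10 << 8 | 100 == 0xa64
+ *	REG_WR(bp, UCM_REG_AG_CTX + (rbc << 2), val);
+ */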
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define UCM_REG_CAM_OCCUP 0xe0170
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_AG_RD_IFEN 0xe0038
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define UCM_REG_CDU_AG_WR_IFEN 0xe0034
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_SM_RD_IFEN 0xe0040
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define UCM_REG_CDU_SM_WR_IFEN 0xe003c
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define UCM_REG_CFC_INIT_CRD 0xe0204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CP_WEIGHT 0xe00c4
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_CSEM_IFEN 0xe0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the csem interface is detected. */
+#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CSEM_WEIGHT 0xe00b8
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_DORQ_IFEN 0xe0030
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the dorq interface is detected. */
+#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_DORQ_WEIGHT 0xe00c0
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define UCM_REG_ERR_EVNT_ID 0xe00a4
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define UCM_REG_ERR_UCM_HDR 0xe00a0
+/* [RW 8] The Event ID for Timers expiration. */
+#define UCM_REG_EXPR_EVNT_ID 0xe00a8
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC0_INIT_CRD 0xe020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC1_INIT_CRD 0xe0210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
+ ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define UCM_REG_GR_ARB_TYPE 0xe0144
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+ the complement of the others. */
+#define UCM_REG_GR_LD0_PR 0xe014c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+ the complement of the others. */
+#define UCM_REG_GR_LD1_PR 0xe0150
+/* [RW 2] The queue index for invalidate counter flag decision. */
+#define UCM_REG_INV_CFLG_Q 0xe00e4
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define UCM_REG_N_SM_CTX_LD_0 0xe0054
+#define UCM_REG_N_SM_CTX_LD_1 0xe0058
+#define UCM_REG_N_SM_CTX_LD_2 0xe005c
+#define UCM_REG_N_SM_CTX_LD_3 0xe0060
+#define UCM_REG_N_SM_CTX_LD_4 0xe0064
+#define UCM_REG_N_SM_CTX_LD_5 0xe0068
+#define UCM_REG_PHYS_QNUM0_0 0xe0110
+#define UCM_REG_PHYS_QNUM0_1 0xe0114
+#define UCM_REG_PHYS_QNUM1_0 0xe0118
+#define UCM_REG_PHYS_QNUM1_1 0xe011c
+#define UCM_REG_PHYS_QNUM2_0 0xe0120
+#define UCM_REG_PHYS_QNUM2_1 0xe0124
+#define UCM_REG_PHYS_QNUM3_0 0xe0128
+#define UCM_REG_PHYS_QNUM3_1 0xe012c
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define UCM_REG_STOP_EVNT_ID 0xe00ac
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define UCM_REG_STORM_LENGTH_MIS 0xe0154
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_STORM_UCM_IFEN 0xe0010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_STORM_WEIGHT 0xe00b0
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define UCM_REG_TM_INIT_CRD 0xe021c
+/* [RW 28] The CM header for Timers expiration command. */
+#define UCM_REG_TM_UCM_HDR 0xe009c
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TM_UCM_IFEN 0xe001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TM_WEIGHT 0xe00d4
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TSEM_IFEN 0xe0024
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TSEM_WEIGHT 0xe00b4
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_CFC_IFEN 0xe0044
+/* [RW 11] Interrupt mask register #0 read/write */
+#define UCM_REG_UCM_INT_MASK 0xe01d4
+/* [R 11] Interrupt register #0 read */
+#define UCM_REG_UCM_INT_STS 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK 0xe01e4
+/* [R 27] Parity register #0 read */
+#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
+/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the Reg1WbFlg isn't set. */
+#define UCM_REG_UCM_REG0_SZ 0xe00dc
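+/* Illustrative note; not part of the original register file. Per the
+ * encoding above, the programmed value is the region size in REG-pairs
+ * minus one (nr_reg_pairs is a hypothetical variable):
+ *
+ *	REG_WR(bp, UCM_REG_UCM_REG0_SZ, nr_reg_pairs - 1);	// e.g. 6 -> 5
+ */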
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM0_IFEN 0xe0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM1_IFEN 0xe0008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_UCM_TM_IFEN 0xe0020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_UQM_IFEN 0xe000c
+/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
+#define UCM_REG_UCM_UQM_USE_Q 0xe00d8
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define UCM_REG_UQM_INIT_CRD 0xe0220
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_P_WEIGHT 0xe00cc
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_S_WEIGHT 0xe00d0
+/* [RW 28] The CM header value for QM request (primary). */
+#define UCM_REG_UQM_UCM_HDR_P 0xe0094
+/* [RW 28] The CM header value for QM request (secondary). */
+#define UCM_REG_UQM_UCM_HDR_S 0xe0098
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UQM_UCM_IFEN 0xe0014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_USDM_IFEN 0xe0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define UCM_REG_USDM_LENGTH_MIS 0xe0158
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_USDM_WEIGHT 0xe00c8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_XSEM_IFEN 0xe002c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface is detected. */
+#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_XSEM_WEIGHT 0xe00bc
+/* [RW 20] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [14:6] - message
+ pointer; [19:15] - next pointer. */
+#define UCM_REG_XX_DESCR_TABLE 0xe0280
+#define UCM_REG_XX_DESCR_TABLE_SIZE 27
+/* [R 6] Used to read the XX protection Free counter. */
+#define UCM_REG_XX_FREE 0xe016c
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Write writes the initial credit value; read returns the current
+ value of the credit counter. Must be initialized to 12 at start-up. */
+#define UCM_REG_XX_INIT_CRD 0xe0224
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~ucm_registers_xx_free.xx_free is read on read. */
+#define UCM_REG_XX_MSG_NUM 0xe0228
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_HD_ENA (0x1<<10)
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG 0x8
+/* [RW 16] This is the duration for which the MAC must wait to go back to
+ * ACTIVE state from LPI state when it receives a packet for transmission.
+ * The decrement unit is 1 micro-second. */
+#define UMAC_REG_EEE_WAKE_TIMER 0x6c
+/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
+ * to bit 17 of the MAC address etc. */
+#define UMAC_REG_MAC_ADDR0 0xc
+/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1
+ * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */
+#define UMAC_REG_MAC_ADDR1 0x10
+/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR 0x14
+#define UMAC_REG_UMAC_EEE_CTRL 0x64
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3)
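+/* Illustrative sketch; not part of the original register file. A typical
+ * read-modify-write of the UMAC command config using the bit definitions
+ * above; umac_base (the block's BAR offset) and the driver's REG_RD()/
+ * REG_WR() accessors are assumptions here:
+ *
+ *	u32 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+ *	val |= UMAC_COMMAND_CONFIG_REG_RX_ENA |
+ *	       UMAC_COMMAND_CONFIG_REG_TX_ENA;
+ *	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+ *	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ */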
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define USDM_REG_AGG_INT_EVENT_0 0xc4038
+#define USDM_REG_AGG_INT_EVENT_1 0xc403c
+#define USDM_REG_AGG_INT_EVENT_2 0xc4040
+#define USDM_REG_AGG_INT_EVENT_4 0xc4048
+#define USDM_REG_AGG_INT_EVENT_5 0xc404c
+#define USDM_REG_AGG_INT_EVENT_6 0xc4050
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define USDM_REG_AGG_INT_MODE_0 0xc41b8
+#define USDM_REG_AGG_INT_MODE_1 0xc41bc
+#define USDM_REG_AGG_INT_MODE_4 0xc41c8
+#define USDM_REG_AGG_INT_MODE_5 0xc41cc
+#define USDM_REG_AGG_INT_MODE_6 0xc41d0
+/* [RW 1] The T bit for aggregated interrupt 5 */
+#define USDM_REG_AGG_INT_T_5 0xc40cc
+#define USDM_REG_AGG_INT_T_6 0xc40d0
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c
+#define USDM_REG_ENABLE_IN1 0xc4238
+#define USDM_REG_ENABLE_IN2 0xc423c
+#define USDM_REG_ENABLE_OUT1 0xc4240
+#define USDM_REG_ENABLE_OUT2 0xc4244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0
+/* [ST 32] The number of ACK after placement messages received */
+#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280
+/* [ST 32] The number of packet end messages received from the parser */
+#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278
+/* [ST 32] The number of requests received from the pxp async if */
+#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c
+/* [ST 32] The number of commands received in queue 0 */
+#define USDM_REG_NUM_OF_Q0_CMD 0xc4248
+/* [ST 32] The number of commands received in queue 10 */
+#define USDM_REG_NUM_OF_Q10_CMD 0xc4270
+/* [ST 32] The number of commands received in queue 11 */
+#define USDM_REG_NUM_OF_Q11_CMD 0xc4274
+/* [ST 32] The number of commands received in queue 1 */
+#define USDM_REG_NUM_OF_Q1_CMD 0xc424c
+/* [ST 32] The number of commands received in queue 2 */
+#define USDM_REG_NUM_OF_Q2_CMD 0xc4250
+/* [ST 32] The number of commands received in queue 3 */
+#define USDM_REG_NUM_OF_Q3_CMD 0xc4254
+/* [ST 32] The number of commands received in queue 4 */
+#define USDM_REG_NUM_OF_Q4_CMD 0xc4258
+/* [ST 32] The number of commands received in queue 5 */
+#define USDM_REG_NUM_OF_Q5_CMD 0xc425c
+/* [ST 32] The number of commands received in queue 6 */
+#define USDM_REG_NUM_OF_Q6_CMD 0xc4260
+/* [ST 32] The number of commands received in queue 7 */
+#define USDM_REG_NUM_OF_Q7_CMD 0xc4264
+/* [ST 32] The number of commands received in queue 8 */
+#define USDM_REG_NUM_OF_Q8_CMD 0xc4268
+/* [ST 32] The number of commands received in queue 9 */
+#define USDM_REG_NUM_OF_Q9_CMD 0xc426c
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550
+/* [R 1] parser fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~usdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define USDM_REG_TIMER_TICK 0xc4000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USDM_REG_USDM_INT_MASK_0 0xc42a0
+#define USDM_REG_USDM_INT_MASK_1 0xc42b0
+/* [R 32] Interrupt register #0 read */
+#define USDM_REG_USDM_INT_STS_0 0xc4294
+#define USDM_REG_USDM_INT_STS_1 0xc42a4
+/* [RW 11] Parity mask register #0 read/write */
+#define USDM_REG_USDM_PRTY_MASK 0xc42c0
+/* [R 11] Parity register #0 read */
+#define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define USEM_REG_ARB_CYCLE_SIZE 0x300034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define USEM_REG_ARB_ELEMENT0 0x300020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
+#define USEM_REG_ARB_ELEMENT1 0x300024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 */
+#define USEM_REG_ARB_ELEMENT2 0x300028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+ not be equal to register ~usem_registers_arb_element0.arb_element0 and
+ ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 */
+#define USEM_REG_ARB_ELEMENT3 0x30002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 and
+ ~usem_registers_arb_element3.arb_element3 */
+#define USEM_REG_ARB_ELEMENT4 0x300030
+#define USEM_REG_ENABLE_IN 0x3000a4
+#define USEM_REG_ENABLE_OUT 0x3000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in the SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define USEM_REG_FAST_MEMORY 0x320000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC0_DISABLE 0x300224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC1_DISABLE 0x300234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define USEM_REG_INT_TABLE 0x300400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define USEM_REG_MSG_NUM_FIC0 0x300000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define USEM_REG_MSG_NUM_FIC1 0x300004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define USEM_REG_MSG_NUM_FOC0 0x300008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define USEM_REG_MSG_NUM_FOC1 0x30000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define USEM_REG_MSG_NUM_FOC2 0x300010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define USEM_REG_MSG_NUM_FOC3 0x300014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define USEM_REG_PAS_DISABLE 0x30024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define USEM_REG_PASSIVE_BUFFER 0x302000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define USEM_REG_PRAM 0x340000
+/* [R 16] Valid sleeping threads indication; there is a bit per thread */
+#define USEM_REG_SLEEP_THREADS_VALID 0x30026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define USEM_REG_THREADS_LIST 0x3002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define USEM_REG_TS_0_AS 0x300038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define USEM_REG_TS_10_AS 0x300060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define USEM_REG_TS_11_AS 0x300064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define USEM_REG_TS_12_AS 0x300068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define USEM_REG_TS_13_AS 0x30006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define USEM_REG_TS_14_AS 0x300070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define USEM_REG_TS_15_AS 0x300074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define USEM_REG_TS_16_AS 0x300078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define USEM_REG_TS_17_AS 0x30007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define USEM_REG_TS_18_AS 0x300080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define USEM_REG_TS_1_AS 0x30003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define USEM_REG_TS_2_AS 0x300040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define USEM_REG_TS_3_AS 0x300044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define USEM_REG_TS_4_AS 0x300048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define USEM_REG_TS_5_AS 0x30004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define USEM_REG_TS_6_AS 0x300050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define USEM_REG_TS_7_AS 0x300054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define USEM_REG_TS_8_AS 0x300058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define USEM_REG_TS_9_AS 0x30005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USEM_REG_USEM_INT_MASK_0 0x300110
+#define USEM_REG_USEM_INT_MASK_1 0x300120
+/* [R 32] Interrupt register #0 read */
+#define USEM_REG_USEM_INT_STS_0 0x300104
+#define USEM_REG_USEM_INT_STS_1 0x300114
+/* [RW 32] Parity mask register #0 read/write */
+#define USEM_REG_USEM_PRTY_MASK_0 0x300130
+#define USEM_REG_USEM_PRTY_MASK_1 0x300140
+/* [R 32] Parity register #0 read */
+#define USEM_REG_USEM_PRTY_STS_0 0x300124
+#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define USEM_REG_VFPF_ERR_NUM 0x300380
+#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
+#define VFC_REG_MEMORIES_RST 0x1943c
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [12:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * of LCID100; the RBC address should be 13'ha64. */
+#define XCM_REG_AG_CTX 0x28000
+/* [RW 2] The queue index for registration on Aux1 counter flag. */
+#define XCM_REG_AUX1_Q 0x20134
+/* [RW 2] Per each decision rule the queue index to register to. */
+#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define XCM_REG_CAM_OCCUP 0x20244
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_AG_RD_IFEN 0x20044
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define XCM_REG_CDU_AG_WR_IFEN 0x20040
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define XCM_REG_CDU_SM_WR_IFEN 0x20048
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define XCM_REG_CFC_INIT_CRD 0x20404
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CP_WEIGHT 0x200dc
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_CSEM_IFEN 0x20028
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the csem interface. */
+#define XCM_REG_CSEM_LENGTH_MIS 0x20228
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CSEM_WEIGHT 0x200c4
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_DORQ_IFEN 0x20030
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the dorq interface. */
+#define XCM_REG_DORQ_LENGTH_MIS 0x20230
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_DORQ_WEIGHT 0x200cc
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define XCM_REG_ERR_EVNT_ID 0x200b0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define XCM_REG_ERR_XCM_HDR 0x200ac
+/* [RW 8] The Event ID for Timers expiration. */
+#define XCM_REG_EXPR_EVNT_ID 0x200b4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC0_INIT_CRD 0x2040c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC1_INIT_CRD 0x20410
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
+ ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define XCM_REG_GR_ARB_TYPE 0x2020c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD0_PR 0x20214
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD1_PR 0x20218
+/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG0_IFEN 0x20038
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig0 interface. */
+#define XCM_REG_NIG0_LENGTH_MIS 0x20238
+/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_NIG0_WEIGHT 0x200d4
+/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG1_IFEN 0x2003c
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig1 interface. */
+#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define XCM_REG_N_SM_CTX_LD_0 0x20060
+#define XCM_REG_N_SM_CTX_LD_1 0x20064
+#define XCM_REG_N_SM_CTX_LD_2 0x20068
+#define XCM_REG_N_SM_CTX_LD_3 0x2006c
+#define XCM_REG_N_SM_CTX_LD_4 0x20070
+#define XCM_REG_N_SM_CTX_LD_5 0x20074
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_PBF_IFEN 0x20034
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the pbf interface. */
+#define XCM_REG_PBF_LENGTH_MIS 0x20234
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_PBF_WEIGHT 0x200d0
+#define XCM_REG_PHYS_QNUM3_0 0x20100
+#define XCM_REG_PHYS_QNUM3_1 0x20104
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define XCM_REG_STOP_EVNT_ID 0x200b8
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the STORM interface. */
+#define XCM_REG_STORM_LENGTH_MIS 0x2021c
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_STORM_WEIGHT 0x200bc
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_STORM_XCM_IFEN 0x20010
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define XCM_REG_TM_INIT_CRD 0x2041c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TM_WEIGHT 0x200ec
+/* [RW 28] The CM header for Timers expiration command. */
+#define XCM_REG_TM_XCM_HDR 0x200a8
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TM_XCM_IFEN 0x2001c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TSEM_IFEN 0x20024
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the tsem interface. */
+#define XCM_REG_TSEM_LENGTH_MIS 0x20224
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TSEM_WEIGHT 0x200c0
+/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
+#define XCM_REG_UNA_GT_NXT_Q 0x20120
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_USEM_IFEN 0x2002c
+/* [RC 1] Message length mismatch (relative to last indication) at the usem
+ interface. */
+#define XCM_REG_USEM_LENGTH_MIS 0x2022c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_USEM_WEIGHT 0x200c8
+#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
+#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
+#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
+#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
+#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
+#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
+#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
+#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_CFC_IFEN 0x20050
+/* [RW 14] Interrupt mask register #0 read/write */
+#define XCM_REG_XCM_INT_MASK 0x202b4
+/* [R 14] Interrupt register #0 read */
+#define XCM_REG_XCM_INT_STS 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK 0x202c4
+/* [R 30] Parity register #0 read */
+#define XCM_REG_XCM_PRTY_STS 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc
+
+/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the Reg1WbFlg isn't set. */
+#define XCM_REG_XCM_REG0_SZ 0x200f4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM0_IFEN 0x20004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM1_IFEN 0x20008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_XCM_TM_IFEN 0x20020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_XQM_IFEN 0x2000c
+/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
+#define XCM_REG_XCM_XQM_USE_Q 0x200f0
+/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
+#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define XCM_REG_XQM_INIT_CRD 0x20420
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_P_WEIGHT 0x200e4
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_S_WEIGHT 0x200e8
+/* [RW 28] The CM header value for QM request (primary). */
+#define XCM_REG_XQM_XCM_HDR_P 0x200a0
+/* [RW 28] The CM header value for QM request (secondary). */
+#define XCM_REG_XQM_XCM_HDR_S 0x200a4
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XQM_XCM_IFEN 0x20014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XSDM_IFEN 0x20018
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the SDM interface. */
+#define XCM_REG_XSDM_LENGTH_MIS 0x20220
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XSDM_WEIGHT 0x200e0
+/* [RW 17] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [11:6] - message
+ pointer; [16:12] - next pointer. */
+#define XCM_REG_XX_DESCR_TABLE 0x20480
+#define XCM_REG_XX_DESCR_TABLE_SIZE 32
+/* [R 6] Used to read the XX protection Free counter. */
+#define XCM_REG_XX_FREE 0x20240
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 3. Write writes the initial credit value;
+ read returns the current value of the credit counter. Must be initialized
+ to 2 at start-up. */
+#define XCM_REG_XX_INIT_CRD 0x20424
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~xcm_registers_xx_free.xx_free is read on read. */
+#define XCM_REG_XX_MSG_NUM 0x20428
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
+#define XMAC_CTRL_REG_RX_EN (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN (0x1<<0)
+#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB (0x1<<7)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60
+#define XMAC_REG_CTRL 0
+/* [RW 16] Upper 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
+ * packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_HI 0x2c
+/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
+ * packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_EEE_CTRL 0xd8
+#define XMAC_REG_EEE_TIMERS_HI 0xe4
+#define XMAC_REG_PAUSE_CTRL 0x68
+#define XMAC_REG_PFC_CTRL 0x70
+#define XMAC_REG_PFC_CTRL_HI 0x74
+#define XMAC_REG_RX_LSS_CTRL 0x50
+#define XMAC_REG_RX_LSS_STATUS 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE 0x40
+#define XMAC_REG_TX_CTRL 0x20
+#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE (0x1<<0)
+#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE (0x1<<1)
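+/* Illustrative sketch; not part of the original register file. Enabling
+ * XMAC transmit/receive and symmetric pause with the bit definitions
+ * above; xmac_base (the block's BAR offset) and the driver's REG_RD()/
+ * REG_WR() accessors are assumptions here:
+ *
+ *	u32 ctrl = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+ *	ctrl &= ~XMAC_CTRL_REG_SOFT_RESET;
+ *	ctrl |= XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+ *	REG_WR(bp, xmac_base + XMAC_REG_CTRL, ctrl);
+ *
+ *	u32 pause = REG_RD(bp, xmac_base + XMAC_REG_PAUSE_CTRL);
+ *	pause |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN |
+ *		 XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+ *	REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause);
+ */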
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
+ header pointer. */
+#define XCM_REG_XX_TABLE 0x20500
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define XSDM_REG_AGG_INT_EVENT_0 0x166038
+#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
+#define XSDM_REG_AGG_INT_EVENT_10 0x166060
+#define XSDM_REG_AGG_INT_EVENT_11 0x166064
+#define XSDM_REG_AGG_INT_EVENT_12 0x166068
+#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
+#define XSDM_REG_AGG_INT_EVENT_14 0x166070
+#define XSDM_REG_AGG_INT_EVENT_2 0x166040
+#define XSDM_REG_AGG_INT_EVENT_3 0x166044
+#define XSDM_REG_AGG_INT_EVENT_4 0x166048
+#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
+#define XSDM_REG_AGG_INT_EVENT_6 0x166050
+#define XSDM_REG_AGG_INT_EVENT_7 0x166054
+#define XSDM_REG_AGG_INT_EVENT_8 0x166058
+#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
+#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
+#define XSDM_REG_ENABLE_IN1 0x166238
+#define XSDM_REG_ENABLE_IN2 0x16623c
+#define XSDM_REG_ENABLE_OUT1 0x166240
+#define XSDM_REG_ENABLE_OUT2 0x166244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc
+/* [ST 32] The number of ACK after placement messages received */
+#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c
+/* [ST 32] The number of packet end messages received from the parser */
+#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274
+/* [ST 32] The number of requests received from the pxp async if */
+#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278
+/* [ST 32] The number of commands received in queue 0 */
+#define XSDM_REG_NUM_OF_Q0_CMD 0x166248
+/* [ST 32] The number of commands received in queue 10 */
+#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c
+/* [ST 32] The number of commands received in queue 11 */
+#define XSDM_REG_NUM_OF_Q11_CMD 0x166270
+/* [ST 32] The number of commands received in queue 1 */
+#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c
+/* [ST 32] The number of commands received in queue 3 */
+#define XSDM_REG_NUM_OF_Q3_CMD 0x166250
+/* [ST 32] The number of commands received in queue 4 */
+#define XSDM_REG_NUM_OF_Q4_CMD 0x166254
+/* [ST 32] The number of commands received in queue 5 */
+#define XSDM_REG_NUM_OF_Q5_CMD 0x166258
+/* [ST 32] The number of commands received in queue 6 */
+#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c
+/* [ST 32] The number of commands received in queue 7 */
+#define XSDM_REG_NUM_OF_Q7_CMD 0x166260
+/* [ST 32] The number of commands received in queue 8 */
+#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
+/* [ST 32] The number of commands received in queue 9 */
+#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN 0x1664c4
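+
+/* Editor's illustrative sketch: composing the 17-bit operation-generation
+ * word from the field layout documented above (hypothetical helper macro,
+ * not part of the original header).
+ */
+#define XSDM_OPERATION_GEN_WORD(vec_valid, vec_idx, trig, param) \
+	((((vec_valid) & 0x1) << 16) | (((vec_idx) & 0xff) << 8) | \
+	 (((trig) & 0x7) << 5) | ((param) & 0x1f))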
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558
+/* [RW 32] Tick for the timer counter. Applicable only when
+   ~xsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define XSDM_REG_TIMER_TICK 0x166000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSDM_REG_XSDM_INT_MASK_0 0x16629c
+#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
+/* [R 32] Interrupt register #0 read */
+#define XSDM_REG_XSDM_INT_STS_0 0x166290
+#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
+/* [RW 11] Parity mask register #0 read/write */
+#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
+/* [R 11] Parity register #0 read */
+#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
+   sleeping thread with priority 1; 4 - sleeping thread with priority 2 */
+#define XSEM_REG_ARB_ELEMENT0				 0x280020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
+   sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0 */
+#define XSEM_REG_ARB_ELEMENT1				 0x280024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
+   sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+   or ~xsem_registers_arb_element1.arb_element1 */
+#define XSEM_REG_ARB_ELEMENT2				 0x280028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
+   sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0,
+   ~xsem_registers_arb_element1.arb_element1 or
+   ~xsem_registers_arb_element2.arb_element2 */
+#define XSEM_REG_ARB_ELEMENT3				 0x28002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
+   sleeping thread with priority 1; 4 - sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0,
+   ~xsem_registers_arb_element1.arb_element1,
+   ~xsem_registers_arb_element2.arb_element2 or
+   ~xsem_registers_arb_element3.arb_element3 */
+#define XSEM_REG_ARB_ELEMENT4				 0x280030
+#define XSEM_REG_ENABLE_IN 0x2800a4
+#define XSEM_REG_ENABLE_OUT 0x2800a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in the SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define XSEM_REG_FAST_MEMORY 0x2a0000
+/* [RW 1] Disables input messages from FIC0. May be updated during run-time
+   by the microcode */
+#define XSEM_REG_FIC0_DISABLE				 0x280224
+/* [RW 1] Disables input messages from FIC1. May be updated during run-time
+   by the microcode */
+#define XSEM_REG_FIC1_DISABLE				 0x280234
+/* [RW 15] Interrupt table. Read and write access to it is not possible
+   while the block is in operation */
+#define XSEM_REG_INT_TABLE				 0x280400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define XSEM_REG_MSG_NUM_FIC0 0x280000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define XSEM_REG_MSG_NUM_FIC1 0x280004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define XSEM_REG_MSG_NUM_FOC0 0x280008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define XSEM_REG_MSG_NUM_FOC1 0x28000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define XSEM_REG_MSG_NUM_FOC2 0x280010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define XSEM_REG_MSG_NUM_FOC3 0x280014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run-time by the microcode */
+#define XSEM_REG_PAS_DISABLE				 0x28024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define XSEM_REG_PASSIVE_BUFFER				 0x282000
+/* [WB 46] pram memory. Bit 45 is parity; bits [44:0] are data. */
+#define XSEM_REG_PRAM					 0x2c0000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define XSEM_REG_SLEEP_THREADS_VALID			 0x28026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define XSEM_REG_SLOW_EXT_STORE_EMPTY			 0x2802a0
+/* [RW 16] List of free threads; one bit per thread */
+#define XSEM_REG_THREADS_LIST				 0x2802e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define XSEM_REG_TS_0_AS 0x280038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define XSEM_REG_TS_10_AS 0x280060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define XSEM_REG_TS_11_AS 0x280064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define XSEM_REG_TS_12_AS 0x280068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define XSEM_REG_TS_13_AS 0x28006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define XSEM_REG_TS_14_AS 0x280070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define XSEM_REG_TS_15_AS 0x280074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define XSEM_REG_TS_16_AS 0x280078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define XSEM_REG_TS_17_AS 0x28007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define XSEM_REG_TS_18_AS 0x280080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define XSEM_REG_TS_1_AS 0x28003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define XSEM_REG_TS_2_AS 0x280040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define XSEM_REG_TS_3_AS 0x280044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define XSEM_REG_TS_4_AS 0x280048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define XSEM_REG_TS_5_AS 0x28004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define XSEM_REG_TS_6_AS 0x280050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define XSEM_REG_TS_7_AS 0x280054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define XSEM_REG_TS_8_AS 0x280058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define XSEM_REG_TS_9_AS 0x28005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit for
+ * one of the 64 VFs; values 64-67 reset it for one of the 4 PFs; values
+ * 68-127 are not valid. */
+#define XSEM_REG_VFPF_ERR_NUM 0x280380
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSEM_REG_XSEM_INT_MASK_0 0x280110
+#define XSEM_REG_XSEM_INT_MASK_1 0x280120
+/* [R 32] Interrupt register #0 read */
+#define XSEM_REG_XSEM_INT_STS_0 0x280104
+#define XSEM_REG_XSEM_INT_STS_1 0x280114
+/* [RW 32] Parity mask register #0 read/write */
+#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
+#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
+/* [R 32] Parity register #0 read */
+#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
+#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
+#define MCPR_ACCESS_LOCK_LOCK (1L<<31)
+#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT (1L<<4)
+#define MCPR_NVM_COMMAND_DONE (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST (1L<<7)
+#define MCPR_NVM_COMMAND_LAST (1L<<8)
+#define MCPR_NVM_COMMAND_WR (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
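+
+/* Editor's illustrative sketch: the command-flag combination a driver would
+ * typically program for a single, self-contained NVM write transaction,
+ * assuming the usual doit/first/last handshake implied by the bits above.
+ * Hypothetical helper macro, not part of the original header.
+ */
+#define MCPR_NVM_CMD_SINGLE_WRITE \
+	(MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST | \
+	 MCPR_NVM_COMMAND_WR | MCPR_NVM_COMMAND_DOIT)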
+#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
+#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
+#define EMAC_LED_OVERRIDE (1L<<0)
+#define EMAC_LED_TRAFFIC (1L<<6)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
+#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
+#define EMAC_MODE_25G_MODE (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define EMAC_MODE_PORT_GMII (2L<<2)
+#define EMAC_MODE_PORT_MII (1L<<2)
+#define EMAC_MODE_PORT_MII_10M (3L<<2)
+#define EMAC_MODE_RESET (1L<<0)
+#define EMAC_REG_EMAC_LED 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
+#define EMAC_REG_EMAC_MODE 0x0
+#define EMAC_REG_EMAC_RX_MODE 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
+#define EMAC_REG_EMAC_TX_MODE 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define EMAC_RX_MODE_RESET (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define EMAC_TX_MODE_RESET (1L<<0)
+#define MISC_REGISTERS_GPIO_0 0
+#define MISC_REGISTERS_GPIO_1 1
+#define MISC_REGISTERS_GPIO_2 2
+#define MISC_REGISTERS_GPIO_3 3
+#define MISC_REGISTERS_GPIO_CLR_POS 16
+#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS 24
+#define MISC_REGISTERS_GPIO_HIGH 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 16
+#define MISC_REGISTERS_GPIO_LOW 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
+#define MISC_REGISTERS_GPIO_SET_POS 8
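+
+/* Editor's illustrative sketch (not part of the original header): driving a
+ * GPIO pin high, assuming the set/clear/float bit lanes implied by the
+ * *_POS defines above. The helper name is hypothetical.
+ */
+static inline u32 bnx2x_gpio_reg_set_high(u32 gpio_reg, u8 gpio_num)
+{
+	u32 mask = 1 << gpio_num;
+
+	gpio_reg &= ~(mask << MISC_REGISTERS_GPIO_FLOAT_POS); /* stop floating */
+	gpio_reg &= ~(mask << MISC_REGISTERS_GPIO_CLR_POS);   /* drop clear lane */
+	gpio_reg |= (mask << MISC_REGISTERS_GPIO_SET_POS);    /* drive high */
+	return gpio_reg;
+}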
+#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_1_SET 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
+#define MISC_REGISTERS_SPIO_4 4
+#define MISC_REGISTERS_SPIO_5 5
+#define MISC_REGISTERS_SPIO_7 7
+#define MISC_REGISTERS_SPIO_CLR_POS 16
+#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_SPIO_FLOAT_POS 24
+#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
+#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_SPIO_SET_POS 8
+#define MISC_SPIO_CLR_POS 16
+#define MISC_SPIO_FLOAT (0xffL<<24)
+#define MISC_SPIO_FLOAT_POS 24
+#define MISC_SPIO_INPUT_HI_Z 2
+#define MISC_SPIO_INT_OLD_SET_POS 16
+#define MISC_SPIO_OUTPUT_HIGH 1
+#define MISC_SPIO_OUTPUT_LOW 0
+#define MISC_SPIO_SET_POS 8
+#define MISC_SPIO_SPIO4 0x10
+#define MISC_SPIO_SPIO5 0x20
+#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
+#define HW_LOCK_RESOURCE_DRV_FLAGS 10
+#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_MDIO 0
+#define HW_LOCK_RESOURCE_NVRAM 12
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
+#define HW_LOCK_RESOURCE_RECOVERY_REG 11
+#define HW_LOCK_RESOURCE_RESET 5
+#define HW_LOCK_RESOURCE_SPIO 2
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9)
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0 0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6 6
+#define RESERVED_GENERAL_ATTENTION_BIT_7 7
+#define RESERVED_GENERAL_ATTENTION_BIT_8 8
+#define RESERVED_GENERAL_ATTENTION_BIT_9 9
+#define RESERVED_GENERAL_ATTENTION_BIT_10 10
+#define RESERVED_GENERAL_ATTENTION_BIT_11 11
+#define RESERVED_GENERAL_ATTENTION_BIT_12 12
+#define RESERVED_GENERAL_ATTENTION_BIT_13 13
+#define RESERVED_GENERAL_ATTENTION_BIT_14 14
+#define RESERVED_GENERAL_ATTENTION_BIT_15 15
+#define RESERVED_GENERAL_ATTENTION_BIT_16 16
+#define RESERVED_GENERAL_ATTENTION_BIT_17 17
+#define RESERVED_GENERAL_ATTENTION_BIT_18 18
+#define RESERVED_GENERAL_ATTENTION_BIT_19 19
+#define RESERVED_GENERAL_ATTENTION_BIT_20 20
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
+/* E1H NIG status sync attention mapped to groups 4-7 */
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
+
+
+#define LATCHED_ATTN_RBCR 23
+#define LATCHED_ATTN_RBCT 24
+#define LATCHED_ATTN_RBCN 25
+#define LATCHED_ATTN_RBCU 26
+#define LATCHED_ATTN_RBCP 27
+#define LATCHED_ATTN_TIMEOUT_GRC 28
+#define LATCHED_ATTN_RSVD_GRC 29
+#define LATCHED_ATTN_ROM_PARITY_MCP 30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
+
+#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name)\
+ (1UL << ((94 + atten_name) % 32))
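+/* Example: for TSTORM_FATAL_ASSERT_ATTENTION_BIT (reserved bit 7, defined
+ * above), GENERAL_ATTEN_WORD() yields (94 + 7) / 32 = 3 and
+ * GENERAL_ATTEN_OFFSET() yields 1UL << ((94 + 7) % 32) = 1UL << 5.
+ */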
+/*
+ * This file defines the GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml on the regBase attribute.
+ * Use the base together with the generated offsets of the specific
+ * registers.
+ */
+
+#define GRCBASE_PXPCS 0x000000
+#define GRCBASE_PCICONFIG 0x002000
+#define GRCBASE_PCIREG 0x002400
+#define GRCBASE_EMAC0 0x008000
+#define GRCBASE_EMAC1 0x008400
+#define GRCBASE_DBU 0x008800
+#define GRCBASE_MISC 0x00A000
+#define GRCBASE_DBG 0x00C000
+#define GRCBASE_NIG 0x010000
+#define GRCBASE_XCM 0x020000
+#define GRCBASE_PRS 0x040000
+#define GRCBASE_SRCH 0x040400
+#define GRCBASE_TSDM 0x042000
+#define GRCBASE_TCM 0x050000
+#define GRCBASE_BRB1 0x060000
+#define GRCBASE_MCP 0x080000
+#define GRCBASE_UPB 0x0C1000
+#define GRCBASE_CSDM 0x0C2000
+#define GRCBASE_USDM 0x0C4000
+#define GRCBASE_CCM 0x0D0000
+#define GRCBASE_UCM 0x0E0000
+#define GRCBASE_CDU 0x101000
+#define GRCBASE_DMAE 0x102000
+#define GRCBASE_PXP 0x103000
+#define GRCBASE_CFC 0x104000
+#define GRCBASE_HC 0x108000
+#define GRCBASE_PXP2 0x120000
+#define GRCBASE_PBF 0x140000
+#define GRCBASE_UMAC0 0x160000
+#define GRCBASE_UMAC1 0x160400
+#define GRCBASE_XPB 0x161000
+#define GRCBASE_MSTAT0 0x162000
+#define GRCBASE_MSTAT1 0x162800
+#define GRCBASE_XMAC0 0x163000
+#define GRCBASE_XMAC1 0x163800
+#define GRCBASE_TIMERS 0x164000
+#define GRCBASE_XSDM 0x166000
+#define GRCBASE_QM 0x168000
+#define GRCBASE_DQ 0x170000
+#define GRCBASE_TSEM 0x180000
+#define GRCBASE_CSEM 0x200000
+#define GRCBASE_XSEM 0x280000
+#define GRCBASE_USEM 0x300000
+#define GRCBASE_MISC_AEU GRCBASE_MISC
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET 0x2000
+#define PCICFG_VENDOR_ID_OFFSET 0x00
+#define PCICFG_DEVICE_ID_OFFSET 0x02
+#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
+#define PCICFG_STATUS_OFFSET 0x06
+#define PCICFG_REVISION_ID_OFFSET 0x08
+#define PCICFG_REVESION_ID_MASK 0xff
+#define PCICFG_REVESION_ID_ERROR_VAL 0xff
+#define PCICFG_CACHE_LINE_SIZE 0x0c
+#define PCICFG_LATENCY_TIMER 0x0d
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_MSI_CAP_ID_OFFSET 0x58
+#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
+#define PCICFG_ME_REGISTER 0x98
+#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL 0xb4
+#define PCICFG_DEVICE_STATUS 0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
+#define PCICFG_LINK_CONTROL 0xbc
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
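+/* Editor's note: for size codes 1..15 the encoded BAR1 size is
+ * 64K << (code - 1), e.g. code 9 -> 64K << 8 = 16M; code 0 disables the
+ * BAR. */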
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+#define GRC_BAR2_CONFIG 0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
+/* The first VF_NUM for the PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on the total number of VFs
+ * programmed for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
+ */
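+
+/* Editor's illustrative sketch: recovering the first VF number from the
+ * 4-bit field above, assuming the field encodes first-VF / 8 (consistent
+ * with the multiple-of-8 note). Hypothetical helper macro, not part of the
+ * original header.
+ */
+#define GRC_CR_PF_INIT_VF_FIRST_VF_NUM(val) \
+	(((val) & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) * 8)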
+
+#define PXPCS_TL_CONTROL_5 0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT 0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
+	(1 << 28) /* Unsupported Request Error Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
+	(1 << 27) /* ECRC Error TLP Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
+	(1 << 26) /* Malformed TLP Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
+	(1 << 25) /* Receiver Overflow Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
+	(1 << 24) /* Unexpected Completion Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
+	(1 << 23) /* Receive UR Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
+	(1 << 22) /* Completer Timeout Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
+	(1 << 21) /* Flow Control Protocol Error Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
+	(1 << 20) /* Poisoned Error Status in function 4; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3    (1 << 19) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
+	(1 << 18) /* Unsupported Request Error Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
+	(1 << 17) /* ECRC Error TLP Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
+	(1 << 16) /* Malformed TLP Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
+	(1 << 15) /* Receiver Overflow Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
+	(1 << 14) /* Unexpected Completion Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
+	(1 << 13) /* Receive UR Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
+	(1 << 12) /* Completer Timeout Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
+	(1 << 11) /* Flow Control Protocol Error Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
+	(1 << 10) /* Poisoned Error Status in function 3; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2    (1 << 9) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
+	(1 << 8) /* Unsupported Request Error Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
+	(1 << 7) /* ECRC Error TLP Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
+	(1 << 6) /* Malformed TLP Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
+	(1 << 5) /* Receiver Overflow Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
+	(1 << 4) /* Unexpected Completion Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
+	(1 << 3) /* Receive UR Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
+	(1 << 2) /* Completer Timeout Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
+	(1 << 1) /* Flow Control Protocol Error Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
+	(1 << 0) /* Poisoned Error Status in function 2; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT 0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
+	(1 << 28) /* Unsupported Request Error Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
+	(1 << 27) /* ECRC Error TLP Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
+	(1 << 26) /* Malformed TLP Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
+	(1 << 25) /* Receiver Overflow Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
+	(1 << 24) /* Unexpected Completion Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
+	(1 << 23) /* Receive UR Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
+	(1 << 22) /* Completer Timeout Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
+	(1 << 21) /* Flow Control Protocol Error Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
+	(1 << 20) /* Poisoned Error Status in function 7; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6    (1 << 19) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
+	(1 << 18) /* Unsupported Request Error Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
+	(1 << 17) /* ECRC Error TLP Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
+	(1 << 16) /* Malformed TLP Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
+	(1 << 15) /* Receiver Overflow Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
+	(1 << 14) /* Unexpected Completion Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
+	(1 << 13) /* Receive UR Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
+	(1 << 12) /* Completer Timeout Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
+	(1 << 11) /* Flow Control Protocol Error Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
+	(1 << 10) /* Poisoned Error Status in function 6; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5    (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
+	(1 << 8) /* Unsupported Request Error Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
+	(1 << 7) /* ECRC Error TLP Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
+	(1 << 6) /* Malformed TLP Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
+	(1 << 5) /* Receiver Overflow Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
+	(1 << 4) /* Unexpected Completion Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
+	(1 << 3) /* Receive UR Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
+	(1 << 2) /* Completer Timeout Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
+	(1 << 1) /* Flow Control Protocol Error Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
+	(1 << 0) /* Poisoned Error Status in function 5; if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+
+#define ME_REG_PF_NUM_SHIFT 0
+#define ME_REG_PF_NUM\
+ (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID (1<<8)
+#define ME_REG_VF_NUM_SHIFT 9
+#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR (0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT 16
+#define ME_REG_ABS_PF_NUM\
+ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
+
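+/* Editor's illustrative sketch: unpacking the ME register fields defined
+ * above (hypothetical helper, not part of the original header).
+ */
+static inline void bnx2x_me_unpack(u32 me, u8 *rel_pf, u8 *abs_pf,
+				   bool *vf_valid, u8 *vf_num)
+{
+	*rel_pf   = (me & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT;
+	*abs_pf   = (me & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT;
+	*vf_valid = !!(me & ME_REG_VF_VALID);
+	*vf_num   = (me & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+}
+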
+
+#define PXP_VF_ADDR_IGU_START 0
+#define PXP_VF_ADDR_IGU_SIZE 0x3000
+#define PXP_VF_ADDR_IGU_END\
+ ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE\
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END\
+ ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END\
+ ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START 0x7c00
+#define PXP_VF_ADDR_DB_SIZE 0x200
+#define PXP_VF_ADDR_DB_END\
+ ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
+
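+/* Editor's illustrative sketch: classifying a VF BAR offset against the
+ * address windows above (hypothetical helper, not part of the original
+ * header).
+ */
+static inline bool pxp_vf_addr_is_doorbell(u32 addr)
+{
+	return addr >= PXP_VF_ADDR_DB_START && addr <= PXP_VF_ADDR_DB_END;
+}
+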
+#define MDIO_REG_BANK_CL73_IEEEB0 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1 0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
+
+#define MDIO_REG_BANK_RX0 0x80b0
+#define MDIO_RX0_RX_STATUS 0x10
+#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+#define MDIO_RX0_RX_EQ_BOOST 0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX1 0x80c0
+#define MDIO_RX1_RX_EQ_BOOST 0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX2 0x80d0
+#define MDIO_RX2_RX_EQ_BOOST 0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX3 0x80e0
+#define MDIO_RX3_RX_EQ_BOOST 0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX_ALL 0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_TX0 0x8060
+#define MDIO_TX0_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX1 0x8070
+#define MDIO_TX1_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX2 0x8080
+#define MDIO_TX2_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX3 0x8090
+#define MDIO_TX3_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
+#define MDIO_BLOCK1_LANE_CTRL0 0x15
+#define MDIO_BLOCK1_LANE_CTRL1 0x16
+#define MDIO_BLOCK1_LANE_CTRL2 0x17
+#define MDIO_BLOCK1_LANE_PRBS 0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+#define MDIO_REG_BANK_GP_STATUS 0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
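+/* Editor's note: a driver resolves the negotiated speed by masking status1
+ * with ..._ACTUAL_SPEED_MASK and comparing the result directly against the
+ * ..._ACTUAL_SPEED_* codes above.
+ */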
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1 0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+#define MDIO_REG_BANK_OVER_1G 0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+#define MDIO_OVER_1G_UP1 0x19
+#define MDIO_OVER_1G_UP1_2_5G 0x0001
+#define MDIO_OVER_1G_UP1_5G 0x0002
+#define MDIO_OVER_1G_UP1_6G 0x0004
+#define MDIO_OVER_1G_UP1_10G 0x0010
+#define MDIO_OVER_1G_UP1_10GH 0x0008
+#define MDIO_OVER_1G_UP1_12G 0x0020
+#define MDIO_OVER_1G_UP1_12_5G 0x0040
+#define MDIO_OVER_1G_UP1_13G 0x0080
+#define MDIO_OVER_1G_UP1_15G 0x0100
+#define MDIO_OVER_1G_UP1_16G 0x0200
+#define MDIO_OVER_1G_UP2 0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+#define MDIO_OVER_1G_UP3 0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+#define MDIO_OVER_1G_LP_UP1 0x1C
+#define MDIO_OVER_1G_LP_UP2 0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+#define MDIO_OVER_1G_LP_UP3 0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY 0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0 0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+/* When the link partner is in SGMII mode (bit 0 = 1), then
+ * bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
+ * The other bits are reserved and should be zero.
+ */
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
+
+
+#define MDIO_PMA_DEVAD 0x1
+/*ieee*/
+#define MDIO_PMA_REG_CTRL 0x0
+#define MDIO_PMA_REG_STATUS 0x1
+#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
+#define MDIO_PMA_REG_RX_SD 0xa
+/*bcm*/
+#define MDIO_PMA_REG_BCM_CTRL 0x0096
+#define MDIO_PMA_REG_FEC_CTRL 0x00ab
+#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
+#define MDIO_PMA_REG_MISC_CTRL 0xca0a
+#define MDIO_PMA_REG_GEN_CTRL 0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
+#define MDIO_PMA_REG_ROM_VER1 0xca19
+#define MDIO_PMA_REG_ROM_VER2 0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
+#define MDIO_PMA_REG_PLL_CTRL 0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 0xca23
+#define MDIO_PMA_REG_LRM_MODE 0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
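+
+/* Polling sketch (illustrative only, not the in-tree reader): a two-wire
+ * (I2C) transfer toward the SFP module is done once the status field inside
+ * the control register leaves the IN_PROGRESS state. The real EEPROM readers
+ * in bnx2x_link.c also program the byte count and memory address and bound
+ * the wait with a timeout; bnx2x_cl45_read() is assumed to be the driver's
+ * clause 45 accessor:
+ *
+ *	u16 ctrl;
+ *
+ *	do {
+ *		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ *				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &ctrl);
+ *	} while ((ctrl & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ *		 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS);
+ */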
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
+
+#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1 0xc026
+#define MDIO_PMA_REG_7101_VER2 0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD 0x2
+/*bcm*/
+#define MDIO_WIS_REG_LASI_CNTL 0x9002
+#define MDIO_WIS_REG_LASI_STATUS 0x9005
+
+#define MDIO_PCS_DEVAD 0x3
+#define MDIO_PCS_REG_STATUS 0x0020
+#define MDIO_PCS_REG_LASI_STATUS 0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD 0x4
+#define MDIO_XS_PLL_SEQUENCER 0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
+#define MDIO_AN_DEVAD 0x7
+/*ieee*/
+#define MDIO_AN_REG_CTRL 0x0000
+#define MDIO_AN_REG_STATUS 0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+#define MDIO_AN_REG_ADV_PAUSE 0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+#define MDIO_AN_REG_ADV 0x0011
+#define MDIO_AN_REG_ADV2 0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
+#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
+/*bcm*/
+#define MDIO_AN_REG_LINK_STATUS 0x8304
+#define MDIO_AN_REG_CL37_CL73 0x8370
+#define MDIO_AN_REG_CL37_AN 0xffe0
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_1000T_STATUS 0xffea
+
+#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB 0xffe2
+#define BCM84858_PHY_ID 0x600d
+#define MDIO_AN_REG_848xx_ID_LSB 0xffe3
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
+#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0
+#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, the 10GBASE-T core is
+  * powered down; when it is active low, the 10GBASE-T core is powered up.
+  */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+/* BCM84858 only */
+#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000
+
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* This is the mailbox register set used by the 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
+
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001
+#define PHY848xx_CMD_GET_EEE_MODE 0x8008
+#define PHY848xx_CMD_SET_EEE_MODE 0x8009
+/* Mailbox status set used by 84833 only */
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+/* Mailbox Process */
+#define PHY84833_MB_PROCESS1 1
+#define PHY84833_MB_PROCESS2 2
+#define PHY84833_MB_PROCESS3 3
+
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED 0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb
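+
+/* Handshake sketch (illustrative only): a firmware command is issued by
+ * writing its opcode to the command scratch register and polling the status
+ * scratch register until the firmware leaves the IN_PROGRESS state. The real
+ * handler in bnx2x_link.c also loads DATA1..DATA5 and enforces a timeout;
+ * bnx2x_cl45_read()/bnx2x_cl45_write() are assumed to be the driver's
+ * clause 45 accessors:
+ *
+ *	u16 status;
+ *
+ *	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ *			 MDIO_848xx_CMD_HDLR_COMMAND,
+ *			 PHY848xx_CMD_GET_EEE_MODE);
+ *	do {
+ *		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ *				MDIO_848xx_CMD_HDLR_STATUS, &status);
+ *	} while (status == PHY84833_STATUS_CMD_IN_PROGRESS);
+ */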
+
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+#define MDIO_WC_REG_PCS_STATUS2 0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET 0x01
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK 0x000e
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
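+/* Read-modify-write sketch (illustrative) for one TX_DRIVER field, using the
+ * OFFSET/MASK pairs defined above; 'val' and 'idriver' are hypothetical
+ * locals:
+ *
+ *	val &= ~MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK;
+ *	val |= (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) &
+ *	       MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK;
+ */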
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
+#define MDIO_REG_GPHY_EXP_ACCESS 0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
+#define IGU_FUNC_BASE 0x0400
+
+#define IGU_ADDR_MSIX 0x0000
+#define IGU_ADDR_INT_ACK 0x0200
+#define IGU_ADDR_PROD_UPD 0x0201
+#define IGU_ADDR_ATTN_BITS_UPD 0x0202
+#define IGU_ADDR_ATTN_BITS_SET 0x0203
+#define IGU_ADDR_ATTN_BITS_CLR 0x0204
+#define IGU_ADDR_COALESCE_NOW 0x0205
+#define IGU_ADDR_SIMD_MASK 0x0206
+#define IGU_ADDR_SIMD_NOMASK 0x0207
+#define IGU_ADDR_MSI_CTL 0x0210
+#define IGU_ADDR_MSI_ADDR_LO 0x0211
+#define IGU_ADDR_MSI_ADDR_HI 0x0212
+#define IGU_ADDR_MSI_DATA 0x0213
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
+
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
+
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER\
+ (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER\
+ (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
+
+#define IGU_REG_RESERVED_UPPER 0x05ff
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS 5
+#define IGU_BC_NDSB_NUM_SEGS 2
+#define IGU_NORM_DSB_NUM_SEGS 2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD 128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+ /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1;
+  * [5:2] = 0; [1:0] = PF number)
+  */
+#define IGU_FID_ENCODE_IS_PF (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT 6
+#define IGU_FID_VF_NUM_MASK (0x3f)
+#define IGU_FID_PF_NUM_MASK (0x7)
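+/* Worked example: the FID of PF 2 is (IGU_FID_ENCODE_IS_PF | 2) = 0x42,
+ * while the FID of VF 5 is simply 0x05 (bit [6] clear). */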
+
+#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/* String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type)\
+ (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type)\
+ (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
+ (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
+ (0x80 | (((_type)&0xf) << 3) | ((_crc) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
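+/* Usage sketch (illustrative): the reserved validation byte for an ETH
+ * connection (connection type 0, per the connType numbering used by the
+ * self test) in the XCM aggregation region could be formed as
+ *
+ *	u8 valid = CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG, 0);
+ *
+ * and later cleared with CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(valid), which
+ * simply drops the 0x80 valid bit.
+ */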
+
+/* IdleChk registers */
+#define PXP_REG_HST_VF_DISABLED_ERROR_VALID 0x1030bc
+#define PXP_REG_HST_VF_DISABLED_ERROR_DATA 0x1030b8
+#define PXP_REG_HST_PER_VIOLATION_VALID 0x1030e0
+#define PXP_REG_HST_INCORRECT_ACCESS_VALID 0x1030cc
+#define PXP2_REG_RD_CPL_ERR_DETAILS 0x120778
+#define PXP2_REG_RD_CPL_ERR_DETAILS2 0x12077c
+#define PXP2_REG_RQ_GARB 0x120748
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q0 0x15c1bc
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q1 0x15c1c0
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q2 0x15c1c4
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q3 0x15c1c8
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q4 0x15c1cc
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q5 0x15c1d0
+#define PBF_REG_CREDIT_Q2 0x140344
+#define PBF_REG_CREDIT_Q3 0x140348
+#define PBF_REG_CREDIT_Q4 0x14034c
+#define PBF_REG_CREDIT_Q5 0x140350
+#define PBF_REG_INIT_CRD_Q2 0x15c238
+#define PBF_REG_INIT_CRD_Q3 0x15c23c
+#define PBF_REG_INIT_CRD_Q4 0x15c240
+#define PBF_REG_INIT_CRD_Q5 0x15c244
+#define PBF_REG_TASK_CNT_Q0 0x140374
+#define PBF_REG_TASK_CNT_Q1 0x140378
+#define PBF_REG_TASK_CNT_Q2 0x14037c
+#define PBF_REG_TASK_CNT_Q3 0x140380
+#define PBF_REG_TASK_CNT_Q4 0x140384
+#define PBF_REG_TASK_CNT_Q5 0x140388
+#define PBF_REG_TASK_CNT_LB_Q 0x140370
+#define QM_REG_BYTECRD0 0x16e6fc
+#define QM_REG_BYTECRD1 0x16e700
+#define QM_REG_BYTECRD2 0x16e704
+#define QM_REG_BYTECRD3 0x16e7ac
+#define QM_REG_BYTECRD4 0x16e7b0
+#define QM_REG_BYTECRD5 0x16e7b4
+#define QM_REG_BYTECRD6 0x16e7b8
+#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
+#define QM_REG_BYTECRDERRREG 0x16e708
+#define MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID 0xa714
+#define QM_REG_VOQCREDIT_2 0x1682d8
+#define QM_REG_VOQCREDIT_3 0x1682dc
+#define QM_REG_VOQCREDIT_5 0x1682e4
+#define QM_REG_VOQCREDIT_6 0x1682e8
+#define QM_REG_VOQINITCREDIT_3 0x16806c
+#define QM_REG_VOQINITCREDIT_6 0x168078
+#define QM_REG_FWVOQ0TOHWVOQ 0x16e7bc
+#define QM_REG_FWVOQ1TOHWVOQ 0x16e7c0
+#define QM_REG_FWVOQ2TOHWVOQ 0x16e7c4
+#define QM_REG_FWVOQ3TOHWVOQ 0x16e7c8
+#define QM_REG_FWVOQ4TOHWVOQ 0x16e7cc
+#define QM_REG_FWVOQ5TOHWVOQ 0x16e7d0
+#define QM_REG_FWVOQ6TOHWVOQ 0x16e7d4
+#define QM_REG_FWVOQ7TOHWVOQ 0x16e7d8
+#define NIG_REG_INGRESS_EOP_PORT0_EMPTY 0x104ec
+#define NIG_REG_INGRESS_EOP_PORT1_EMPTY 0x104f8
+#define NIG_REG_INGRESS_RMP0_DSCR_EMPTY 0x10530
+#define NIG_REG_INGRESS_RMP1_DSCR_EMPTY 0x10538
+#define NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY 0x10508
+#define NIG_REG_EGRESS_MNG0_FIFO_EMPTY 0x10460
+#define NIG_REG_EGRESS_MNG1_FIFO_EMPTY 0x10474
+#define NIG_REG_EGRESS_DEBUG_FIFO_EMPTY 0x10418
+#define NIG_REG_EGRESS_DELAY0_EMPTY 0x10420
+#define NIG_REG_EGRESS_DELAY1_EMPTY 0x10428
+#define NIG_REG_LLH0_FIFO_EMPTY 0x10548
+#define NIG_REG_LLH1_FIFO_EMPTY 0x10558
+#define NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY 0x182a8
+#define NIG_REG_P0_TLLH_FIFO_EMPTY 0x18308
+#define NIG_REG_P0_HBUF_DSCR_EMPTY 0x18318
+#define NIG_REG_P1_HBUF_DSCR_EMPTY 0x18348
+#define NIG_REG_P0_RX_MACFIFO_EMPTY 0x18570
+#define NIG_REG_P0_TX_MACFIFO_EMPTY 0x18578
+#define NIG_REG_EGRESS_DELAY2_EMPTY 0x1862c
+#define NIG_REG_EGRESS_DELAY3_EMPTY 0x18630
+#define NIG_REG_EGRESS_DELAY4_EMPTY 0x18634
+#define NIG_REG_EGRESS_DELAY5_EMPTY 0x18638
+
+/******************************************************************************
+ * Description:
+ * Calculates CRC-8 on a 32-bit word: polynomial x^8 + x^2 + x^1 + x^0
+ * (the "0-1-2-8" polynomial). Code was translated from Verilog.
+ * Return:
+ * The updated CRC-8 value.
+ *****************************************************************************/
+static inline u8 calc_crc8(u32 data, u8 crc)
+{
+ u8 D[32];
+ u8 NewCRC[8];
+ u8 C[8];
+ u8 crc_res;
+ u8 i;
+
+ /* split the data into 32 bits */
+ for (i = 0; i < 32; i++) {
+ D[i] = (u8)(data & 1);
+ data = data >> 1;
+ }
+
+ /* split the crc into 8 bits */
+ for (i = 0; i < 8; i++) {
+ C[i] = crc & 1;
+ crc = crc >> 1;
+ }
+
+ NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+ D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+ C[6] ^ C[7];
+ NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+ D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
+ C[6];
+ NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+ D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+ C[0] ^ C[1] ^ C[4] ^ C[5];
+ NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+ D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+ C[1] ^ C[2] ^ C[5] ^ C[6];
+ NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+ D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+ C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+ NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+ D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+ C[3] ^ C[4] ^ C[7];
+ NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+ D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+ C[5];
+ NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+ D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+ C[6];
+
+ crc_res = 0;
+ for (i = 0; i < 8; i++)
+ crc_res |= (NewCRC[i] << i);
+
+ return crc_res;
+}
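+
+/* Standalone harness sketch (illustrative): calc_crc8() is a pure function,
+ * so it can be exercised outside the kernel by supplying the u8/u32 typedefs
+ * it expects; no particular output value is asserted here:
+ *
+ *	#include <stdio.h>
+ *	typedef unsigned char u8;
+ *	typedef unsigned int u32;
+ *	// paste calc_crc8() from above here
+ *	int main(void)
+ *	{
+ *		printf("crc8 = 0x%02x\n", calc_crc8(0x1020, 0xff));
+ *		return 0;
+ *	}
+ */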
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c
new file mode 100644
index 0000000000..3f8bdad335
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c
@@ -0,0 +1,3183 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include "bnx2x.h"
+
+#define NA 0xCD
+
+#define IDLE_CHK_E1 0x01
+#define IDLE_CHK_E1H 0x02
+#define IDLE_CHK_E2 0x04
+#define IDLE_CHK_E3A0 0x08
+#define IDLE_CHK_E3B0 0x10
+
+#define IDLE_CHK_ERROR 1
+#define IDLE_CHK_ERROR_NO_TRAFFIC 2
+#define IDLE_CHK_WARNING 3
+
+#define MAX_FAIL_MSG 256
+
+/* statistics and error reporting */
+static int idle_chk_errors, idle_chk_warnings;
+
+/* masks for all chip types */
+static int is_e1, is_e1h, is_e2, is_e3a0, is_e3b0;
+
+/* struct for the argument list for a predicate in the self test database */
+struct st_pred_args {
+ u32 val1; /* value read from first register */
+ u32 val2; /* value read from second register, if applicable */
+ u32 imm1; /* 1st value in predicate condition, left-to-right */
+ u32 imm2; /* 2nd value in predicate condition, left-to-right */
+ u32 imm3; /* 3rd value in predicate condition, left-to-right */
+ u32 imm4; /* 4th value in predicate condition, left-to-right */
+};
+
+/* struct representing self test record - a single test */
+struct st_record {
+ u8 chip_mask;
+ u8 macro;
+ u32 reg1;
+ u32 reg2;
+ u16 loop;
+ u16 incr;
+ int (*bnx2x_predicate)(struct st_pred_args *pred_args);
+ u32 reg3;
+ u8 severity;
+ char *fail_msg;
+ struct st_pred_args pred_args;
+};
+
+/* predicates for self test */
+static int peq(struct st_pred_args *args)
+{
+ return (args->val1 == args->imm1);
+}
+
+static int pneq(struct st_pred_args *args)
+{
+ return (args->val1 != args->imm1);
+}
+
+static int pand_neq(struct st_pred_args *args)
+{
+ return ((args->val1 & args->imm1) != args->imm2);
+}
+
+static int pand_neq_x2(struct st_pred_args *args)
+{
+ return (((args->val1 & args->imm1) != args->imm2) &&
+ ((args->val1 & args->imm3) != args->imm4));
+}
+
+static int pneq_err(struct st_pred_args *args)
+{
+ return ((args->val1 != args->imm1) && (idle_chk_errors > args->imm2));
+}
+
+static int pgt(struct st_pred_args *args)
+{
+ return (args->val1 > args->imm1);
+}
+
+static int pneq_r2(struct st_pred_args *args)
+{
+ return (args->val1 != args->val2);
+}
+
+static int plt_sub_r2(struct st_pred_args *args)
+{
+ return (args->val1 < (args->val2 - args->imm1));
+}
+
+static int pne_sub_r2(struct st_pred_args *args)
+{
+ return (args->val1 != (args->val2 - args->imm1));
+}
+
+static int prsh_and_neq(struct st_pred_args *args)
+{
+ return (((args->val1 >> args->imm1) & args->imm2) != args->imm3);
+}
+
+static int peq_neq_r2(struct st_pred_args *args)
+{
+ return ((args->val1 == args->imm1) && (args->val2 != args->imm2));
+}
+
+static int peq_neq_neq_r2(struct st_pred_args *args)
+{
+ return ((args->val1 == args->imm1) && (args->val2 != args->imm2) &&
+ (args->val2 != args->imm3));
+}
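+
+/* Simplified sketch of how one record could be walked (illustrative only;
+ * it ignores the 'macro' access-mode field, which selects among several
+ * read patterns, and shows just the single- and dual-register cases).
+ * REG_RD() is the driver's register accessor:
+ *
+ *	static void st_check_record(struct bnx2x *bp, struct st_record *rec)
+ *	{
+ *		u16 i;
+ *
+ *		for (i = 0; i < rec->loop; i++) {
+ *			rec->pred_args.val1 =
+ *				REG_RD(bp, rec->reg1 + i * rec->incr);
+ *			if (rec->reg2 != NA)
+ *				rec->pred_args.val2 = REG_RD(bp, rec->reg2);
+ *			if (rec->bnx2x_predicate(&rec->pred_args))
+ *				pr_err("idle_chk: %s\n", rec->fail_msg);
+ *		}
+ *	}
+ */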
+
+/* struct holding the database of self test checks (registers and predicates) */
+/* line numbers start from 2, since line 1 is the heading row in the source csv */
+#define ST_DB_LINES 468
+static struct st_record st_database[ST_DB_LINES] = {
+/*line 2*/{(0x3), 1, 0x2114,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: ucorr_err_status is not 0",
+ {NA, NA, 0x0FF010, 0, NA, NA} },
+
+/*line 3*/{(0x3), 1, 0x2114,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: ucorr_err_status - Unsupported request error",
+ {NA, NA, 0x100000, 0, NA, NA} },
+
+/*line 4*/{(0x3), 1, 0x2120,
+ NA, 1, 0, pand_neq_x2,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: corr_err_status is not 0x2000",
+ {NA, NA, 0x31C1, 0x2000, 0x31C1, 0} },
+
+/*line 5*/{(0x3), 1, 0x2814,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: attentions register is not 0x40100",
+ {NA, NA, ~0x40100, 0, NA, NA} },
+
+/*line 6*/{(0x2), 1, 0x281c,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: attentions register is not 0x40040100",
+ {NA, NA, ~0x40040100, 0, NA, NA} },
+
+/*line 7*/{(0x2), 1, 0x2820,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: attentions register is not 0x40040100",
+ {NA, NA, ~0x40040100, 0, NA, NA} },
+
+/*line 8*/{(0x3), 1, PXP2_REG_PGL_EXP_ROM2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: There are outstanding read requests. Not all completios have arrived for read requests on tags that are marked with 0",
+ {NA, NA, 0xffffffff, NA, NA, NA} },
+
+/*line 9*/{(0x3), 2, 0x212c,
+ NA, 4, 4, pneq_err,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: error packet header is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 10*/{(0x1C), 1, 0x2104,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: ucorr_err_status is not 0",
+ {NA, NA, 0x0FD010, 0, NA, NA} },
+
+/*line 11*/{(0x1C), 1, 0x2104,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: ucorr_err_status - Unsupported request error",
+ {NA, NA, 0x100000, 0, NA, NA} },
+
+/*line 12*/{(0x1C), 1, 0x2104,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: ucorr_err_status - Flow Control Protocol Error",
+ {NA, NA, 0x2000, 0, NA, NA} },
+
+/*line 13*/{(0x1C), 1, 0x2110,
+ NA, 1, 0, pand_neq_x2,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: corr_err_status is not 0x2000",
+ {NA, NA, 0x31C1, 0x2000, 0x31C1, 0} },
+
+/*line 14*/{(0x1C), 1, 0x2814,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: TTX_BRIDGE_FORWARD_ERR - Received master request while BME was 0",
+ {NA, NA, 0x2000000, 0, NA, NA} },
+
+/*line 15*/{(0x1C), 1, 0x2814,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: Func 0 1: attentions register is not 0x2040902",
+ {NA, NA, ~0x2040902, 0, NA, NA} },
+
+/*line 16*/{(0x1C), 1, 0x2854,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: Func 2 3 4: attentions register is not 0x10240902",
+ {NA, NA, ~0x10240902, 0, NA, NA} },
+
+/*line 17*/{(0x1C), 1, 0x285c,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: Func 5 6 7: attentions register is not 0x10240902",
+ {NA, NA, ~0x10240902, 0, NA, NA} },
+
+/*line 18*/{(0x18), 1, 0x3040,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "PCIE: Overflow in DLP2TLP buffer",
+ {NA, NA, 0x2, 0, NA, NA} },
+
+/*line 19*/{(0x1C), 1, PXP2_REG_PGL_EXP_ROM2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: There are outstanding read requests for tags 0-31. Not all completios have arrived for read requests on tags that are marked with 0",
+ {NA, NA, 0xffffffff, NA, NA, NA} },
+
+/*line 20*/{(0x1C), 2, 0x211c,
+ NA, 4, 4, pneq_err,
+ NA, IDLE_CHK_WARNING,
+ "PCIE: error packet header is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 21*/{(0x1C), 1, PGLUE_B_REG_INCORRECT_RCV_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PGLUE_B: Packet received from PCIe not according to the rules",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 22*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_31_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: was_error for VFs 0-31 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 23*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_63_32,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: was_error for VFs 32-63 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 24*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_95_64,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: was_error for VFs 64-95 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 25*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_127_96,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: was_error for VFs 96-127 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 26*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_PF_7_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: was_error for PFs 0-7 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 27*/{(0x1C), 1, PGLUE_B_REG_RX_ERR_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Completion received with error. (2:0) - PFID. (3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout; 1 - Unsupported Request; 2 - Completer Abort. (12) - valid bit",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 28*/{(0x1C), 1, PGLUE_B_REG_RX_TCPL_ERR_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: ATS TCPL received with error. (2:0) - PFID. (3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout ; 1 - Unsupported Request; 2 - Completer Abort. (16:12) - OTB Entry ID. (17) - valid bit",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 29*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_31_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Error in master write. Address(31:0) is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 30*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_63_32,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Error in master write. Address(63:32) is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 31*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Error in master write. Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 32*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Error in master write. Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 33*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_31_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE: Error in master read address(31:0) is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 34*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_63_32,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Error in master read address(63:32) is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 35*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Error in master read Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 36*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Error in master read Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 37*/{(0x1C), 1, PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Target VF length violation access",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 38*/{(0x1C), 1, PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Target VF GRC space access failed permission check",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 39*/{(0x1C), 1, PGLUE_B_REG_TAGS_63_32,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: There are outstanding read requests for tags 32-63. Not all completios have arrived for read requests on tags that are marked with 0",
+ {NA, NA, 0xffffffff, NA, NA, NA} },
+
+/*line 40*/{(0x1C), 3, PXP_REG_HST_VF_DISABLED_ERROR_VALID,
+ PXP_REG_HST_VF_DISABLED_ERROR_DATA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: Access to disabled VF took place",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 41*/{(0x1C), 1, PXP_REG_HST_PER_VIOLATION_VALID,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: Zone A permission violation occurred",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 42*/{(0x1C), 1, PXP_REG_HST_INCORRECT_ACCESS_VALID,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: Incorrect transaction took place",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 43*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: Completion received with error. Error details register is not 0. (15:0) - ECHO. (28:16) - Sub Request length plus start_offset_2_0 minus 1",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 44*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: Completion received with error. Error details 2nd register is not 0. (4:0) - VQ ID. (8:5) - client ID. (9) - valid bit",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 45*/{(0x1F), 1, PXP2_REG_RQ_VQ0_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ0 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 46*/{(0x1F), 1, PXP2_REG_RQ_VQ1_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ1 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 47*/{(0x1F), 1, PXP2_REG_RQ_VQ2_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ2 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 48*/{(0x1F), 1, PXP2_REG_RQ_VQ3_ENTRY_CNT,
+ NA, 1, 0, pgt,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ3 is not empty",
+ {NA, NA, 2, NA, NA, NA} },
+
+/*line 49*/{(0x1F), 1, PXP2_REG_RQ_VQ4_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ4 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 50*/{(0x1F), 1, PXP2_REG_RQ_VQ5_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ5 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 51*/{(0x1F), 1, PXP2_REG_RQ_VQ6_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ6 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 52*/{(0x1F), 1, PXP2_REG_RQ_VQ7_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ7 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 53*/{(0x1F), 1, PXP2_REG_RQ_VQ8_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ8 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 54*/{(0x1F), 1, PXP2_REG_RQ_VQ9_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ9 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 55*/{(0x1F), 1, PXP2_REG_RQ_VQ10_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ10 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 56*/{(0x1F), 1, PXP2_REG_RQ_VQ11_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ11 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 57*/{(0x1F), 1, PXP2_REG_RQ_VQ12_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ12 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 58*/{(0x1F), 1, PXP2_REG_RQ_VQ13_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ13 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 59*/{(0x1F), 1, PXP2_REG_RQ_VQ14_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ14 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 60*/{(0x1F), 1, PXP2_REG_RQ_VQ15_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ15 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 61*/{(0x1F), 1, PXP2_REG_RQ_VQ16_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ16 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 62*/{(0x1F), 1, PXP2_REG_RQ_VQ17_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ17 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 63*/{(0x1F), 1, PXP2_REG_RQ_VQ18_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ18 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 64*/{(0x1F), 1, PXP2_REG_RQ_VQ19_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ19 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 65*/{(0x1F), 1, PXP2_REG_RQ_VQ20_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ20 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 66*/{(0x1F), 1, PXP2_REG_RQ_VQ21_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ21 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 67*/{(0x1F), 1, PXP2_REG_RQ_VQ22_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ22 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 68*/{(0x1F), 1, PXP2_REG_RQ_VQ23_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ23 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 69*/{(0x1F), 1, PXP2_REG_RQ_VQ24_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ24 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 70*/{(0x1F), 1, PXP2_REG_RQ_VQ25_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ25 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 71*/{(0x1F), 1, PXP2_REG_RQ_VQ26_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ26 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 72*/{(0x1F), 1, PXP2_REG_RQ_VQ27_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ27 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 73*/{(0x1F), 1, PXP2_REG_RQ_VQ28_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ28 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 74*/{(0x1F), 1, PXP2_REG_RQ_VQ29_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ29 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 75*/{(0x1F), 1, PXP2_REG_RQ_VQ30_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ30 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 76*/{(0x1F), 1, PXP2_REG_RQ_VQ31_ENTRY_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: VQ31 is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 77*/{(0x1F), 1, PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: rq_ufifo_num_of_entry is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 78*/{(0x1F), 1, PXP2_REG_RQ_RBC_DONE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: rq_rbc_done is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 79*/{(0x1F), 1, PXP2_REG_RQ_CFG_DONE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: rq_cfg_done is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 80*/{(0x3), 1, PXP2_REG_PSWRQ_BW_CREDIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: rq_read_credit and rq_write_credit are not 3",
+ {NA, NA, 0x1B, NA, NA, NA} },
+
+/*line 81*/{(0x1F), 1, PXP2_REG_RD_START_INIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: rd_start_init is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 82*/{(0x1F), 1, PXP2_REG_RD_INIT_DONE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: rd_init_done is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 83*/{(0x1F), 3, PXP2_REG_RD_SR_CNT,
+ PXP2_REG_RD_SR_NUM_CFG, 1, 0, pne_sub_r2,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: rd_sr_cnt is not equal to rd_sr_num_cfg",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 84*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT,
+ PXP2_REG_RD_BLK_NUM_CFG, 1, 0, pneq_r2,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: rd_blk_cnt is not equal to rd_blk_num_cfg",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 85*/{(0x1F), 3, PXP2_REG_RD_SR_CNT,
+ PXP2_REG_RD_SR_NUM_CFG, 1, 0, plt_sub_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: There are more than two unused SRs",
+ {NA, NA, 3, NA, NA, NA} },
+
+/*line 86*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT,
+ PXP2_REG_RD_BLK_NUM_CFG, 1, 0, plt_sub_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: There are more than two unused blocks",
+ {NA, NA, 2, NA, NA, NA} },
+
+/*line 87*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: P0 All delivery ports are not idle",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 88*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: P1 All delivery ports are not idle",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 89*/{(0x1F), 2, PXP2_REG_RD_ALMOST_FULL_0,
+ NA, 11, 4, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: rd_almost_full is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 90*/{(0x1F), 1, PXP2_REG_RD_DISABLE_INPUTS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: PSWRD inputs are disabled",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 91*/{(0x1F), 1, PXP2_REG_HST_HEADER_FIFO_STATUS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: HST header FIFO status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 92*/{(0x1F), 1, PXP2_REG_HST_DATA_FIFO_STATUS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: HST data FIFO status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 93*/{(0x3), 1, PXP2_REG_PGL_WRITE_BLOCKED,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: pgl_write_blocked is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 94*/{(0x3), 1, PXP2_REG_PGL_READ_BLOCKED,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: pgl_read_blocked is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 95*/{(0x1C), 1, PXP2_REG_PGL_WRITE_BLOCKED,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: pgl_write_blocked is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 96*/{(0x1C), 1, PXP2_REG_PGL_READ_BLOCKED,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: pgl_read_blocked is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 97*/{(0x1F), 1, PXP2_REG_PGL_TXW_CDTS,
+ NA, 1, 0, prsh_and_neq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PXP2: There is data which is ready",
+ {NA, NA, 17, 1, 0, NA} },
+
+/*line 98*/{(0x1F), 1, PXP_REG_HST_ARB_IS_IDLE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: HST arbiter is not idle",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 99*/{(0x1F), 1, PXP_REG_HST_CLIENTS_WAITING_TO_ARB,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: HST one of the clients is waiting for delivery",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 100*/{(0x1E), 1, PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: HST Close the gates: Discarding internal writes",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 101*/{(0x1E), 1, PXP_REG_HST_DISCARD_DOORBELLS_STATUS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: HST Close the gates: Discarding doorbells",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 102*/{(0x1C), 1, PXP2_REG_RQ_GARB,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: PSWRQ Close the gates is asserted. Check AEU AFTER_INVERT registers for parity errors",
+ {NA, NA, 0x1000, 0, NA, NA} },
+
+/*line 103*/{(0x1F), 1, DMAE_REG_GO_C0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 0 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 104*/{(0x1F), 1, DMAE_REG_GO_C1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 1 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 105*/{(0x1F), 1, DMAE_REG_GO_C2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 2 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 106*/{(0x1F), 1, DMAE_REG_GO_C3,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 3 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 107*/{(0x1F), 1, DMAE_REG_GO_C4,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 4 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 108*/{(0x1F), 1, DMAE_REG_GO_C5,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 5 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 109*/{(0x1F), 1, DMAE_REG_GO_C6,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 6 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 110*/{(0x1F), 1, DMAE_REG_GO_C7,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 7 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 111*/{(0x1F), 1, DMAE_REG_GO_C8,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 8 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 112*/{(0x1F), 1, DMAE_REG_GO_C9,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 9 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 113*/{(0x1F), 1, DMAE_REG_GO_C10,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 10 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 114*/{(0x1F), 1, DMAE_REG_GO_C11,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 11 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 115*/{(0x1F), 1, DMAE_REG_GO_C12,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 12 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 116*/{(0x1F), 1, DMAE_REG_GO_C13,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 13 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 117*/{(0x1F), 1, DMAE_REG_GO_C14,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 14 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 118*/{(0x1F), 1, DMAE_REG_GO_C15,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DMAE: command 15 go is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 119*/{(0x1F), 1, CFC_REG_ERROR_VECTOR,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CFC: error vector is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 120*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ARRIVING,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CFC: number of arriving LCIDs is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 121*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ALLOC,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CFC: number of alloc LCIDs is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 122*/{(0x1F), 1, CFC_REG_NUM_LCIDS_LEAVING,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CFC: number of leaving LCIDs is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 123*/{(0x1F), 7, CFC_REG_INFO_RAM,
+ CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_neq_r2,
+ CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CFC: AC is neither 0 nor 2 on connType 0 (ETH)",
+ {NA, NA, 0, 0, 2, NA} },
+
+/*line 124*/{(0x1F), 7, CFC_REG_INFO_RAM,
+ CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
+ CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CFC: AC is not 0 on connType 1 (TOE)",
+ {NA, NA, 1, 0, NA, NA} },
+
+/*line 125*/{(0x1F), 7, CFC_REG_INFO_RAM,
+ CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
+ CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CFC: AC is not 0 on connType 3 (iSCSI)",
+ {NA, NA, 3, 0, NA, NA} },
+
+/*line 126*/{(0x1F), 7, CFC_REG_INFO_RAM,
+ CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
+ CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CFC: AC is not 0 on connType 4 (FCoE)",
+ {NA, NA, 4, 0, NA, NA} },
+
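+/* QM occupancy and credits: all task queues empty, and VOQ/port byte
+ * credits restored to their initial values.
+ */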
+/*line 127*/{(0x1F), 2, QM_REG_QTASKCTR_0,
+ NA, 64, 4, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Queue is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 128*/{(0xF), 3, QM_REG_VOQCREDIT_0,
+ QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_0, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 129*/{(0xF), 3, QM_REG_VOQCREDIT_1,
+ QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_1, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 130*/{(0xF), 3, QM_REG_VOQCREDIT_4,
+ QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_4, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 131*/{(0x3), 3, QM_REG_PORT0BYTECRD,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: P0 Byte credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 132*/{(0x3), 3, QM_REG_PORT1BYTECRD,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: P1 Byte credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 133*/{(0x1F), 1, CCM_REG_CAM_OCCUP,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CCM: XX protection CAM is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 134*/{(0x1F), 1, TCM_REG_CAM_OCCUP,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TCM: XX protection CAM is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 135*/{(0x1F), 1, UCM_REG_CAM_OCCUP,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "UCM: XX protection CAM is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 136*/{(0x1F), 1, XCM_REG_CAM_OCCUP,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XCM: XX protection CAM is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 137*/{(0x1F), 1, BRB1_REG_NUM_OF_FULL_BLOCKS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "BRB1: BRB is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
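+/* CM and SEM/SDM quiescence: XX protection CAMs and the BRB empty, no
+ * sleeping SEM threads, and the external-store, parser serial, parser
+ * SYNC and PXP read-data FIFOs all reporting empty (the *_EMPTY flags
+ * read 1).
+ */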
+/*line 138*/{(0x1F), 1, CSEM_REG_SLEEP_THREADS_VALID,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSEM: There are sleeping threads",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 139*/{(0x1F), 1, TSEM_REG_SLEEP_THREADS_VALID,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSEM: There are sleeping threads",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 140*/{(0x1F), 1, USEM_REG_SLEEP_THREADS_VALID,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USEM: There are sleeping threads",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 141*/{(0x1F), 1, XSEM_REG_SLEEP_THREADS_VALID,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSEM: There are sleeping threads",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 142*/{(0x1F), 1, CSEM_REG_SLOW_EXT_STORE_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSEM: External store FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 143*/{(0x1F), 1, TSEM_REG_SLOW_EXT_STORE_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSEM: External store FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 144*/{(0x1F), 1, USEM_REG_SLOW_EXT_STORE_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USEM: External store FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 145*/{(0x1F), 1, XSEM_REG_SLOW_EXT_STORE_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSEM: External store FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 146*/{(0x1F), 1, CSDM_REG_SYNC_PARSER_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSDM: Parser serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 147*/{(0x1F), 1, TSDM_REG_SYNC_PARSER_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSDM: Parser serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 148*/{(0x1F), 1, USDM_REG_SYNC_PARSER_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USDM: Parser serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 149*/{(0x1F), 1, XSDM_REG_SYNC_PARSER_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSDM: Parser serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 150*/{(0x1F), 1, CSDM_REG_SYNC_SYNC_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSDM: Parser SYNC serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 151*/{(0x1F), 1, TSDM_REG_SYNC_SYNC_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSDM: Parser SYNC serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 152*/{(0x1F), 1, USDM_REG_SYNC_SYNC_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USDM: Parser SYNC serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 153*/{(0x1F), 1, XSDM_REG_SYNC_SYNC_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSDM: Parser SYNC serial FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 154*/{(0x1F), 1, CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 155*/{(0x1F), 1, TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 156*/{(0x1F), 1, USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 157*/{(0x1F), 1, XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 158*/{(0x1F), 1, DORQ_REG_DQ_FILL_LVLF,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DORQ: DORQ queue is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
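+/* Per-block interrupt status registers must read zero; the pand_neq
+ * entries mask off bits that may legitimately be set at idle.
+ */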
+/*line 159*/{(0x1F), 1, CFC_REG_CFC_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CFC: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 160*/{(0x1F), 1, CDU_REG_CDU_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CDU: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 161*/{(0x1F), 1, CCM_REG_CCM_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CCM: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 162*/{(0x1F), 1, TCM_REG_TCM_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TCM: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 163*/{(0x1F), 1, UCM_REG_UCM_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UCM: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 164*/{(0x1F), 1, XCM_REG_XCM_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 165*/{(0xF), 1, PBF_REG_PBF_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PBF: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 166*/{(0x1F), 1, TM_REG_TM_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TIMERS: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 167*/{(0x1F), 1, DORQ_REG_DORQ_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "DORQ: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 168*/{(0x1F), 1, SRC_REG_SRC_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "SRCH: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 169*/{(0x1F), 1, PRS_REG_PRS_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PRS: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 170*/{(0x1F), 1, BRB1_REG_BRB1_INT_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "BRB1: Interrupt status is not 0",
+ {NA, NA, ~0xFC00, 0, NA, NA} },
+
+/*line 171*/{(0x1F), 1, GRCBASE_XPB + PB_REG_PB_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XPB: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 172*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UPB: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 173*/{(0x1), 1, PXP2_REG_PXP2_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: Interrupt status 0 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 174*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: Interrupt status 0 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 175*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: Interrupt status 1 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 176*/{(0x1F), 1, QM_REG_QM_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 177*/{(0x1F), 1, PXP_REG_PXP_INT_STS_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: P0 Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 178*/{(0x1F), 1, PXP_REG_PXP_INT_STS_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: P1 Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 179*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_INT_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: Interrupt status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 180*/{(0x1F), 1, DORQ_REG_RSPA_CRD_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DORQ: Credit to XCM is not full",
+ {NA, NA, 2, NA, NA, NA} },
+
+/*line 181*/{(0x1F), 1, DORQ_REG_RSPB_CRD_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "DORQ: Credit to UCM is not full",
+ {NA, NA, 2, NA, NA, NA} },
+
+/*line 182*/{(0x3), 1, QM_REG_VOQCRDERRREG,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: Credit error register is not 0 (byte or credit overflow/underflow)",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 183*/{(0x1F), 1, DORQ_REG_DQ_FULL_ST,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "DORQ: DORQ queue is full",
+ {NA, NA, 0, NA, NA, NA} },
+
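+/* AEU after-invert registers for both ports and the MCP, masked to
+ * ignore attention bits that can be set during normal operation.
+ */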
+/*line 184*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "AEU: P0 AFTER_INVERT_1 is not 0",
+ {NA, NA, ~0xCFFC, 0, NA, NA} },
+
+/*line 185*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: P0 AFTER_INVERT_2 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 186*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: P0 AFTER_INVERT_3 is not 0",
+ {NA, NA, ~0xFFFF0000, 0, NA, NA} },
+
+/*line 187*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: P0 AFTER_INVERT_4 is not 0",
+ {NA, NA, ~0x801FFFFF, 0, NA, NA} },
+
+/*line 188*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_1,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "AEU: P1 AFTER_INVERT_1 is not 0",
+ {NA, NA, ~0xCFFC, 0, NA, NA} },
+
+/*line 189*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: P1 AFTER_INVERT_2 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 190*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_1,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: P1 AFTER_INVERT_3 is not 0",
+ {NA, NA, ~0xFFFF0000, 0, NA, NA} },
+
+/*line 191*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_1,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: P1 AFTER_INVERT_4 is not 0",
+ {NA, NA, ~0x801FFFFF, 0, NA, NA} },
+
+/*line 192*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_MCP,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "AEU: MCP AFTER_INVERT_1 is not 0",
+ {NA, NA, ~0xCFFC, 0, NA, NA} },
+
+/*line 193*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_MCP,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: MCP AFTER_INVERT_2 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 194*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_MCP,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: MCP AFTER_INVERT_3 is not 0",
+ {NA, NA, ~0xFFFF0000, 0, NA, NA} },
+
+/*line 195*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_MCP,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "AEU: MCP AFTER_INVERT_4 is not 0",
+ {NA, NA, ~0x801FFFFF, 0, NA, NA} },
+
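+/* PBF credits and task counters: per-port and per-queue credits must
+ * match their init values (these checks are qualified by the
+ * DISABLE_NEW_TASK_PROC_* registers), and all task counters must be 0.
+ */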
+/*line 196*/{(0xF), 5, PBF_REG_P0_CREDIT,
+ PBF_REG_P0_INIT_CRD, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_P0, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: P0 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 197*/{(0xF), 5, PBF_REG_P1_CREDIT,
+ PBF_REG_P1_INIT_CRD, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_P1, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: P1 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 198*/{(0xF), 3, PBF_REG_P4_CREDIT,
+ PBF_REG_P4_INIT_CRD, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: P4 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 199*/{(0x10), 5, PBF_REG_CREDIT_Q0,
+ PBF_REG_INIT_CRD_Q0, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_Q0, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q0 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 200*/{(0x10), 5, PBF_REG_CREDIT_Q1,
+ PBF_REG_INIT_CRD_Q1, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_Q1, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q1 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 201*/{(0x10), 5, PBF_REG_CREDIT_Q2,
+ PBF_REG_INIT_CRD_Q2, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_Q2, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q2 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 202*/{(0x10), 5, PBF_REG_CREDIT_Q3,
+ PBF_REG_INIT_CRD_Q3, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_Q3, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q3 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 203*/{(0x10), 5, PBF_REG_CREDIT_Q4,
+ PBF_REG_INIT_CRD_Q4, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_Q4, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q4 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 204*/{(0x10), 5, PBF_REG_CREDIT_Q5,
+ PBF_REG_INIT_CRD_Q5, 1, 0, pneq_r2,
+ PBF_REG_DISABLE_NEW_TASK_PROC_Q5, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q5 credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 205*/{(0x10), 3, PBF_REG_CREDIT_LB_Q,
+ PBF_REG_INIT_CRD_LB_Q, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: LB Q credit is not equal to init_crd",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 206*/{(0xF), 1, PBF_REG_P0_TASK_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: P0 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 207*/{(0xF), 1, PBF_REG_P1_TASK_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: P1 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 208*/{(0xF), 1, PBF_REG_P4_TASK_CNT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: P4 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 209*/{(0x10), 1, PBF_REG_TASK_CNT_Q0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q0 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 210*/{(0x10), 1, PBF_REG_TASK_CNT_Q1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q1 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 211*/{(0x10), 1, PBF_REG_TASK_CNT_Q2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q2 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 212*/{(0x10), 1, PBF_REG_TASK_CNT_Q3,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q3 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 213*/{(0x10), 1, PBF_REG_TASK_CNT_Q4,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q4 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 214*/{(0x10), 1, PBF_REG_TASK_CNT_Q5,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: Q5 task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 215*/{(0x10), 1, PBF_REG_TASK_CNT_LB_Q,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PBF: LB Q task_cnt is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
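+/* CM interface credits: CFC, QM, TM and FIC0/FIC1 credits, plus the
+ * XX protection free-entry counts, must be back at their expected
+ * initial values.
+ */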
+/*line 216*/{(0x1F), 1, XCM_REG_CFC_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XCM: CFC_INIT_CRD is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 217*/{(0x1F), 1, UCM_REG_CFC_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "UCM: CFC_INIT_CRD is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 218*/{(0x1F), 1, TCM_REG_CFC_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TCM: CFC_INIT_CRD is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 219*/{(0x1F), 1, CCM_REG_CFC_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CCM: CFC_INIT_CRD is not 1",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 220*/{(0x1F), 1, XCM_REG_XQM_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XCM: XQM_INIT_CRD is not 32",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 221*/{(0x1F), 1, UCM_REG_UQM_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "UCM: UQM_INIT_CRD is not 32",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 222*/{(0x1F), 1, TCM_REG_TQM_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TCM: TQM_INIT_CRD is not 32",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 223*/{(0x1F), 1, CCM_REG_CQM_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CCM: CQM_INIT_CRD is not 32",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 224*/{(0x1F), 1, XCM_REG_TM_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XCM: TM_INIT_CRD is not 4",
+ {NA, NA, 4, NA, NA, NA} },
+
+/*line 225*/{(0x1F), 1, UCM_REG_TM_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "UCM: TM_INIT_CRD is not 4",
+ {NA, NA, 4, NA, NA, NA} },
+
+/*line 226*/{(0x1F), 1, XCM_REG_FIC0_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "XCM: FIC0_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 227*/{(0x1F), 1, UCM_REG_FIC0_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "UCM: FIC0_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 228*/{(0x1F), 1, TCM_REG_FIC0_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TCM: FIC0_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 229*/{(0x1F), 1, CCM_REG_FIC0_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CCM: FIC0_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 230*/{(0x1F), 1, XCM_REG_FIC1_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XCM: FIC1_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 231*/{(0x1F), 1, UCM_REG_FIC1_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "UCM: FIC1_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 232*/{(0x1F), 1, TCM_REG_FIC1_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TCM: FIC1_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 233*/{(0x1F), 1, CCM_REG_FIC1_INIT_CRD,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CCM: FIC1_INIT_CRD is not 64",
+ {NA, NA, 64, NA, NA, NA} },
+
+/*line 234*/{(0x1), 1, XCM_REG_XX_FREE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XCM: XX_FREE differs from expected 31",
+ {NA, NA, 31, NA, NA, NA} },
+
+/*line 235*/{(0x1E), 1, XCM_REG_XX_FREE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XCM: XX_FREE differs from expected 32",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 236*/{(0x1F), 1, UCM_REG_XX_FREE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "UCM: XX_FREE differs from expected 27",
+ {NA, NA, 27, NA, NA, NA} },
+
+/*line 237*/{(0x7), 1, TCM_REG_XX_FREE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TCM: XX_FREE differs from expected 32",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 238*/{(0x18), 1, TCM_REG_XX_FREE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TCM: XX_FREE differs from expected 29",
+ {NA, NA, 29, NA, NA, NA} },
+
+/*line 239*/{(0x1F), 1, CCM_REG_XX_FREE,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CCM: XX_FREE differs from expected 24",
+ {NA, NA, 24, NA, NA, NA} },
+
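+/* SEM FOC credits, read directly from SEM fast memory (base + 0x18000,
+ * 0x18040, ...), compared against per-FOC initial credit values.
+ */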
+/*line 240*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18000,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSEM: FOC0 credit less than initial credit",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 241*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18040,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSEM: FOC1 credit less than initial credit",
+ {NA, NA, 24, NA, NA, NA} },
+
+/*line 242*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18080,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "XSEM: FOC2 credit less than initial credit",
+ {NA, NA, 12, NA, NA, NA} },
+
+/*line 243*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18000,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USEM: FOC0 credit less than initial credit",
+ {NA, NA, 26, NA, NA, NA} },
+
+/*line 244*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18040,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USEM: FOC1 credit less than initial credit",
+ {NA, NA, 78, NA, NA, NA} },
+
+/*line 245*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18080,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USEM: FOC2 credit less than initial credit",
+ {NA, NA, 16, NA, NA, NA} },
+
+/*line 246*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x180C0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "USEM: FOC3 credit less than initial credit",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 247*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18000,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSEM: FOC0 credit less than initial credit",
+ {NA, NA, 52, NA, NA, NA} },
+
+/*line 248*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18040,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSEM: FOC1 credit less than initial credit",
+ {NA, NA, 24, NA, NA, NA} },
+
+/*line 249*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18080,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSEM: FOC2 credit less than initial credit",
+ {NA, NA, 12, NA, NA, NA} },
+
+/*line 250*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x180C0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "TSEM: FOC3 credit less than initial credit",
+ {NA, NA, 32, NA, NA, NA} },
+
+/*line 251*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18000,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSEM: FOC0 credit less than initial credit",
+ {NA, NA, 16, NA, NA, NA} },
+
+/*line 252*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18040,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSEM: FOC1 credit less than initial credit",
+ {NA, NA, 18, NA, NA, NA} },
+
+/*line 253*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18080,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSEM: FOC2 credit less than initial credit",
+ {NA, NA, 48, NA, NA, NA} },
+
+/*line 254*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x180C0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "CSEM: FOC3 credit less than initial credit",
+ {NA, NA, 14, NA, NA, NA} },
+
+/*line 255*/{(0x1F), 1, PRS_REG_TSDM_CURRENT_CREDIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: TSDM current credit is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 256*/{(0x1F), 1, PRS_REG_TCM_CURRENT_CREDIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: TCM current credit is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 257*/{(0x1F), 1, PRS_REG_CFC_LD_CURRENT_CREDIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: CFC_LD current credit is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 258*/{(0x1F), 1, PRS_REG_CFC_SEARCH_CURRENT_CREDIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: CFC_SEARCH current credit is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 259*/{(0x1F), 1, PRS_REG_SRC_CURRENT_CREDIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: SRCH current credit is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 260*/{(0x1F), 1, PRS_REG_PENDING_BRB_PRS_RQ,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: PENDING_BRB_PRS_RQ is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 261*/{(0x1F), 2, PRS_REG_PENDING_BRB_CAC0_RQ,
+ NA, 5, 4, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: PENDING_BRB_CAC_RQ is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 262*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_LSB,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: SERIAL_NUM_STATUS_LSB is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 263*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_MSB,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "PRS: SERIAL_NUM_STATUS_MSB is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 264*/{(0x1F), 1, CDU_REG_ERROR_DATA,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CDU: ERROR_DATA is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
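+/* CM message-length mismatch latches: a nonzero value means some
+ * interface (STORM, SDM, SEM, PBF, DORQ or NIG) declared a message
+ * length different from what it actually sent.
+ */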
+/*line 265*/{(0x1F), 1, CCM_REG_STORM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CCM: STORM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 266*/{(0x1F), 1, CCM_REG_CSDM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CCM: CSDM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 267*/{(0x1F), 1, CCM_REG_TSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CCM: TSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 268*/{(0x1F), 1, CCM_REG_XSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CCM: XSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 269*/{(0x1F), 1, CCM_REG_USEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CCM: USEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 270*/{(0x1F), 1, CCM_REG_PBF_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CCM: PBF declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 271*/{(0x1F), 1, TCM_REG_STORM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TCM: STORM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 272*/{(0x1F), 1, TCM_REG_TSDM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TCM: TSDM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 273*/{(0x1F), 1, TCM_REG_PRS_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TCM: PRS declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 274*/{(0x1F), 1, TCM_REG_PBF_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TCM: PBF declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 275*/{(0x1F), 1, TCM_REG_USEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TCM: USEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 276*/{(0x1F), 1, TCM_REG_CSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TCM: CSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 277*/{(0x1F), 1, UCM_REG_STORM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UCM: STORM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 278*/{(0x1F), 1, UCM_REG_USDM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UCM: USDM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 279*/{(0x1F), 1, UCM_REG_TSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UCM: TSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 280*/{(0x1F), 1, UCM_REG_CSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UCM: CSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 281*/{(0x1F), 1, UCM_REG_XSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UCM: XSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 282*/{(0x1F), 1, UCM_REG_DORQ_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "UCM: DORQ declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 283*/{(0x1F), 1, XCM_REG_STORM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: STORM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 284*/{(0x1F), 1, XCM_REG_XSDM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: XSDM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 285*/{(0x1F), 1, XCM_REG_TSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: TSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 286*/{(0x1F), 1, XCM_REG_CSEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: CSEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 287*/{(0x1F), 1, XCM_REG_USEM_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: USEM declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 288*/{(0x1F), 1, XCM_REG_DORQ_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: DORQ declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 289*/{(0x1F), 1, XCM_REG_PBF_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: PBF declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 290*/{(0x1F), 1, XCM_REG_NIG0_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: NIG0 declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 291*/{(0x1F), 1, XCM_REG_NIG1_LENGTH_MIS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XCM: NIG1 declared message length unequal to actual",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 292*/{(0x1F), 1, QM_REG_XQM_WRC_FIFOLVL,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: XQM wrc_fifolvl is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 293*/{(0x1F), 1, QM_REG_UQM_WRC_FIFOLVL,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: UQM wrc_fifolvl is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 294*/{(0x1F), 1, QM_REG_TQM_WRC_FIFOLVL,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: TQM wrc_fifolvl is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 295*/{(0x1F), 1, QM_REG_CQM_WRC_FIFOLVL,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: CQM wrc_fifolvl is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 296*/{(0x1F), 1, QM_REG_QSTATUS_LOW,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: QSTATUS_LOW is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 297*/{(0x1F), 1, QM_REG_QSTATUS_HIGH,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: QSTATUS_HIGH is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 298*/{(0x1F), 1, QM_REG_PAUSESTATE0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: PAUSESTATE0 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 299*/{(0x1F), 1, QM_REG_PAUSESTATE1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: PAUSESTATE1 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 300*/{(0x1F), 1, QM_REG_OVFQNUM,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: OVFQNUM is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 301*/{(0x1F), 1, QM_REG_OVFERROR,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: OVFERROR is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 302*/{(0x1F), 6, QM_REG_PTRTBL,
+ NA, 64, 8, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: read and write variables not equal",
+ {NA, NA, NA, NA, NA, NA} },
+
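+/* Per-block parity status registers; the pand_neq entries mask off
+ * parity bits tolerated on the chip revisions selected by the mask.
+ */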
+/*line 303*/{(0x1F), 1, BRB1_REG_BRB1_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "BRB1: parity status is not 0",
+ {NA, NA, ~0x8, 0, NA, NA} },
+
+/*line 304*/{(0x1F), 1, CDU_REG_CDU_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "CDU: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 305*/{(0x1F), 1, CFC_REG_CFC_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "CFC: parity status is not 0",
+ {NA, NA, ~0x2, 0, NA, NA} },
+
+/*line 306*/{(0x1F), 1, CSDM_REG_CSDM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "CSDM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 307*/{(0x3), 1, DBG_REG_DBG_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "DBG: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 308*/{(0x1F), 1, DMAE_REG_DMAE_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "DMAE: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 309*/{(0x1F), 1, DORQ_REG_DORQ_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "DORQ: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 310*/{(0x1), 1, TCM_REG_TCM_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "TCM: parity status is not 0",
+ {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 311*/{(0x1E), 1, TCM_REG_TCM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "TCM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 312*/{(0x1), 1, CCM_REG_CCM_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "CCM: parity status is not 0",
+ {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 313*/{(0x1E), 1, CCM_REG_CCM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "CCM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 314*/{(0x1), 1, UCM_REG_UCM_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "UCM: parity status is not 0",
+ {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 315*/{(0x1E), 1, UCM_REG_UCM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "UCM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 316*/{(0x1), 1, XCM_REG_XCM_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "XCM: parity status is not 0",
+ {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 317*/{(0x1E), 1, XCM_REG_XCM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "XCM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 318*/{(0x1), 1, HC_REG_HC_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "HC: parity status is not 0",
+ {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 319*/{(0x1), 1, MISC_REG_MISC_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "MISC: parity status is not 0",
+ {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 320*/{(0x1F), 1, PRS_REG_PRS_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PRS: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 321*/{(0x1F), 1, PXP_REG_PXP_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 322*/{(0x1F), 1, QM_REG_QM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "QM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 323*/{(0x1), 1, SRC_REG_SRC_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "SRCH: parity status is not 0",
+ {NA, NA, ~0x4, 0, NA, NA} },
+
+/*line 324*/{(0x1F), 1, TSDM_REG_TSDM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "TSDM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 325*/{(0x1F), 1, USDM_REG_USDM_PRTY_STS,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "USDM: parity status is not 0",
+ {NA, NA, ~0x20, 0, NA, NA} },
+
+/*line 326*/{(0x1F), 1, XSDM_REG_XSDM_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "XSDM: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 327*/{(0x1F), 1, GRCBASE_XPB + PB_REG_PB_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "XPB: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 328*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "UPB: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 329*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "CSEM: parity status 0 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 330*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_0,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: parity status 0 is not 0",
+ {NA, NA, ~0xfff40020, 0, NA, NA} },
+
+/*line 331*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_0,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: parity status 0 is not 0",
+ {NA, NA, ~0x20, 0, NA, NA} },
+
+/*line 332*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "TSEM: parity status 0 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 333*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "USEM: parity status 0 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 334*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "XSEM: parity status 0 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 335*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "CSEM: parity status 1 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 336*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_1,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: parity status 1 is not 0",
+ {NA, NA, ~0x20, 0, NA, NA} },
+
+/*line 337*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PXP2: parity status 1 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 338*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "TSEM: parity status 1 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 339*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "USEM: parity status 1 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 340*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_1,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "XSEM: parity status 1 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 341*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGLUE_B: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 342*/{(0x2), 2, QM_REG_QTASKCTR_EXT_A_0,
+ NA, 64, 4, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Q_EXT_A (upper 64 queues), Queue is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 343*/{(0x2), 1, QM_REG_QSTATUS_LOW_EXT_A,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: QSTATUS_LOW_EXT_A is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 344*/{(0x2), 1, QM_REG_QSTATUS_HIGH_EXT_A,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: QSTATUS_HIGH_EXT_A is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 345*/{(0x1E), 1, QM_REG_PAUSESTATE2,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: PAUSESTATE2 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 346*/{(0x1E), 1, QM_REG_PAUSESTATE3,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: PAUSESTATE3 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 347*/{(0x2), 1, QM_REG_PAUSESTATE4,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: PAUSESTATE4 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 348*/{(0x2), 1, QM_REG_PAUSESTATE5,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: PAUSESTATE5 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 349*/{(0x2), 1, QM_REG_PAUSESTATE6,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: PAUSESTATE6 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 350*/{(0x2), 1, QM_REG_PAUSESTATE7,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "QM: PAUSESTATE7 is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 351*/{(0x2), 6, QM_REG_PTRTBL_EXT_A,
+ NA, 64, 8, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: read and write variables not equal in ext table",
+ {NA, NA, NA, NA, NA, NA} },
+
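+/* MISC catastrophic indications: system-kill latches and an
+ * unexpected PCIe hot-reset assertion.
+ */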
+/*line 352*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_OCCURRED,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "MISC: system kill occurred;",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 353*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_0,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "MISC: system kill occurred; status_0 register",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 354*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "MISC: system kill occurred; status_1 register",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 355*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_2,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "MISC: system kill occurred; status_2 register",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 356*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_3,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "MISC: system kill occurred; status_3 register",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 357*/{(0x1E), 1, MISC_REG_PCIE_HOT_RESET,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_WARNING,
+ "MISC: pcie_rst_b was asserted without perst assertion",
+ {NA, NA, 0, NA, NA, NA} },
+
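+/* NIG interrupt and parity status, split per chip revision, with
+ * pause- and BMAC-related bits classified separately.
+ */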
+/*line 358*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "NIG: interrupt 0 is active",
+ {NA, NA, ~0x300, 0, NA, NA} },
+
+/*line 359*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0,
+ NA, NA, NA, peq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Access to BMAC while not active. If tested on FPGA, ignore this warning",
+ {NA, NA, 0x300, NA, NA, NA} },
+
+/*line 360*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "NIG: interrupt 1 is active",
+ {NA, NA, 0x783FF03, 0, NA, NA} },
+
+/*line 361*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: port cos was paused too long",
+ {NA, NA, ~0x783FF0F, 0, NA, NA} },
+
+/*line 362*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Got packets w/o Outer-VLAN in MF mode",
+ {NA, NA, 0xC, 0, NA, NA} },
+
+/*line 363*/{(0x2), 1, NIG_REG_NIG_PRTY_STS,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "NIG: parity interrupt is active",
+ {NA, NA, ~0xFFC00000, 0, NA, NA} },
+
+/*line 364*/{(0x1C), 1, NIG_REG_NIG_PRTY_STS_0,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "NIG: parity 0 interrupt is active",
+ {NA, NA, ~0xFFC00000, 0, NA, NA} },
+
+/*line 365*/{(0x4), 1, NIG_REG_NIG_PRTY_STS_1,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "NIG: parity 1 interrupt is active",
+ {NA, NA, 0xff, 0, NA, NA} },
+
+/*line 366*/{(0x18), 1, NIG_REG_NIG_PRTY_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "NIG: parity 1 interrupt is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 367*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "TSEM: interrupt 0 is active",
+ {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 368*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0,
+ NA, NA, NA, peq,
+ NA, IDLE_CHK_WARNING,
+ "TSEM: interrupt 0 is active",
+ {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 369*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TSEM: interrupt 1 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 370*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "CSEM: interrupt 0 is active",
+ {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 371*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0,
+ NA, NA, NA, peq,
+ NA, IDLE_CHK_WARNING,
+ "CSEM: interrupt 0 is active",
+ {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 372*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CSEM: interrupt 1 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 373*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "USEM: interrupt 0 is active",
+ {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 374*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0,
+ NA, NA, NA, peq,
+ NA, IDLE_CHK_WARNING,
+ "USEM: interrupt 0 is active",
+ {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 375*/{(0x1F), 1, USEM_REG_USEM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "USEM: interrupt 1 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 376*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0,
+ NA, NA, NA, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "XSEM: interrupt 0 is active",
+ {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 377*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0,
+ NA, NA, NA, peq,
+ NA, IDLE_CHK_WARNING,
+ "XSEM: interrupt 0 is active",
+ {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 378*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XSEM: interrupt 1 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 379*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_0,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TSDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 380*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "TSDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 381*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_0,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CSDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 382*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "CSDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 383*/{(0x1F), 1, USDM_REG_USDM_INT_STS_0,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "USDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 384*/{(0x1F), 1, USDM_REG_USDM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "USDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 385*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_0,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XSDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 386*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_1,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_ERROR,
+ "XSDM: interrupt 0 is active",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 387*/{(0x2), 1, HC_REG_HC_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "HC: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 388*/{(0x1E), 1, MISC_REG_MISC_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "MISC: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 389*/{(0x1E), 1, SRC_REG_SRC_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "SRCH: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 390*/{(0xC), 3, QM_REG_BYTECRD0,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 0 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 391*/{(0xC), 3, QM_REG_BYTECRD1,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 1 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 392*/{(0xC), 3, QM_REG_BYTECRD2,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 2 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 393*/{(0x1C), 1, QM_REG_VOQCRDERRREG,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "QM: VOQ credit error register is not 0 (VOQ credit overflow/underflow)",
+ {NA, NA, 0xFFFF, 0, NA, NA} },
+
+/*line 394*/{(0x1C), 1, QM_REG_BYTECRDERRREG,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "QM: Byte credit error register is not 0 (Byte credit overflow/underflow)",
+ {NA, NA, 0xFFF, 0, NA, NA} },
+
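+/* PGLUE_B: no FLR, SR-IOV disable or config-space requests may be
+ * left outstanding.
+ */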
+/*line 395*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_31_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: FLR request is set for VF addresses 31-0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 396*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_63_32,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: FLR request is set for VF addresses 63-32",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 397*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_95_64,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: FLR request is set for VF addresses 95-64",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 398*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_127_96,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: FLR request is set for VF addresses 127-96",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 399*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_PF_7_0,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: FLR request is set for PF addresses 7-0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 400*/{(0x1C), 1, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: SR-IOV disable request is set",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 401*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_A_REQUEST,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: Cfg-Space A request is set",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 402*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_B_REQUEST,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "PGL: Cfg-Space B request is set",
+ {NA, NA, 0, NA, NA, NA} },
+
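+/* IGU quiescence: no unauthorized or silently dropped commands, no
+ * pending write-done indications, and all internal FSMs back in their
+ * idle states.
+ */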
+/*line 403*/{(0x1C), 1, IGU_REG_ERROR_HANDLING_DATA_VALID,
+ NA, NA, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: some unauthorized commands arrived to the IGU. Use igu_dump_fifo utility for more details",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 404*/{(0x1C), 1, IGU_REG_ATTN_WRITE_DONE_PENDING,
+ NA, NA, NA, pneq,
+ NA, IDLE_CHK_WARNING,
+ "IGU attention message write done pending is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 405*/{(0x1C), 1, IGU_REG_WRITE_DONE_PENDING,
+ NA, 5, 4, pneq,
+ NA, IDLE_CHK_WARNING,
+ "IGU MSI/MSIX message write done pending is not empty",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 406*/{(0x1C), 1, IGU_REG_IGU_PRTY_STS,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: parity status is not 0",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 407*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_0)",
+ {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 408*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_0)",
+ {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 409*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_1)",
+ {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 410*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_1)",
+ {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 411*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
+ MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (MCP)",
+ {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 412*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
+ MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq,
+ NA, IDLE_CHK_ERROR,
+ "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (MCP)",
+ {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 413*/{(0x1C), 1, IGU_REG_SILENT_DROP,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "Some messages were not executed in the IGU",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 414*/{(0x1C), 1, PXP2_REG_PSWRQ_BW_CREDIT,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR,
+ "PXP2: rq_read_credit and rq_write_credit are not 5",
+ {NA, NA, 0x2D, NA, NA, NA} },
+
+/*line 415*/{(0x1C), 1, IGU_REG_SB_CTRL_FSM,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: block is not in idle. SB_CTRL_FSM should be zero in idle state",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 416*/{(0x1C), 1, IGU_REG_INT_HANDLE_FSM,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: block is not in idle. INT_HANDLE_FSM should be zero in idle state",
+ {NA, NA, 0, NA, NA, NA} },
+
+/*line 417*/{(0x1C), 1, IGU_REG_ATTN_FSM,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: block is not in idle. SB_ATTN_FSMshould be zeroor two in idle state",
+ {NA, NA, ~0x2, 0, NA, NA} },
+
+/*line 418*/{(0x1C), 1, IGU_REG_CTRL_FSM,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: block is not in idle. SB_CTRL_FSM should be zero in idle state",
+ {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 419*/{(0x1C), 1, IGU_REG_PXP_ARB_FSM,
+ NA, 1, 0, pand_neq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: block is not in idle. SB_ARB_FSM should be zero in idle state",
+ {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 420*/{(0x1C), 1, IGU_REG_PENDING_BITS_STATUS,
+ NA, 5, 4, pneq,
+ NA, IDLE_CHK_WARNING,
+ "IGU: block is not in idle. There are pending write done",
+ {NA, NA, 0, NA, NA, NA} },
+
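+/* Chip-mask 0x10 only: per-VOQ and per-byte-credit checks, plus a
+ * sanity check that no FW VOQ is mapped to HwVoq7 (the non-TX VOQ).
+ */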
+/*line 421*/{(0x10), 3, QM_REG_VOQCREDIT_0,
+ QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_0, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 422*/{(0x10), 3, QM_REG_VOQCREDIT_1,
+ QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_1, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 423*/{(0x10), 3, QM_REG_VOQCREDIT_2,
+ QM_REG_VOQINITCREDIT_2, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_2, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 424*/{(0x10), 3, QM_REG_VOQCREDIT_3,
+ QM_REG_VOQINITCREDIT_3, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_3, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 425*/{(0x10), 3, QM_REG_VOQCREDIT_4,
+ QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_4, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 426*/{(0x10), 3, QM_REG_VOQCREDIT_5,
+ QM_REG_VOQINITCREDIT_5, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_5, VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 427*/{(0x10), 3, QM_REG_VOQCREDIT_6,
+ QM_REG_VOQINITCREDIT_6, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: VOQ_6 (LB VOQ), VOQ credit is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 428*/{(0x10), 3, QM_REG_BYTECRD0,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 0 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 429*/{(0x10), 3, QM_REG_BYTECRD1,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 1 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 430*/{(0x10), 3, QM_REG_BYTECRD2,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 2 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 431*/{(0x10), 3, QM_REG_BYTECRD3,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 3 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 432*/{(0x10), 3, QM_REG_BYTECRD4,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 4 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 433*/{(0x10), 3, QM_REG_BYTECRD5,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 5 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 434*/{(0x10), 3, QM_REG_BYTECRD6,
+ QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "QM: Byte credit 6 is not equal to initial credit",
+ {NA, NA, NA, NA, NA, NA} },
+
+/*line 435*/{(0x10), 1, QM_REG_FWVOQ0TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq0 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 436*/{(0x10), 1, QM_REG_FWVOQ1TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq1 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 437*/{(0x10), 1, QM_REG_FWVOQ2TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq2 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 438*/{(0x10), 1, QM_REG_FWVOQ3TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq3 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 439*/{(0x10), 1, QM_REG_FWVOQ4TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq4 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 440*/{(0x10), 1, QM_REG_FWVOQ5TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq5 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 441*/{(0x10), 1, QM_REG_FWVOQ6TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq6 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 442*/{(0x10), 1, QM_REG_FWVOQ7TOHWVOQ,
+ NA, 1, 0, peq,
+ NA, IDLE_CHK_ERROR,
+ "QM: FwVoq7 is mapped to HwVoq7 (non-TX HwVoq)",
+ {NA, NA, 0x7, NA, NA, NA} },
+
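+/* NIG FIFO occupancy: EOP, MCP, LLH, MAC-interface and PBF-interface
+ * FIFOs must all report empty (the *_EMPTY flags read 1).
+ */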
+/*line 443*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT0_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 0 EOP FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 444*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT1_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 1 EOP FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 445*/{(0x1F), 1, NIG_REG_INGRESS_EOP_LB_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: LB EOP FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 446*/{(0x1F), 1, NIG_REG_INGRESS_RMP0_DSCR_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 0 RX MCP descriptor FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 447*/{(0x1F), 1, NIG_REG_INGRESS_RMP1_DSCR_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 1 RX MCP descriptor FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 448*/{(0x1F), 1, NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: PBF LB FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 449*/{(0x1F), 1, NIG_REG_EGRESS_MNG0_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 0 TX MCP FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 450*/{(0x1F), 1, NIG_REG_EGRESS_MNG1_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 1 TX MCP FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 451*/{(0x1F), 1, NIG_REG_EGRESS_DEBUG_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Debug FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 452*/{(0x1F), 1, NIG_REG_EGRESS_DELAY0_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: PBF IF0 FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 453*/{(0x1F), 1, NIG_REG_EGRESS_DELAY1_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: PBF IF1 FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 454*/{(0x1F), 1, NIG_REG_LLH0_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 0 RX LLH FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 455*/{(0x1F), 1, NIG_REG_LLH1_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 1 RX LLH FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 456*/{(0x1C), 1, NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 0 TX MCP FIFO for traffic going to the host is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 457*/{(0x1C), 1, NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 1 TX MCP FIFO for traffic going to the host is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 458*/{(0x1C), 1, NIG_REG_P0_TLLH_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 0 TX LLH FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 459*/{(0x1C), 1, NIG_REG_P1_TLLH_FIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 1 TX LLH FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 460*/{(0x1C), 1, NIG_REG_P0_HBUF_DSCR_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 0 RX MCP descriptor FIFO for traffic from the host is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 461*/{(0x1C), 1, NIG_REG_P1_HBUF_DSCR_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_WARNING,
+ "NIG: Port 1 RX MCP descriptor FIFO for traffic from the host is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 462*/{(0x18), 1, NIG_REG_P0_RX_MACFIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 0 RX MAC interface FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 463*/{(0x18), 1, NIG_REG_P1_RX_MACFIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 1 RX MAC interface FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 464*/{(0x18), 1, NIG_REG_P0_TX_MACFIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 0 TX MAC interface FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 465*/{(0x18), 1, NIG_REG_P1_TX_MACFIFO_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: Port 1 TX MAC interface FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 466*/{(0x10), 1, NIG_REG_EGRESS_DELAY2_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: PBF IF2 FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 467*/{(0x10), 1, NIG_REG_EGRESS_DELAY3_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: PBF IF3 FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 468*/{(0x10), 1, NIG_REG_EGRESS_DELAY4_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: PBF IF4 FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+
+/*line 469*/{(0x10), 1, NIG_REG_EGRESS_DELAY5_EMPTY,
+ NA, 1, 0, pneq,
+ NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+ "NIG: PBF IF5 FIFO is not empty",
+ {NA, NA, 1, NA, NA, NA} },
+};
+
+/* handle self test failures according to severity and type */
+static void bnx2x_self_test_log(struct bnx2x *bp, u8 severity, char *message)
+{
+ switch (severity) {
+ case IDLE_CHK_ERROR:
+ BNX2X_ERR("ERROR %s", message);
+ idle_chk_errors++;
+ break;
+ case IDLE_CHK_ERROR_NO_TRAFFIC:
+ DP(NETIF_MSG_HW, "INFO %s", message);
+ break;
+ case IDLE_CHK_WARNING:
+ DP(NETIF_MSG_HW, "WARNING %s", message);
+ idle_chk_warnings++;
+ break;
+ }
+}
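+
+/* Note: IDLE_CHK_ERROR_NO_TRAFFIC entries are logged but bump neither
+ * counter above - they are only meaningful on a truly idle (no-traffic)
+ * device, so they never fail the test on their own.
+ */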
+
+/* specific test for QM rd/wr pointers and rd/wr banks */
+static void bnx2x_idle_chk6(struct bnx2x *bp,
+ struct st_record *rec, char *message)
+{
+ u32 rd_ptr, wr_ptr, rd_bank, wr_bank;
+ int i;
+
+ for (i = 0; i < rec->loop; i++) {
+ /* read regs */
+ rec->pred_args.val1 =
+ REG_RD(bp, rec->reg1 + i * rec->incr);
+ rec->pred_args.val2 =
+ REG_RD(bp, rec->reg1 + i * rec->incr + 4);
+
+ /* calc read and write pointers */
+ rd_ptr = ((rec->pred_args.val1 & 0x3FFFFFC0) >> 6);
+ wr_ptr = ((((rec->pred_args.val1 & 0xC0000000) >> 30) & 0x3) |
+ ((rec->pred_args.val2 & 0x3FFFFF) << 2));
+
+		/* perform pointer test */
+ if (rd_ptr != wr_ptr) {
+ snprintf(message, MAX_FAIL_MSG,
+				 "QM: PTRTBL entry %d - rd_ptr is not equal to wr_ptr. Values are 0x%x and 0x%x\n",
+ i, rd_ptr, wr_ptr);
+ bnx2x_self_test_log(bp, rec->severity, message);
+ }
+
+ /* calculate read and write banks */
+ rd_bank = ((rec->pred_args.val1 & 0x30) >> 4);
+ wr_bank = (rec->pred_args.val1 & 0x03);
+
+ /* perform bank test */
+ if (rd_bank != wr_bank) {
+ snprintf(message, MAX_FAIL_MSG,
+ "QM: PTRTBL entry %d - rd_bank is not equal to wr_bank. Values are 0x%x 0x%x\n",
+ i, rd_bank, wr_bank);
+ bnx2x_self_test_log(bp, rec->severity, message);
+ }
+ }
+}
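+
+/* Bit layout assumed by the decode above (inferred from the masks rather
+ * than from a datasheet): each PTRTBL entry is read as two 32-bit words,
+ * where
+ *	val1[1:0]   - wr_bank
+ *	val1[5:4]   - rd_bank
+ *	val1[29:6]  - rd_ptr
+ *	val1[31:30] - wr_ptr[1:0]
+ *	val2[21:0]  - wr_ptr[23:2]
+ * For example, val1 = 0x40000151 with val2 = 0x00000001 decodes to
+ * rd_ptr = 5, wr_ptr = 5, rd_bank = 1, wr_bank = 1 - a consistent entry.
+ */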
+
+/* specific test for cfc info ram and cid cam */
+static void bnx2x_idle_chk7(struct bnx2x *bp,
+ struct st_record *rec, char *message)
+{
+ int i;
+
+ /* iterate through lcids */
+ for (i = 0; i < rec->loop; i++) {
+ /* make sure cam entry is valid (bit 0) */
+ if ((REG_RD(bp, (rec->reg2 + i * 4)) & 0x1) != 0x1)
+ continue;
+
+ /* get connection type (multiple reads due to widebus) */
+ REG_RD(bp, (rec->reg1 + i * rec->incr));
+ REG_RD(bp, (rec->reg1 + i * rec->incr + 4));
+ rec->pred_args.val1 =
+ REG_RD(bp, (rec->reg1 + i * rec->incr + 8));
+ REG_RD(bp, (rec->reg1 + i * rec->incr + 12));
+
+ /* obtain connection type */
+ if (is_e1 || is_e1h) {
+ /* E1 E1H (bits 4..7) */
+ rec->pred_args.val1 &= 0x78;
+ rec->pred_args.val1 >>= 3;
+ } else {
+ /* E2 E3A0 E3B0 (bits 26..29) */
+ rec->pred_args.val1 &= 0x1E000000;
+ rec->pred_args.val1 >>= 25;
+ }
+
+ /* get activity counter value */
+ rec->pred_args.val2 = REG_RD(bp, rec->reg3 + i * 4);
+
+ /* validate ac value is legal for con_type at idle state */
+ if (rec->bnx2x_predicate(&rec->pred_args)) {
+ snprintf(message, MAX_FAIL_MSG,
+ "%s. Values are 0x%x 0x%x\n", rec->fail_msg,
+ rec->pred_args.val1, rec->pred_args.val2);
+ bnx2x_self_test_log(bp, rec->severity, message);
+ }
+ }
+}
+
+/* self test procedure
+ * scan auto-generated database
+ * for each line:
+ * 1. compare chip mask
+ * 2. determine type (according to macro number)
+ * 3. read registers
+ * 4. call predicate
+ * 5. collate results and statistics
+ */
+int bnx2x_idle_chk(struct bnx2x *bp)
+{
+ u16 i; /* loop counter */
+ u16 st_ind; /* self test database access index */
+ struct st_record rec; /* current record variable */
+ char message[MAX_FAIL_MSG]; /* message to log */
+
+	/* init stats */
+ idle_chk_errors = 0;
+ idle_chk_warnings = 0;
+
+	/* create masks for all chip types */
+ is_e1 = CHIP_IS_E1(bp);
+ is_e1h = CHIP_IS_E1H(bp);
+ is_e2 = CHIP_IS_E2(bp);
+ is_e3a0 = CHIP_IS_E3A0(bp);
+ is_e3b0 = CHIP_IS_E3B0(bp);
+
+	/* database main loop */
+ for (st_ind = 0; st_ind < ST_DB_LINES; st_ind++) {
+ rec = st_database[st_ind];
+
+		/* check if test applies to chip */
+ if (!((rec.chip_mask & IDLE_CHK_E1) && is_e1) &&
+ !((rec.chip_mask & IDLE_CHK_E1H) && is_e1h) &&
+ !((rec.chip_mask & IDLE_CHK_E2) && is_e2) &&
+ !((rec.chip_mask & IDLE_CHK_E3A0) && is_e3a0) &&
+ !((rec.chip_mask & IDLE_CHK_E3B0) && is_e3b0))
+ continue;
+
+ /* identify macro */
+ switch (rec.macro) {
+ case 1:
+ /* read single reg and call predicate */
+ rec.pred_args.val1 = REG_RD(bp, rec.reg1);
+ DP(BNX2X_MSG_IDLE, "mac1 add %x\n", rec.reg1);
+ if (rec.bnx2x_predicate(&rec.pred_args)) {
+ snprintf(message, sizeof(message),
+ "%s.Value is 0x%x\n", rec.fail_msg,
+ rec.pred_args.val1);
+ bnx2x_self_test_log(bp, rec.severity, message);
+ }
+ break;
+ case 2:
+ /* read repeatedly starting from reg1 and call
+ * predicate after each read
+ */
+ for (i = 0; i < rec.loop; i++) {
+ rec.pred_args.val1 =
+ REG_RD(bp, rec.reg1 + i * rec.incr);
+ DP(BNX2X_MSG_IDLE, "mac2 add %x\n", rec.reg1);
+ if (rec.bnx2x_predicate(&rec.pred_args)) {
+ snprintf(message, sizeof(message),
+ "%s. Value is 0x%x in loop %d\n",
+ rec.fail_msg,
+ rec.pred_args.val1, i);
+ bnx2x_self_test_log(bp, rec.severity,
+ message);
+ }
+ }
+ break;
+ case 3:
+ /* read two regs and call predicate */
+ rec.pred_args.val1 = REG_RD(bp, rec.reg1);
+ rec.pred_args.val2 = REG_RD(bp, rec.reg2);
+ DP(BNX2X_MSG_IDLE, "mac3 add1 %x add2 %x\n",
+ rec.reg1, rec.reg2);
+ if (rec.bnx2x_predicate(&rec.pred_args)) {
+ snprintf(message, sizeof(message),
+ "%s. Values are 0x%x 0x%x\n",
+ rec.fail_msg, rec.pred_args.val1,
+ rec.pred_args.val2);
+ bnx2x_self_test_log(bp, rec.severity, message);
+ }
+ break;
+ case 4:
+			/* unused to date */
+ for (i = 0; i < rec.loop; i++) {
+ rec.pred_args.val1 =
+ REG_RD(bp, rec.reg1 + i * rec.incr);
+ rec.pred_args.val2 =
+ (REG_RD(bp,
+ rec.reg2 + i * rec.incr)) >> 1;
+ if (rec.bnx2x_predicate(&rec.pred_args)) {
+ snprintf(message, sizeof(message),
+ "%s. Values are 0x%x 0x%x in loop %d\n",
+ rec.fail_msg,
+ rec.pred_args.val1,
+ rec.pred_args.val2, i);
+ bnx2x_self_test_log(bp, rec.severity,
+ message);
+ }
+ }
+ break;
+ case 5:
+ /* compare two regs, pending
+ * the value of a condition reg
+ */
+ rec.pred_args.val1 = REG_RD(bp, rec.reg1);
+ rec.pred_args.val2 = REG_RD(bp, rec.reg2);
+			DP(BNX2X_MSG_IDLE, "mac5 add1 %x add2 %x add3 %x\n",
+ rec.reg1, rec.reg2, rec.reg3);
+ if (REG_RD(bp, rec.reg3) != 0) {
+ if (rec.bnx2x_predicate(&rec.pred_args)) {
+ snprintf(message, sizeof(message),
+ "%s. Values are 0x%x 0x%x\n",
+ rec.fail_msg,
+ rec.pred_args.val1,
+ rec.pred_args.val2);
+ bnx2x_self_test_log(bp, rec.severity,
+ message);
+ }
+ }
+ break;
+ case 6:
+ /* compare read and write pointers
+ * and read and write banks in QM
+ */
+ bnx2x_idle_chk6(bp, &rec, message);
+ break;
+ case 7:
+ /* compare cfc info cam with cid cam */
+ bnx2x_idle_chk7(bp, &rec, message);
+ break;
+ default:
+ DP(BNX2X_MSG_IDLE,
+			   "unknown macro in self test database. macro %d line %d",
+ rec.macro, st_ind);
+ }
+ }
+
+ /* abort if interface is not running */
+ if (!netif_running(bp->dev))
+ return idle_chk_errors;
+
+	/* return value according to statistics */
+ if (idle_chk_errors == 0) {
+ DP(BNX2X_MSG_IDLE,
+ "completed successfully (logged %d warnings)\n",
+ idle_chk_warnings);
+ } else {
+ BNX2X_ERR("failed (with %d errors, %d warnings)\n",
+ idle_chk_errors, idle_chk_warnings);
+ }
+ return idle_chk_errors;
+}
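+
+/* Typical caller sketch (illustrative only; not a fixed API contract):
+ * run the check on a quiesced device and treat the return value as an
+ * error count:
+ *
+ *	int errors = bnx2x_idle_chk(bp);
+ *
+ *	if (errors)
+ *		BNX2X_ERR("idle check failed with %d errors\n", errors);
+ */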
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
new file mode 100644
index 0000000000..8e04552d22
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -0,0 +1,6563 @@
+/* bnx2x_sp.c: Qlogic Everest network driver.
+ *
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and Qlogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32c.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+
+#define BNX2X_MAX_EMUL_MULTI 16
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * bnx2x_exe_queue_init - init the Exe Queue object
+ *
+ * @bp: driver handle
+ * @o: pointer to the object
+ * @exe_len: length
+ * @owner: pointer to the owner
+ * @validate: validate function pointer
+ * @remove: remove function pointer
+ * @optimize: optimize function pointer
+ * @exec: execute function pointer
+ * @get: get function pointer
+ */
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ int exe_len,
+ union bnx2x_qable_obj *owner,
+ exe_q_validate validate,
+ exe_q_remove remove,
+ exe_q_optimize optimize,
+ exe_q_execute exec,
+ exe_q_get get)
+{
+ memset(o, 0, sizeof(*o));
+
+ INIT_LIST_HEAD(&o->exe_queue);
+ INIT_LIST_HEAD(&o->pending_comp);
+
+ spin_lock_init(&o->lock);
+
+ o->exe_chunk_len = exe_len;
+ o->owner = owner;
+
+ /* Owner specific callbacks */
+ o->validate = validate;
+ o->remove = remove;
+ o->optimize = optimize;
+ o->execute = exec;
+ o->get = get;
+
+ DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
+ exe_len);
+}
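+
+/* Illustrative wiring (a sketch; the actual vlan_mac object init further
+ * down in this file may pass a different chunk length):
+ *
+ *	bnx2x_exe_queue_init(bp, &o->exe_queue, 1, (union bnx2x_qable_obj *)o,
+ *			     bnx2x_validate_vlan_mac, bnx2x_remove_vlan_mac,
+ *			     bnx2x_optimize_vlan_mac, bnx2x_execute_vlan_mac,
+ *			     bnx2x_exeq_get_mac);
+ */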
+
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+ struct bnx2x_exeq_elem *elem)
+{
+ DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+ kfree(elem);
+}
+
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
+{
+ struct bnx2x_exeq_elem *elem;
+ int cnt = 0;
+
+ spin_lock_bh(&o->lock);
+
+ list_for_each_entry(elem, &o->exe_queue, link)
+ cnt++;
+
+ spin_unlock_bh(&o->lock);
+
+ return cnt;
+}
+
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp: driver handle
+ * @o: queue
+ * @elem: new command to add
+ * @restore: true - do not optimize the command
+ *
+ * If the element is optimized or is illegal, frees it.
+ */
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ bool restore)
+{
+ int rc;
+
+ spin_lock_bh(&o->lock);
+
+ if (!restore) {
+		/* Try to cancel this element against an opposite pending command */
+ rc = o->optimize(bp, o->owner, elem);
+ if (rc)
+ goto free_and_exit;
+
+ /* Check if this request is ok */
+ rc = o->validate(bp, o->owner, elem);
+ if (rc) {
+ DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
+ goto free_and_exit;
+ }
+ }
+
+	/* The command is valid - add it to the execution queue */
+ list_add_tail(&elem->link, &o->exe_queue);
+
+ spin_unlock_bh(&o->lock);
+
+ return 0;
+
+free_and_exit:
+ bnx2x_exe_queue_free_elem(bp, elem);
+
+ spin_unlock_bh(&o->lock);
+
+ return rc;
+}
+
+static inline void __bnx2x_exe_queue_reset_pending(
+ struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o)
+{
+ struct bnx2x_exeq_elem *elem;
+
+ while (!list_empty(&o->pending_comp)) {
+ elem = list_first_entry(&o->pending_comp,
+ struct bnx2x_exeq_elem, link);
+
+ list_del(&elem->link);
+ bnx2x_exe_queue_free_elem(bp, elem);
+ }
+}
+
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp: driver handle
+ * @o: queue
+ * @ramrod_flags: flags
+ *
+ * (Should be called while holding the exe_queue->lock).
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_exeq_elem *elem, spacer;
+ int cur_len = 0, rc;
+
+ memset(&spacer, 0, sizeof(spacer));
+
+ /* Next step should not be performed until the current is finished,
+ * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+ * properly clear object internals without sending any command to the FW
+ * which also implies there won't be any completion to clear the
+ * 'pending' list.
+ */
+ if (!list_empty(&o->pending_comp)) {
+ if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
+ __bnx2x_exe_queue_reset_pending(bp, o);
+ } else {
+ return 1;
+ }
+ }
+
+ /* Run through the pending commands list and create a next
+ * execution chunk.
+ */
+ while (!list_empty(&o->exe_queue)) {
+ elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+ link);
+ WARN_ON(!elem->cmd_len);
+
+ if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+ cur_len += elem->cmd_len;
+			/* Prevent both lists from being empty while moving an
+ * element. This will allow the call of
+ * bnx2x_exe_queue_empty() without locking.
+ */
+ list_add_tail(&spacer.link, &o->pending_comp);
+ mb();
+ list_move_tail(&elem->link, &o->pending_comp);
+ list_del(&spacer.link);
+ } else
+ break;
+ }
+
+ /* Sanity check */
+ if (!cur_len)
+ return 0;
+
+ rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+ if (rc < 0)
+ /* In case of an error return the commands back to the queue
+ * and reset the pending_comp.
+ */
+ list_splice_init(&o->pending_comp, &o->exe_queue);
+ else if (!rc)
+ /* If zero is returned, means there are no outstanding pending
+ * completions and we may dismiss the pending list.
+ */
+ __bnx2x_exe_queue_reset_pending(bp, o);
+
+ return rc;
+}
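+
+/* Why the on-stack spacer above matters: list_move_tail() has a window in
+ * which an element has left exe_queue but not yet joined pending_comp, so
+ * a lockless bnx2x_exe_queue_empty() could briefly observe both lists
+ * empty while work is still in flight. Parking the spacer on pending_comp
+ * first keeps at least one list non-empty for the whole move.
+ */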
+
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+ bool empty = list_empty(&o->exe_queue);
+
+ /* Don't reorder!!! */
+ mb();
+
+ return empty && list_empty(&o->pending_comp);
+}
+
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+ struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+ return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
+
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+ return !!test_bit(o->state, o->pstate);
+}
+
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+{
+ smp_mb__before_atomic();
+ clear_bit(o->state, o->pstate);
+ smp_mb__after_atomic();
+}
+
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+ smp_mb__before_atomic();
+ set_bit(o->state, o->pstate);
+ smp_mb__after_atomic();
+}
+
+/**
+ * bnx2x_state_wait - wait until the given bit(state) is cleared
+ *
+ * @bp: device handle
+ * @state: state which is to be cleared
+ * @pstate: state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+ unsigned long *pstate)
+{
+ /* can take a while if any port is running */
+ int cnt = 5000;
+
+ if (CHIP_REV_IS_EMUL(bp))
+ cnt *= 20;
+
+ DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+ might_sleep();
+ while (cnt--) {
+ if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
+#endif
+ return 0;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (bp->panic)
+ return -EIO;
+ }
+
+ /* timeout! */
+ BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+
+ return -EBUSY;
+}
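+
+/* With up to 5000 iterations of usleep_range(1000, 2000), the loop above
+ * allows roughly 5-10 seconds (20x that on emulation) before declaring a
+ * timeout.
+ */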
+
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+ return bnx2x_state_wait(bp, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ WARN_ON(!mp);
+
+ return mp->get_entry(mp, offset);
+}
+
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ WARN_ON(!mp);
+
+ return mp->get(mp, 1);
+}
+
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ WARN_ON(!vp);
+
+ return vp->get_entry(vp, offset);
+}
+
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ WARN_ON(!vp);
+
+ return vp->get(vp, 1);
+}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->get(mp, 1))
+ return false;
+
+ if (!vp->get(vp, 1)) {
+ mp->put(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put_entry(mp, offset);
+}
+
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put(mp, 1);
+}
+
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ return vp->put_entry(vp, offset);
+}
+
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ return vp->put(vp, 1);
+}
+
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->put(mp, 1))
+ return false;
+
+ if (!vp->put(vp, 1)) {
+ mp->get(mp, 1);
+ return false;
+ }
+
+ return true;
+}
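+
+/* The paired helpers above form a small two-resource transaction: the MAC
+ * credit is taken (or returned) first and rolled back if the VLAN side
+ * fails, so a VLAN-MAC pair never ends up holding only half of its CAM
+ * credits.
+ */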
+
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * Context: Non-blocking implementation; should be called under execution
+ * queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (o->head_reader) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+ return -EBUSY;
+ }
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+ return 0;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * Context: Should be called under execution queue lock; notice it might
+ * release and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+ unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+ ramrod_flags);
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+ if ((rc != 0) && (rc != 1)) {
+ BNX2X_ERR("execution of pending commands failed with rc %d\n",
+ rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
+}
+
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ * @ramrod_flags: ramrod flags of missed execution
+ *
+ * Context: Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long ramrod_flags)
+{
+ o->head_exe_request = true;
+ o->saved_ramrod_flags = ramrod_flags;
+ DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+ ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * Context: Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would perform it - possibly releasing and
+ * reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* It's possible a new pending execution was added since this writer
+ * executed. If so, execute again. [Ad infinitum]
+ */
+ while (o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+ __bnx2x_vlan_mac_h_exec_pending(bp, o);
+ }
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * Context: Should be called under the execution queue lock. May sleep. May
+ * release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* If we got here, we're holding lock --> no WRITER exists */
+ o->head_reader++;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+ o->head_reader);
+
+ return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * Context: May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+
+ spin_lock_bh(&o->exe_queue.lock);
+ rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * Context: Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would be performed if this was the last
+ * reader. possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (!o->head_reader) {
+ BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ } else {
+ o->head_reader--;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+ o->head_reader);
+ }
+
+ /* It's possible a new pending execution was added, and that this reader
+ * was last - if so we need to execute the command.
+ */
+ if (!o->head_reader && o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+ /* Writer release will do the trick */
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * Context: Notice if a pending execution exists, it would be performed if this
+ * was the last reader. Claims and releases the execution queue lock
+ * during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_read_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+}
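+
+/* Reader-side usage sketch (illustrative): any traversal of o->head should
+ * be bracketed by the reader lock, as bnx2x_get_n_elements() below does:
+ *
+ *	if (bnx2x_vlan_mac_h_read_lock(bp, o))
+ *		return;
+ *	list_for_each_entry(pos, &o->head, link)
+ *		do_something(pos);
+ *	bnx2x_vlan_mac_h_read_unlock(bp, o);
+ *
+ * where do_something() stands in for any read-only use of the registry.
+ */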
+
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *base, u8 stride, u8 size)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ u8 *next = base;
+ int counter = 0;
+ int read_lock;
+
+ DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
+
+ /* traverse list */
+ list_for_each_entry(pos, &o->head, link) {
+ if (counter < n) {
+ memcpy(next, &pos->u, size);
+ counter++;
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+ counter, next);
+ next += stride + size;
+ }
+ }
+
+ if (read_lock == 0) {
+ DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+ }
+
+ return counter * ETH_ALEN;
+}
+
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
+
+ if (!is_valid_ether_addr(data->mac.mac))
+ return -EINVAL;
+
+ /* Check if a requested MAC already exists */
+ list_for_each_entry(pos, &o->head, link)
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
+ return -EEXIST;
+
+ return 0;
+}
+
+static int bnx2x_check_vlan_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if (data->vlan.vlan == pos->u.vlan.vlan)
+ return -EEXIST;
+
+ return 0;
+}
+
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return -EEXIST;
+
+ return 0;
+}
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
+
+ list_for_each_entry(pos, &o->head, link)
+ if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if (data->vlan.vlan == pos->u.vlan.vlan)
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return pos;
+
+ return NULL;
+}
+
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ int rc;
+
+ /* Check if we can delete the requested configuration from the first
+ * object.
+ */
+ pos = src_o->check_del(bp, src_o, data);
+
+ /* check if configuration can be added */
+ rc = dst_o->check_add(bp, dst_o, data);
+
+ /* If this classification can not be added (is already set)
+ * or can't be deleted - return an error.
+ */
+ if (rc || !pos)
+ return false;
+
+ return true;
+}
+
+static bool bnx2x_check_move_always_err(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ return false;
+}
+
+static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ u8 rx_tx_flag = 0;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
+{
+ u32 wb_data[2];
+ u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+ NIG_REG_LLH0_FUNC_MEM;
+
+ if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
+ return;
+
+ if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
+ return;
+
+ DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
+ (add ? "ADD" : "DELETE"), index);
+
+ if (add) {
+ /* LLH_FUNC_MEM is a u64 WB register */
+ reg_offset += 8*index;
+
+ wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+ (dev_addr[4] << 8) | dev_addr[5]);
+ wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
+
+ REG_WR_DMAE(bp, reg_offset, wb_data, 2);
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+ NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
+}
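+
+/* Illustrative packing for the widebus write above: for dev_addr
+ * 00:11:22:33:44:55 the two LLH_FUNC_MEM words become
+ *	wb_data[0] = 0x22334455;	(MAC bytes 2..5)
+ *	wb_data[1] = 0x00000011;	(MAC bytes 0..1)
+ * i.e. one 64-bit value with the first MAC byte most significant.
+ */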
+
+/**
+ * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @bp: device handle
+ * @o: queue for which we want to configure this rule
+ * @add: if true the command is an ADD command, DEL otherwise
+ * @opcode: CLASSIFY_RULE_OPCODE_XXX
+ * @hdr: pointer to a header to setup
+ *
+ */
+static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
+ struct eth_classify_cmd_header *hdr)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ hdr->client_id = raw->cl_id;
+ hdr->func_id = raw->func_id;
+
+	/* Rx and/or Tx (internal switching) configuration? */
+ hdr->cmd_general_data |=
+ bnx2x_vlan_mac_get_rx_tx_flag(o);
+
+ if (add)
+ hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
+
+ hdr->cmd_general_data |=
+ (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid: connection id
+ * @type: BNX2X_FILTER_XXX_PENDING
+ * @hdr: pointer to header to setup
+ * @rule_cnt: number of rules configured in this ramrod data buffer
+ *
+ * Currently we always configure one rule, and set the echo field to contain
+ * a CID and an opcode type.
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
+ struct eth_classify_header *hdr, int rule_cnt)
+{
+ hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
+ hdr->rule_cnt = (u8)rule_cnt;
+}
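+
+/* The echo word thus carries both completion-routing keys at once: the low
+ * BNX2X_SWCID_SHIFT bits hold the SW connection id and the bits above them
+ * the BNX2X_FILTER_*_PENDING type; the event-ring handler later splits
+ * them apart again to locate the right object and its pending-state bit.
+ */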
+
+/* hw_config() callbacks */
+static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
+ unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+ u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+ /* Set LLH CAM entry: currently only iSCSI and ETH macs are
+ * relevant. In addition, current implementation is tuned for a
+ * single ETH MAC.
+ *
+	 * When a PF must configure multiple unicast ETH MACs in
+	 * switch-independent mode (NetQ, multiple netdev MACs, etc.),
+	 * consider making better use of the 8 per-function MAC entries
+	 * in the LLH register. There are also the
+	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the total
+	 * number of CAM entries to 16.
+ *
+ * Currently we won't configure NIG for MACs other than a primary ETH
+ * MAC and iSCSI L2 MAC.
+ *
+ * If this MAC is moving from one Queue to another, no need to change
+ * NIG configuration.
+ */
+ if (cmd != BNX2X_VLAN_MAC_MOVE) {
+ if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
+ bnx2x_set_mac_in_nig(bp, add, mac,
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE);
+ else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
+ bnx2x_set_mac_in_nig(bp, add, mac,
+ BNX2X_LLH_CAM_ETH_LINE);
+ }
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Setup a command header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
+ (add ? "add" : "delete"), mac, raw->cl_id);
+
+ /* Set a MAC itself */
+ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ /* Set a MAC itself */
+ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.
+ u.mac.is_inner_mac);
+ }
+
+ /* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+	 * writing
+	 */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
+ *
+ * @bp: device handle
+ * @o: queue
+ * @type: the type of echo
+ * @cam_offset: offset in cam memory
+ * @hdr: pointer to a header to setup
+ *
+ * E1/E1H
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
+ struct mac_configuration_hdr *hdr)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ hdr->length = 1;
+ hdr->offset = (u8)cam_offset;
+ hdr->client_id = cpu_to_le16(0xff);
+ hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
+}
+
+static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
+ u16 vlan_id, struct mac_configuration_entry *cfg_entry)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ u32 cl_bit_vec = (1 << r->cl_id);
+
+ cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
+ cfg_entry->pf_id = r->func_id;
+ cfg_entry->vlan_id = cpu_to_le16(vlan_id);
+
+ if (add) {
+ SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+ SET_FLAG(cfg_entry->flags,
+ MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
+
+ /* Set a MAC in a ramrod data */
+ bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+ &cfg_entry->middle_mac_addr,
+ &cfg_entry->lsb_mac_addr, mac);
+ } else
+ SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+}
+
+static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
+ u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
+{
+ struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
+ &config->hdr);
+ bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
+ cfg_entry);
+
+ DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
+ (add ? "setting" : "clearing"),
+ mac, raw->cl_id, cam_offset);
+}
+
+/**
+ * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @elem: bnx2x_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /* 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.mac.mac, 0,
+ ETH_VLAN_FILTER_ANY_VLAN, config);
+}
+
+static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
+ &rule_entry->vlan.header);
+
+ DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
+ vlan);
+
+ /* Set a VLAN itself */
+ rule_entry->vlan.vlan = cpu_to_le16(vlan);
+
+	/* MOVE: Add a rule that will add this VLAN to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_VLAN,
+ &rule_entry->vlan.header);
+
+ /* Set a VLAN itself */
+ rule_entry->vlan.vlan = cpu_to_le16(vlan);
+ }
+
+ /* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+	 * writing
+	 */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+ u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+ u16 inner_mac;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set VLAN and MAC themselves */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ target_obj = elem->cmd_data.vlan_mac.target_obj;
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+ true, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+		/* Set the VLAN and MAC themselves */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ }
+
+ /* Set the ramrod data header */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data (E1H)
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @elem: bnx2x_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /* 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+ elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+ ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp: device handle
+ * @p: command parameters
+ * @ppos: pointer to the cookie
+ *
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the list of
+ * previously configured elements.
+ *
+ * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
+ * is taken into account.
+ *
+ * The cookie should be given back in the next call to make the function
+ * handle the next element. If *ppos is set to NULL the iterator restarts;
+ * if *ppos == NULL on return, the last element has been handled.
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+ /* If list is empty - there is nothing to do here */
+ if (list_empty(&o->head)) {
+ *ppos = NULL;
+ return 0;
+ }
+
+ /* make a step... */
+ if (*ppos == NULL)
+ *ppos = list_first_entry(&o->head,
+ struct bnx2x_vlan_mac_registry_elem,
+ link);
+ else
+ *ppos = list_next_entry(*ppos, link);
+
+ pos = *ppos;
+
+ /* If it's the last step - return NULL */
+ if (list_is_last(&pos->link, &o->head))
+ *ppos = NULL;
+
+ /* Prepare a 'user_req' */
+ memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+ /* Set the command */
+ p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+ /* Set vlan_mac_flags */
+ p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+ /* Set a restore bit */
+ __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+ return bnx2x_config_vlan_mac(bp, p);
+}
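+
+/* Caller-side iteration sketch (illustrative): restore the whole registry
+ * by feeding the cookie back in until it comes out NULL:
+ *
+ *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+ *	int rc;
+ *
+ *	do {
+ *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
+ *	} while (!rc && pos);
+ */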
+
+/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to an element with a specific criteria and NULL if such an element
+ * hasn't been found.
+ */
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_mac_ramrod_data *data =
+ &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd ==
+ elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @bp: device handle
+ * @qo: bnx2x_qable_obj
+ * @elem: bnx2x_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consume CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ int rc;
+
+ /* Check the registry */
+ rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
+ if (rc) {
+ DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
+ return rc;
+ }
+
+ /* Check if there is a pending ADD command for this
+ * MAC/VLAN/VLAN-MAC. Return an error if there is.
+ */
+ if (exeq->get(exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
+ return -EEXIST;
+ }
+
+ /* TODO: Check the pending MOVE from other objects where this
+ * object is a destination object.
+ */
+
+	/* Consume the credit unless explicitly asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->get_credit(o)))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @bp: device handle
+ * @qo: qable object to check
+ * @elem: element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_exeq_elem query_elem;
+
+	/* If this classification cannot be deleted (it doesn't exist),
+	 * return -EEXIST.
+ */
+ pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
+ if (!pos) {
+ DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
+ return -EEXIST;
+ }
+
+ /* Check if there are pending DEL or MOVE commands for this
+ * MAC/VLAN/VLAN-MAC. Return an error if so.
+ */
+ memcpy(&query_elem, elem, sizeof(query_elem));
+
+ /* Check for MOVE commands */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+ if (exeq->get(exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending MOVE command already\n");
+ return -EINVAL;
+ }
+
+ /* Check for DEL commands */
+ if (exeq->get(exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+ return -EEXIST;
+ }
+
+	/* Return the credit to the pool unless explicitly asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->put_credit(o))) {
+ BNX2X_ERR("Failed to return a credit\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp: device handle
+ * @qo: qable object to check (source)
+ * @elem: element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+ struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+ struct bnx2x_exeq_elem query_elem;
+ struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+ struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+ /* Check if we can perform this operation based on the current registry
+ * state.
+ */
+ if (!src_o->check_move(bp, src_o, dest_o,
+ &elem->cmd_data.vlan_mac.u)) {
+ DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
+ return -EINVAL;
+ }
+
+ /* Check if there is an already pending DEL or MOVE command for the
+ * source object or ADD command for a destination object. Return an
+ * error if so.
+ */
+ memcpy(&query_elem, elem, sizeof(query_elem));
+
+ /* Check DEL on source */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+ if (src_exeq->get(src_exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending DEL command on the source queue already\n");
+ return -EINVAL;
+ }
+
+ /* Check MOVE on source */
+ if (src_exeq->get(src_exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+ return -EEXIST;
+ }
+
+ /* Check ADD on destination */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+ if (dest_exeq->get(dest_exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
+ return -EINVAL;
+ }
+
+	/* Consume the credit unless explicitly asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ dest_o->get_credit(dest_o)))
+ return -EINVAL;
+
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ src_o->put_credit(src_o))) {
+ /* return the credit taken from dest... */
+ dest_o->put_credit(dest_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+ case BNX2X_VLAN_MAC_DEL:
+ return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+ case BNX2X_VLAN_MAC_MOVE:
+ return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ int rc = 0;
+
+ /* If consumption wasn't required, nothing to do */
+ if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags))
+ return 0;
+
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ case BNX2X_VLAN_MAC_MOVE:
+ rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
+ break;
+ case BNX2X_VLAN_MAC_DEL:
+ rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	if (!rc)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int cnt = 5000, rc;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ while (cnt--) {
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(bp, raw);
+ if (rc)
+ return rc;
+
+ /* Wait until there are no pending commands */
+ if (!bnx2x_exe_queue_empty(exeq))
+ usleep_range(1000, 2000);
+ else
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *ramrod_flags)
+{
+ int rc = 0;
+
+ spin_lock_bh(&o->exe_queue.lock);
+
+ DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+ rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+ if (rc != 0) {
+ __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+ /* Calling function should not differentiate between this case
+ * and the case in which there is already a pending ramrod
+ */
+ rc = 1;
+ } else {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ }
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @cqe: completion element
+ * @ramrod_flags: if set schedule next execution chunk
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc;
+
+	/* Clearing the pending list & raw state must be done
+	 * atomically (the execution flow assumes they represent the same state).
+ */
+ spin_lock_bh(&o->exe_queue.lock);
+
+ /* Reset pending list */
+ __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+ /* Clear pending */
+ r->clear_pending(r);
+
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ /* If ramrod failed this is most likely a SW bug */
+ if (cqe->message.error)
+ return -EINVAL;
+
+ /* Run the next bulk of pending commands if requested */
+ if (test_bit(RAMROD_CONT, ramrod_flags)) {
+ rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
+ if (rc < 0)
+ return rc;
+ }
+
+ /* If there is more work to do return PENDING */
+ if (!bnx2x_exe_queue_empty(&o->exe_queue))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @bp: device handle
+ * @qo: bnx2x_qable_obj
+ * @elem: bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem query, *pos;
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+ memcpy(&query, elem, sizeof(query));
+
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+ break;
+ case BNX2X_VLAN_MAC_DEL:
+ query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+ break;
+ default:
+ /* Don't handle anything other than ADD or DEL */
+ return 0;
+ }
+
+ /* If we found the appropriate element - delete it */
+ pos = exeq->get(exeq, &query);
+ if (pos) {
+ /* Return the credit of the optimized command */
+ if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+ if ((query.cmd_data.vlan_mac.cmd ==
+ BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+ BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
+ return -EINVAL;
+ } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+ BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
+ return -EINVAL;
+ }
+ }
+
+ DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+ (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+ "ADD" : "DEL");
+
+ list_del(&pos->link);
+ bnx2x_exe_queue_free_elem(bp, pos);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp: device handle
+ * @o: vlan object
+ * @elem: element
+ * @restore: to restore or not
+ * @re: registry
+ *
+ * prepare a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ bool restore,
+ struct bnx2x_vlan_mac_registry_elem **re)
+{
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+ struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+ /* Allocate a new registry element if needed. */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+ reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+ if (!reg_elem)
+ return -ENOMEM;
+
+ /* Get a new CAM offset */
+ if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+			/* This should never happen, because CAM availability
+			 * was already checked in the 'validate' step.
+			 */
+ WARN_ON(1);
+ kfree(reg_elem);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+		/* Set the VLAN-MAC data */
+ memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+ sizeof(reg_elem->u));
+
+ /* Copy the flags (needed for DEL and RESTORE flows) */
+ reg_elem->vlan_mac_flags =
+ elem->cmd_data.vlan_mac.vlan_mac_flags;
+ } else /* DEL, RESTORE */
+ reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
+
+ *re = reg_elem;
+ return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp: device handle
+ * @qo: bnx2x_qable_obj pointer
+ * @exe_chunk: chunk
+ * @ramrod_flags: flags
+ *
+ * go and send a ramrod!
+ */
+static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct list_head *exe_chunk,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_exeq_elem *elem;
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc, idx = 0;
+ bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
+ bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+ struct bnx2x_vlan_mac_registry_elem *reg_elem;
+ enum bnx2x_vlan_mac_cmd cmd;
+
+	/* If DRIVER_ONLY execution is requested, only clean up the
+	 * registry and exit. Otherwise also send a ramrod to the FW.
+	 */
+ if (!drv_only) {
+ WARN_ON(r->check_pending(r));
+
+ /* Set pending */
+ r->set_pending(r);
+
+ /* Fill the ramrod data */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ /* We will add to the target object in MOVE command, so
+ * change the object for a CAM search.
+ */
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
+ elem, restore,
+ &reg_elem);
+ if (rc)
+ goto error_exit;
+
+ WARN_ON(!reg_elem);
+
+ /* Push a new entry into the registry */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE)))
+ list_add(&reg_elem->link, &cam_obj->head);
+
+ /* Configure a single command in a ramrod data buffer */
+ o->set_one_rule(bp, o, elem, idx,
+ reg_elem->cam_offset);
+
+ /* MOVE command consumes 2 entries in the ramrod data */
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ idx += 2;
+ else
+ idx++;
+ }
+
+		/* No need for an explicit memory barrier here as long as we
+		 * ensure the ordering of writing to the SPQ element
+		 * and updating of the SPQ producer which involves a memory
+		 * read. If the memory read is removed we will have to put a
+		 * full memory barrier there (inside bnx2x_sp_post()).
+		 */
+
+ rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
+ U64_HI(r->rdata_mapping),
+ U64_LO(r->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ goto error_exit;
+ }
+
+ /* Now, when we are done with the ramrod - clean up the registry */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ if ((cmd == BNX2X_VLAN_MAC_DEL) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE)) {
+ reg_elem = o->check_del(bp, o,
+ &elem->cmd_data.vlan_mac.u);
+
+ WARN_ON(!reg_elem);
+
+ o->put_cam_offset(o, reg_elem->cam_offset);
+ list_del(&reg_elem->link);
+ kfree(reg_elem);
+ }
+ }
+
+ if (!drv_only)
+ return 1;
+ else
+ return 0;
+
+error_exit:
+ r->clear_pending(r);
+
+ /* Cleanup a registry in case of a failure */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ /* Delete all newly added above entries */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE))) {
+ reg_elem = o->check_del(bp, cam_obj,
+ &elem->cmd_data.vlan_mac.u);
+ if (reg_elem) {
+ list_del(&reg_elem->link);
+ kfree(reg_elem);
+ }
+ }
+ }
+
+ return rc;
+}
+
+static inline int bnx2x_vlan_mac_push_new_cmd(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
+{
+ struct bnx2x_exeq_elem *elem;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+ bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+ /* Allocate the execution queue element */
+ elem = bnx2x_exe_queue_alloc_elem(bp);
+ if (!elem)
+ return -ENOMEM;
+
+ /* Set the command 'length' */
+ switch (p->user_req.cmd) {
+ case BNX2X_VLAN_MAC_MOVE:
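+		/* A MOVE occupies two rules in the ramrod data: removal from
+		 * the source object plus addition to the target one (see the
+		 * execute flow above).
+		 */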
+ elem->cmd_len = 2;
+ break;
+ default:
+ elem->cmd_len = 1;
+ }
+
+ /* Fill the object specific info */
+ memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
+
+ /* Try to add a new command to the pending list */
+ return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+}
+
+/**
+ * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @bp: device handle
+ * @p: VLAN-MAC ramrod parameters
+ *
+ */
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
+{
+ int rc = 0;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+ unsigned long *ramrod_flags = &p->ramrod_flags;
+ bool cont = test_bit(RAMROD_CONT, ramrod_flags);
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+	/* Add new elements to the execution list for commands that require it.
+	 */
+ if (!cont) {
+ rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
+ if (rc)
+ return rc;
+ }
+
+ /* If nothing will be executed further in this iteration we want to
+ * return PENDING if there are pending commands
+ */
+ if (!bnx2x_exe_queue_empty(&o->exe_queue))
+ rc = 1;
+
+ if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
+ raw->clear_pending(raw);
+ }
+
+ /* Execute commands if required */
+ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
+ test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
+ rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+ &p->ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
+	 * the user wants to wait until the last command is done.
+	 */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ /* Wait maximum for the current exe_queue length iterations plus
+ * one (for the current pending command).
+ */
+ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+ while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+ max_iterations--) {
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(bp, raw);
+ if (rc)
+ return rc;
+
+ /* Make a next step */
+ rc = __bnx2x_vlan_mac_execute_step(bp,
+ p->vlan_mac_obj,
+ &p->ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+ }
+
+ return rc;
+}
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp: device handle
+ * @o: vlan object info
+ * @vlan_mac_flags: vlan flags
+ * @ramrod_flags: execution flags to be used for this deletion
+ *
+ * Returns 0 if the last operation has completed successfully and there are
+ * no more elements left, positive value if the last operation has completed
+ * successfully and there are more previously configured elements, negative
+ * value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+ struct bnx2x_vlan_mac_ramrod_params p;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ unsigned long flags;
+ int read_lock;
+ int rc = 0;
+
+ /* Clear pending commands first */
+
+ spin_lock_bh(&exeq->lock);
+
+ list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+ flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
+ rc = exeq->remove(bp, exeq->owner, exeq_pos);
+ if (rc) {
+ BNX2X_ERR("Failed to remove command\n");
+ spin_unlock_bh(&exeq->lock);
+ return rc;
+ }
+ list_del(&exeq_pos->link);
+ bnx2x_exe_queue_free_elem(bp, exeq_pos);
+ }
+ }
+
+ spin_unlock_bh(&exeq->lock);
+
+ /* Prepare a command request */
+ memset(&p, 0, sizeof(p));
+ p.vlan_mac_obj = o;
+ p.ramrod_flags = *ramrod_flags;
+ p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+	/* Add all but the last VLAN-MAC to the execution queue without
+	 * actually executing anything.
+ */
+ __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
+ __clear_bit(RAMROD_CONT, &p.ramrod_flags);
+
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ return read_lock;
+
+ list_for_each_entry(pos, &o->head, link) {
+ flags = pos->vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
+ p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+ memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
+ rc = bnx2x_config_vlan_mac(bp, &p);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to add a new DEL command\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+ return rc;
+ }
+ }
+ }
+
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+
+ p.ramrod_flags = *ramrod_flags;
+ __set_bit(RAMROD_CONT, &p.ramrod_flags);
+
+ return bnx2x_config_vlan_mac(bp, &p);
+}
+
+static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
+ u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type)
+{
+ raw->func_id = func_id;
+ raw->cid = cid;
+ raw->cl_id = cl_id;
+ raw->rdata = rdata;
+ raw->rdata_mapping = rdata_mapping;
+ raw->state = state;
+ raw->pstate = pstate;
+ raw->obj_type = type;
+ raw->check_pending = bnx2x_raw_check_pending;
+ raw->clear_pending = bnx2x_raw_clear_pending;
+ raw->set_pending = bnx2x_raw_set_pending;
+ raw->wait_comp = bnx2x_raw_wait;
+}
+
+static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ INIT_LIST_HEAD(&o->head);
+ o->head_reader = 0;
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
+
+ o->macs_pool = macs_pool;
+ o->vlans_pool = vlans_pool;
+
+ o->delete_all = bnx2x_vlan_mac_del_all;
+ o->restore = bnx2x_vlan_mac_restore;
+ o->complete = bnx2x_complete_vlan_mac;
+ o->wait = bnx2x_wait_vlan_mac;
+
+ bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+ state, pstate, type);
+}
+
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool)
+{
+ union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
+
+ bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, NULL);
+
+ /* CAM credit pool handling */
+ mac_obj->get_credit = bnx2x_get_credit_mac;
+ mac_obj->put_credit = bnx2x_put_credit_mac;
+ mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1x(bp)) {
+ mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
+ mac_obj->check_del = bnx2x_check_mac_del;
+ mac_obj->check_add = bnx2x_check_mac_add;
+ mac_obj->check_move = bnx2x_check_move_always_err;
+ mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_mac);
+ } else {
+ mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
+ mac_obj->check_del = bnx2x_check_mac_del;
+ mac_obj->check_add = bnx2x_check_mac_add;
+ mac_obj->check_move = bnx2x_check_move;
+ mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ mac_obj->get_n_elements = bnx2x_get_n_elements;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_mac);
+ }
+}
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type, NULL,
+ vlans_pool);
+
+ vlan_obj->get_credit = bnx2x_get_credit_vlan;
+ vlan_obj->put_credit = bnx2x_put_credit_vlan;
+ vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
+ vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
+
+ if (CHIP_IS_E1x(bp)) {
+		BNX2X_ERR("Do not support chips other than E2 and newer\n");
+ BUG();
+ } else {
+ vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
+ vlan_obj->check_del = bnx2x_check_vlan_del;
+ vlan_obj->check_add = bnx2x_check_vlan_add;
+ vlan_obj->check_move = bnx2x_check_move;
+ vlan_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ vlan_obj->get_n_elements = bnx2x_get_n_elements;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan);
+ }
+}
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj =
+ (union bnx2x_qable_obj *)vlan_mac_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, vlans_pool);
+
+ /* CAM pool handling */
+ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+ vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+ /* CAM offset is relevant for 57710 and 57711 chips only which have a
+ * single CAM for both MACs and VLAN-MAC pairs. So the offset
+ * will be taken from MACs' pool object only.
+ */
+ vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips other than E2\n");
+ BUG();
+ } else if (CHIP_IS_E1H(bp)) {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move_always_err;
+ vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ } else {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move;
+ vlan_mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue,
+ CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ }
+}
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+ struct tstorm_eth_mac_filter_config *mac_filters,
+ u16 pf_id)
+{
+ size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ /* update the bp MAC filter structure */
+ u32 mask = (1 << p->cl_id);
+
+ struct tstorm_eth_mac_filter_config *mac_filters =
+ (struct tstorm_eth_mac_filter_config *)p->rdata;
+
+ /* initial setting is drop-all */
+ u8 drop_all_ucast = 1, drop_all_mcast = 1;
+ u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+ u8 unmatched_unicast = 0;
+
+	/* On E1x we only take the Rx accept flags into account, since
+	 * Tx switching isn't enabled.
+	 */
+ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+ /* accept matched ucast */
+ drop_all_ucast = 0;
+
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+ /* accept matched mcast */
+ drop_all_mcast = 0;
+
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+		/* accept all ucast */
+ drop_all_ucast = 0;
+ accp_all_ucast = 1;
+ }
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+ /* accept all mcast */
+ drop_all_mcast = 0;
+ accp_all_mcast = 1;
+ }
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+ /* accept (all) bcast */
+ accp_all_bcast = 1;
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+ /* accept unmatched unicasts */
+ unmatched_unicast = 1;
+
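+	/* Fold the decisions above into the per-function filter state:
+	 * set or clear this client's bit in each filter word.
+	 */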
+ mac_filters->ucast_drop_all = drop_all_ucast ?
+ mac_filters->ucast_drop_all | mask :
+ mac_filters->ucast_drop_all & ~mask;
+
+ mac_filters->mcast_drop_all = drop_all_mcast ?
+ mac_filters->mcast_drop_all | mask :
+ mac_filters->mcast_drop_all & ~mask;
+
+ mac_filters->ucast_accept_all = accp_all_ucast ?
+ mac_filters->ucast_accept_all | mask :
+ mac_filters->ucast_accept_all & ~mask;
+
+ mac_filters->mcast_accept_all = accp_all_mcast ?
+ mac_filters->mcast_accept_all | mask :
+ mac_filters->mcast_accept_all & ~mask;
+
+ mac_filters->bcast_accept_all = accp_all_bcast ?
+ mac_filters->bcast_accept_all | mask :
+ mac_filters->bcast_accept_all & ~mask;
+
+ mac_filters->unmatched_unicast = unmatched_unicast ?
+ mac_filters->unmatched_unicast | mask :
+ mac_filters->unmatched_unicast & ~mask;
+
+	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
+ mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
+ mac_filters->bcast_accept_all);
+
+ /* write the MAC filter structure*/
+ __storm_memset_mac_filters(bp, mac_filters, p->func_id);
+
+ /* The operation is completed */
+ clear_bit(p->state, p->pstate);
+ smp_mb__after_atomic();
+
+ return 0;
+}
+
+/* Setup ramrod data */
+static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
+ struct eth_classify_header *hdr,
+ u8 rule_cnt)
+{
+ hdr->echo = cpu_to_le32(cid);
+ hdr->rule_cnt = rule_cnt;
+}
+
+static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
+ unsigned long *accept_flags,
+ struct eth_filter_rules_cmd *cmd,
+ bool clear_accept_all)
+{
+ u16 state;
+
+ /* start with 'drop-all' */
+ state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+ ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
+
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
+
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+
+ if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
+ /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+ if (clear_accept_all) {
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+
+ cmd->state = cpu_to_le16(state);
+}
+
+static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ struct eth_filter_rules_ramrod_data *data = p->rdata;
+ int rc;
+ u8 rule_idx = 0;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ /* Setup ramrod data */
+
+ /* Tx (internal switching) */
+ if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
+ }
+
+ /* Rx */
+ if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
+ }
+
+	/* If FCoE Queue configuration has been requested, configure the Rx and
+	 * internal switching modes for this queue in separate rules.
+	 *
+	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
+ * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+ */
+ if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+ /* Tx (internal switching) */
+ if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx]),
+ true);
+ rule_idx++;
+ }
+
+ /* Rx */
+ if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx]),
+ true);
+ rule_idx++;
+ }
+ }
+
+ /* Set the ramrod header (most importantly - number of rules to
+ * configure).
+ */
+ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+ DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
+ data->header.rule_cnt, p->rx_accept_flags,
+ p->tx_accept_flags);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+ U64_HI(p->rdata_mapping),
+ U64_LO(p->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ /* Do nothing */
+ return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ int rc;
+
+ /* Configure the new classification in the chip */
+ rc = p->rx_mode_obj->config_rx_mode(bp, p);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for a ramrod completion if was requested */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ rc = p->rx_mode_obj->wait_comp(bp, p);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+ struct bnx2x_rx_mode_obj *o)
+{
+ if (CHIP_IS_E1x(bp)) {
+ o->wait_comp = bnx2x_empty_rx_mode_wait;
+ o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+ } else {
+ o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
+ o->config_rx_mode = bnx2x_set_rx_mode_e2;
+ }
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
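+/* The approximate match registry has 256 bins; a MAC is mapped to its
+ * bin by the top byte of the MAC's CRC32c.
+ */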
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+ return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
+
+struct bnx2x_mcast_mac_elem {
+ struct list_head link;
+ u8 mac[ETH_ALEN];
+ u8 pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct bnx2x_mcast_bin_elem {
+ struct list_head link;
+ int bin;
+ int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */
+};
+
+union bnx2x_mcast_elem {
+ struct bnx2x_mcast_bin_elem bin_elem;
+ struct bnx2x_mcast_mac_elem mac_elem;
+};
+
+struct bnx2x_mcast_elem_group {
+ struct list_head mcast_group_link;
+ union bnx2x_mcast_elem mcast_elems[];
+};
+
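+/* Element groups are allocated one page at a time; the macro below
+ * computes how many elements fit on a page after the group's list head.
+ */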
+#define MCAST_MAC_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_elem_group)) / \
+ sizeof(union bnx2x_mcast_elem))
+
+struct bnx2x_pending_mcast_cmd {
+ struct list_head link;
+ struct list_head group_head;
+ int type; /* BNX2X_MCAST_CMD_X */
+ union {
+ struct list_head macs_head;
+ u32 macs_num; /* Needed for DEL command */
+		int next_bin; /* Needed for RESTORE flow with approx. match */
+ } data;
+
+ bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set
+			   * when macs_head has been converted to a list of
+ * bnx2x_mcast_bin_elem.
+ */
+
+ bool done; /* set to true, when the command has been handled,
+ * practically used in 57712 handling only, where one pending
+ * command may be handled in a few operations. As long as for
+ * other chips every operation handling is completed in a
+ * single ramrod, there is no need to utilize this field.
+ */
+};
+
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+ o->raw.wait_comp(bp, &o->raw))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void bnx2x_free_groups(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
+ }
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_pending_mcast_cmd *new_cmd;
+ struct bnx2x_mcast_list_elem *pos;
+ struct bnx2x_mcast_elem_group *elem_group;
+ struct bnx2x_mcast_mac_elem *mac_elem;
+ int total_elems = 0, macs_list_len = 0, offset = 0;
+
+ /* When adding MACs we'll need to store their values */
+ if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET)
+ macs_list_len = p->mcast_list_len;
+
+ /* If the command is empty ("handle pending commands only"), break */
+ if (!p->mcast_list_len)
+ return 0;
+
+ /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+ new_cmd = kzalloc(sizeof(*new_cmd), GFP_ATOMIC);
+ if (!new_cmd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cmd->data.macs_head);
+ INIT_LIST_HEAD(&new_cmd->group_head);
+ new_cmd->type = cmd;
+ new_cmd->done = false;
+
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
+ cmd, macs_list_len);
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_SET:
+ /* For a set command, we need to allocate sufficient memory for
+ * all the bins, since we can't analyze at this point how much
+ * memory would be required.
+ */
+ total_elems = macs_list_len;
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ if (total_elems < BNX2X_MCAST_BINS_NUM)
+ total_elems = BNX2X_MCAST_BINS_NUM;
+ }
+ while (total_elems > 0) {
+ elem_group = (struct bnx2x_mcast_elem_group *)
+ __get_free_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!elem_group) {
+ bnx2x_free_groups(&new_cmd->group_head);
+ kfree(new_cmd);
+ return -ENOMEM;
+ }
+ total_elems -= MCAST_MAC_ELEMS_PER_PG;
+ list_add_tail(&elem_group->mcast_group_link,
+ &new_cmd->group_head);
+ }
+ elem_group = list_first_entry(&new_cmd->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
+ list_for_each_entry(pos, &p->mcast_list, link) {
+ mac_elem = &elem_group->mcast_elems[offset].mac_elem;
+ memcpy(mac_elem->mac, pos->mac, ETH_ALEN);
+ /* Push the MACs of the current command into the pending
+ * command MACs list: FIFO
+ */
+ list_add_tail(&mac_elem->link,
+ &new_cmd->data.macs_head);
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
+ }
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ new_cmd->data.macs_num = p->mcast_list_len;
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ new_cmd->data.next_bin = 0;
+ break;
+
+ default:
+ kfree(new_cmd);
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Push the new pending command to the tail of the pending list: FIFO */
+ list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+ o->set_sched(o);
+
+ return 1;
+}
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o: multicast object info
+ * @last: index to start looking from (including)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+ int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+ for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+ if (o->registry.aprox_match.vec[i])
+ for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+ int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+ if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+ vec, cur_bit)) {
+ return cur_bit;
+ }
+ }
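+		/* Only the first searched word honours @last's bit offset;
+		 * subsequent words are scanned from bit 0.
+		 */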
+ inner_start = 0;
+ }
+
+ /* None found */
+ return -1;
+}
+
+/**
+ * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o: multicast object info
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
+{
+ int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
+
+ if (cur_bit >= 0)
+ BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+ return cur_bit;
+}
+
+static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ u8 rx_tx_flag = 0;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+ u8 func_id = r->func_id;
+ u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
+ int bin;
+
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) ||
+ (cmd == BNX2X_MCAST_CMD_SET_ADD))
+ rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+ data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+ /* Get a bin and update a bins' vector */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ /* If there were no more bins to clear
+ * (bnx2x_mcast_clear_first_bin() returns -1) then we would
+ * clear any (0xff) bin.
+ * See bnx2x_mcast_validate_e2() for explanation when it may
+ * happen.
+ */
+ bin = bnx2x_mcast_clear_first_bin(o);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bin = cfg_data->bin;
+ break;
+
+ case BNX2X_MCAST_CMD_SET_ADD:
+ bin = cfg_data->bin;
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ case BNX2X_MCAST_CMD_SET_DEL:
+ bin = cfg_data->bin;
+ BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return;
+ }
+
+ DP(BNX2X_MSG_SP, "%s bin %d\n",
+ ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+ "Setting" : "Clearing"), bin);
+
+ data->rules[idx].bin_id = (u8)bin;
+ data->rules[idx].func_id = func_id;
+ data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @bp: device handle
+ * @o: multicast object info
+ * @start_bin: index in the registry to start from (including)
+ * @rdata_idx: index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e2(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
+ int *rdata_idx)
+{
+ int cur_bin, cnt = *rdata_idx;
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+
+ /* go through the registry and configure the bins from it */
+ for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+ cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
+
+ cfg_data.bin = (u8)cur_bin;
+ o->set_one_rule(bp, o, cnt, &cfg_data,
+ BNX2X_MCAST_CMD_RESTORE);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *rdata_idx = cnt;
+
+ return cur_bin;
+}
+
+static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+ int cnt = *line_idx;
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+
+ list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+ link) {
+
+ cfg_data.mac = &pmac_pos->mac[0];
+ o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ pmac_pos->mac);
+
+ list_del(&pmac_pos->link);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+ /* if no more MACs to configure - we are done */
+ if (list_empty(&cmd_pos->data.macs_head))
+ cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ int cnt = *line_idx;
+
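+	/* A pending DEL carries no MAC list - each rule simply clears the
+	 * first bin that is still set (see bnx2x_mcast_set_one_rule_e2()).
+	 */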
+ while (cmd_pos->data.macs_num) {
+ o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
+
+ cnt++;
+
+ cmd_pos->data.macs_num--;
+
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+ cmd_pos->data.macs_num, cnt);
+
+ /* Break if we reached the maximum
+ * number of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+ /* If we cleared all bins - we are done */
+ if (!cmd_pos->data.macs_num)
+ cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
+ line_idx);
+
+ if (cmd_pos->data.next_bin < 0)
+ /* If o->set_restore returned -1 we are done */
+ cmd_pos->done = true;
+ else
+ /* Start from the next bin next time */
+ cmd_pos->data.next_bin++;
+}
+
+static void
+bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_pending_mcast_cmd *cmd_pos)
+{
+ u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ];
+ struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+ struct bnx2x_mcast_bin_elem *p_item;
+ struct bnx2x_mcast_elem_group *elem_group;
+ int cnt = 0, mac_cnt = 0, offset = 0, i;
+
+ memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ);
+ memcpy(cur, o->registry.aprox_match.vec,
+ sizeof(u64) * BNX2X_MCAST_VEC_SZ);
+
+ /* Fill `current' with the required set of bins to configure */
+ list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+ link) {
+ int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac);
+
+ DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n",
+ pmac_pos->mac);
+
+ BIT_VEC64_SET_BIT(req, bin);
+ list_del(&pmac_pos->link);
+ mac_cnt++;
+ }
+
+	/* We no longer have use for the MACs; reuse the memory for a
+	 * list that will be used to configure bins.
+	 */
+ cmd_pos->set_convert = true;
+ INIT_LIST_HEAD(&cmd_pos->data.macs_head);
+ elem_group = list_first_entry(&cmd_pos->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
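+	/* Queue a SET_ADD/SET_DEL only for bins whose required state
+	 * differs from the current registry state.
+	 */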
+ for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) {
+ bool b_current = !!BIT_VEC64_TEST_BIT(cur, i);
+ bool b_required = !!BIT_VEC64_TEST_BIT(req, i);
+
+ if (b_current == b_required)
+ continue;
+
+ p_item = &elem_group->mcast_elems[offset].bin_elem;
+ p_item->bin = i;
+ p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD
+ : BNX2X_MCAST_CMD_SET_DEL;
+ list_add_tail(&p_item->link , &cmd_pos->data.macs_head);
+ cnt++;
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
+ }
+
+	/* Now we know exactly how many commands are hiding here. Correct
+	 * the over-estimation we added earlier to guarantee the command
+	 * would be enqueued.
+	 */
+ o->total_pending_num -= (o->max_cmd_len + mac_cnt);
+ o->total_pending_num += cnt;
+
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num);
+}
+
+static void
+bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *cnt)
+{
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+ struct bnx2x_mcast_bin_elem *p_item, *p_item_n;
+
+ /* This is actually a 2-part scheme - it starts by converting the MACs
+ * into a list of bins to be added/removed, and correcting the numbers
+	 * on the object. This is now allowed, as we're now sure that all
+	 * previously configured requests have already been applied.
+ * The second part is actually adding rules for the newly introduced
+ * entries [like all the rest of the hdl_pending functions].
+ */
+ if (!cmd_pos->set_convert)
+ bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos);
+
+ list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head,
+ link) {
+ cfg_data.bin = (u8)p_item->bin;
+ o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type);
+ (*cnt)++;
+
+ list_del(&p_item->link);
+
+ /* Break if we reached the maximum number of rules. */
+ if (*cnt >= o->max_cmd_len)
+ break;
+ }
+
+ /* if no more MACs to configure - we are done */
+ if (list_empty(&cmd_pos->data.macs_head))
+ cmd_pos->done = true;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+ int cnt = 0;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
+ link) {
+ switch (cmd_pos->type) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
+ &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_SET:
+ bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+ return -EINVAL;
+ }
+
+ /* If the command has been completed - remove it from the list
+ * and free the memory
+ */
+ if (cmd_pos->done) {
+ list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
+ kfree(cmd_pos);
+ }
+
+ /* Break if we reached the maximum number of rules */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ return cnt;
+}
+
+static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ struct bnx2x_mcast_list_elem *mlist_pos;
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+ int cnt = *line_idx;
+
+ list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+ cfg_data.mac = mlist_pos->mac;
+ o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ mlist_pos->mac);
+ }
+
+ *line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ int cnt = *line_idx, i;
+
+ for (i = 0; i < p->mcast_list_len; i++) {
+ o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+ p->mcast_list_len - i - 1);
+ }
+
+ *line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd - send command if room
+ *
+ * @bp: device handle
+ * @p: ramrod mcast info
+ * @cmd: command
+ * @start_cnt: first line in the ramrod data that may be used
+ *
+ * This function is called iff there is enough room for the current command in
+ * the ramrod data.
+ * Returns number of lines filled in the ramrod data in total.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd,
+ int start_cnt)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int cnt = start_cnt;
+
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ o->hdl_restore(bp, o, 0, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* The current command has been handled */
+ p->mcast_list_len = 0;
+
+ return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ fallthrough;
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ /* Here we set the approximate amount of work to do, which in
+		 * fact may be less, as some MACs in postponed ADD
+ * command(s) scheduled before this command may fall into
+ * the same bin and the actual number of bins set in the
+ * registry would be less than we estimated here. See
+ * bnx2x_mcast_set_one_rule_e2() for further details.
+ */
+ p->mcast_list_len = reg_sz;
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Here we assume that all new MACs will fall into new bins.
+ * However we will correct the real registry size after we
+ * handle all pending commands.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ break;
+
+ case BNX2X_MCAST_CMD_SET:
+ /* We can only learn how many commands would actually be used
+ * when this is being configured. So for now, simply guarantee
+ * the command will be enqueued [to refrain from adding logic
+ * that handles this and THEN learns it needs several ramrods].
+		 * Just like for ADD/Cont, the mcast_list_len might be an
+		 * overestimation; even more so, since we don't take into
+ * account the possibility of removal of existing bins.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ o->total_pending_num += o->max_cmd_len;
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Increase the total number of MACs pending to be configured */
+ o->total_pending_num += p->mcast_list_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_bins);
+ o->total_pending_num -= p->mcast_list_len;
+
+ if (cmd == BNX2X_MCAST_CMD_SET)
+ o->total_pending_num -= o->max_cmd_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
+ *
+ * @bp: device handle
+ * @p: ramrod parameters
+ * @len: number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ u8 len)
+{
+ struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
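+	/* The echo field carries the connection ID and the pending-state
+	 * marker, which the completion handler uses to match this ramrod.
+	 */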
+ data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
+ data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp: device handle
+ * @o: multicast object info
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution complexity is proportional to the
+ * number of set bins.
+ *
+ * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ int i, cnt = 0;
+ u64 elem;
+
+ for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+ elem = o->registry.aprox_match.vec[i];
+ for (; elem; cnt++)
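+			/* clear the least significant set bit */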
+ elem &= elem - 1;
+ }
+
+ o->set_registry_size(o, cnt);
+
+ return 0;
+}
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+ int cnt = 0, rc;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (list_empty(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be true iff there was enough room in ramrod
+ * data for all pending commands and for the current
+ * command. Otherwise the current command would have been added
+ * to the pending commands and p->mcast_list_len would have been
+ * zeroed.
+ */
+ if (p->mcast_list_len > 0)
+ cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+ /* We've pulled out some MACs - update the total number of
+ * outstanding.
+ */
+ o->total_pending_num -= cnt;
+
+ /* send a ramrod */
+ WARN_ON(o->total_pending_num < 0);
+ WARN_ON(cnt > o->max_cmd_len);
+
+ bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+ /* Update a registry size if there are no more pending operations.
+ *
+ * We don't want to change the value of the registry size if there are
+ * pending operations because we want it to always be equal to the
+ * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+ * set bins after the last requested operation in order to properly
+ * evaluate the size of the next DEL/RESTORE operation.
+ *
+ * Note that we update the registry itself during command(s) handling
+ * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
+ * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+ * with a limited amount of update commands (per MAC/bin) and we don't
+ * know in this scope what the actual state of bins configuration is
+ * going to be after this ramrod.
+ */
+ if (!o->total_pending_num)
+ bnx2x_mcast_refresh_registry_e2(bp, o);
+
+ /* If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately. Due to the SET option, it's also
+ * possible that after evaluating the differences there's no need for
+ * a ramrod. In that case, we can skip it as well.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) {
+ raw->clear_pending(raw);
+ return 0;
+ } else {
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ raw->cid, U64_HI(raw->rdata_mapping),
+ U64_LO(raw->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+ }
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ BNX2X_ERR("Can't use `set' command on e1h!\n");
+ return -EINVAL;
+ }
+
+	/* Mark that there is work to do */
+ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ p->mcast_list_len = 1;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd)
+{
+ /* Do nothing */
+}
+
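+/* The 57711 approximate match filter is an array of 32-bit words:
+ * bits [7:5] of the bin index select the word, bits [4:0] select the
+ * bit inside it.
+ */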
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+ (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ struct bnx2x_mcast_list_elem *mlist_pos;
+ int bit;
+
+ list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+ bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
+ mlist_pos->mac, bit);
+
+ /* bookkeeping... */
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+ bit);
+ }
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ int bit;
+
+ for (bit = bnx2x_mcast_get_next_bin(o, 0);
+ bit >= 0;
+ bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+ DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+ }
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM's internal RAM, so no
+ * special tricks are needed to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ int i;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+	/* Unless CLEAR_ONLY has been requested - set the multicast
+	 * filter bits and write them into the chip. For CLEAR_ONLY,
+	 * just clear the registry and the pending bit below.
+	 */
+ if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ u32 mc_filter[MC_HASH_SIZE] = {0};
+
+ /* Set the multicast filter bits before writing it into
+ * the internal memory.
+ */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ DP(BNX2X_MSG_SP,
+ "Invalidating multicast MACs configuration\n");
+
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set the mcast filter in the internal memory */
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+ } else
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+
+ /* We are done */
+ r->clear_pending(r);
+
+ return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ BNX2X_ERR("Can't use `set' command on e1!\n");
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ fallthrough;
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ p->mcast_list_len = reg_sz;
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Multicast MACs on 57710 are configured as unicast MACs and
+ * there is only a limited number of CAM entries for that
+ * matter.
+ */
+ if (p->mcast_list_len > o->max_cmd_len) {
+ BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
+ o->max_cmd_len);
+ return -EINVAL;
+ }
+		/* Every configured MAC should be cleared if DEL command is
+		 * called. Only the last ADD command is relevant, since every
+		 * ADD command overrides the previous configuration.
+		 */
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+ if (p->mcast_list_len > 0)
+ o->set_registry_size(o, p->mcast_list_len);
+
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* We want to ensure that commands are executed one by one for 57710.
+	 * Therefore each non-empty command will consume o->max_cmd_len.
+ */
+ if (p->mcast_list_len)
+ o->total_pending_num += o->max_cmd_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_macs,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_macs);
+
+	/* If the current command hasn't been handled yet, getting here
+	 * means it's meant to be dropped, and we have to update the
+	 * number of outstanding MACs accordingly.
+	 */
+ if (p->mcast_list_len)
+ o->total_pending_num -= o->max_cmd_len;
+}
+
+static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(r->rdata);
+
+ /* copy mac */
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
+ bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
+ &data->config_table[idx].middle_mac_addr,
+ &data->config_table[idx].lsb_mac_addr,
+ cfg_data->mac);
+
+ data->config_table[idx].vlan_id = 0;
+ data->config_table[idx].pf_id = r->func_id;
+ data->config_table[idx].clients_bit_vector =
+ cpu_to_le32(1 << r->cl_id);
+
+ SET_FLAG(data->config_table[idx].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+ }
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
+ *
+ * @bp: device handle
+ * @p: ramrod parameters
+ * @len: number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ u8 len)
+{
+ struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(r->rdata);
+
+ u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+ BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
+ BNX2X_MAX_MULTICAST*(1 + r->func_id));
+
+ data->hdr.offset = offset;
+ data->hdr.client_id = cpu_to_le16(0xff);
+ data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
+ data->hdr.length = len;
+}
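+
+/* Illustrative note (not part of the driver): on a real (non-emulation)
+ * chip the offset above is BNX2X_MAX_MULTICAST * (1 + r->func_id), i.e.
+ * every function gets its own window of BNX2X_MAX_MULTICAST CAM entries
+ * past the unicast range. Assuming BNX2X_MAX_MULTICAST == 64:
+ *
+ *	func_id 0 -> offset 64
+ *	func_id 1 -> offset 128
+ *	func_id 2 -> offset 192
+ */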
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
+ *
+ * @bp: device handle
+ * @o: multicast info
+ * @start_idx: index in the registry to start from
+ * @rdata_idx: index in the ramrod data to start from
+ *
+ * The restore command for 57710 is like all other commands - always a
+ * stand-alone command - start_idx and rdata_idx will always be 0. This
+ * function will always succeed.
+ * Returns -1 to comply with the 57712 variant.
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e1(
+ struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
+ int *rdata_idx)
+{
+ struct bnx2x_mcast_mac_elem *elem;
+ int i = 0;
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+
+ /* go through the registry and configure the MACs from it. */
+ list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
+ cfg_data.mac = &elem->mac[0];
+ o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
+
+ i++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ cfg_data.mac);
+ }
+
+ *rdata_idx = i;
+
+ return -1;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e1(
+ struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_pending_mcast_cmd *cmd_pos;
+ struct bnx2x_mcast_mac_elem *pmac_pos;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ union bnx2x_mcast_config_data cfg_data = {NULL};
+ int cnt = 0;
+
+ /* If nothing to be done - return */
+ if (list_empty(&o->pending_cmds_head))
+ return 0;
+
+ /* Handle the first command */
+ cmd_pos = list_first_entry(&o->pending_cmds_head,
+ struct bnx2x_pending_mcast_cmd, link);
+
+ switch (cmd_pos->type) {
+ case BNX2X_MCAST_CMD_ADD:
+ list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
+ cfg_data.mac = &pmac_pos->mac[0];
+ o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ pmac_pos->mac);
+ }
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ cnt = cmd_pos->data.macs_num;
+ DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ o->hdl_restore(bp, o, 0, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+ return -EINVAL;
+ }
+
+ list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
+ kfree(cmd_pos);
+
+ return cnt;
+}
+
+/**
+ * bnx2x_get_fw_mac_addr - reverse the bnx2x_set_fw_mac_addr().
+ *
+ * @fw_hi: MSB half of the MAC address
+ * @fw_mid: middle half of the MAC address
+ * @fw_lo: LSB half of the MAC address
+ * @mac: mac address
+ */
+static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
+{
+ mac[1] = ((u8 *)fw_hi)[0];
+ mac[0] = ((u8 *)fw_hi)[1];
+ mac[3] = ((u8 *)fw_mid)[0];
+ mac[2] = ((u8 *)fw_mid)[1];
+ mac[5] = ((u8 *)fw_lo)[0];
+ mac[4] = ((u8 *)fw_lo)[1];
+}
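+
+/* Illustrative example (not part of the driver): the firmware keeps a MAC
+ * as three 16-bit halves with the bytes of each half swapped, so for MAC
+ * 00:11:22:33:44:55 the halves, read as little-endian values, map as:
+ *
+ *	fw_hi  = 0x0011  ->  mac[0] = 0x00, mac[1] = 0x11
+ *	fw_mid = 0x2233  ->  mac[2] = 0x22, mac[3] = 0x33
+ *	fw_lo  = 0x4455  ->  mac[4] = 0x44, mac[5] = 0x55
+ */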
+
+/**
+ * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry
+ *
+ * @bp: device handle
+ * @o: multicast info
+ *
+ * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
+ * and update the registry correspondingly: if ADD - allocate a memory and add
+ * the entries to the registry (list), if DELETE - clear the registry and free
+ * the memory.
+ */
+static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct bnx2x_mcast_mac_elem *elem;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(raw->rdata);
+
+	/* If the first entry contains a SET bit - the command was ADD,
+	 * otherwise - DEL_ALL
+	 */
+ if (GET_FLAG(data->config_table[0].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
+ int i, len = data->hdr.length;
+
+ /* Break if it was a RESTORE command */
+ if (!list_empty(&o->registry.exact_match.macs))
+ return 0;
+
+ elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
+ if (!elem) {
+ BNX2X_ERR("Failed to allocate registry memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < len; i++, elem++) {
+ bnx2x_get_fw_mac_addr(
+ &data->config_table[i].msb_mac_addr,
+ &data->config_table[i].middle_mac_addr,
+ &data->config_table[i].lsb_mac_addr,
+ elem->mac);
+ DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
+ elem->mac);
+ list_add_tail(&elem->link,
+ &o->registry.exact_match.macs);
+ }
+ } else {
+ elem = list_first_entry(&o->registry.exact_match.macs,
+ struct bnx2x_mcast_mac_elem, link);
+ DP(BNX2X_MSG_SP, "Deleting a registry\n");
+ kfree(elem);
+ INIT_LIST_HEAD(&o->registry.exact_match.macs);
+ }
+
+ return 0;
+}
+
+static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ int cnt = 0, i, rc;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ /* First set all entries as invalid */
+ for (i = 0; i < o->max_cmd_len ; i++)
+ SET_FLAG(data->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+
+ /* Handle pending commands first */
+ cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (list_empty(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be true iff there were no pending commands */
+ if (!cnt)
+ cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
+
+ /* For 57710 every command has o->max_cmd_len length to ensure that
+ * commands are done one at a time.
+ */
+ o->total_pending_num -= o->max_cmd_len;
+
+ /* send a ramrod */
+
+ WARN_ON(cnt > o->max_cmd_len);
+
+ /* Set ramrod header (in particular, a number of entries to update) */
+ bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
+
+	/* Update the registry: we need the registry contents to be always up
+	 * to date in order to be able to execute a RESTORE opcode. Here
+	 * we use the fact that for 57710 we send one command at a time,
+	 * hence we may take the registry update out of the command handling
+	 * and do it in a simpler way here.
+	 */
+ rc = bnx2x_mcast_refresh_registry_e1(bp, o);
+ if (rc)
+ return rc;
+
+ /* If CLEAR_ONLY was requested - don't send a ramrod and clear
+ * RAMROD_PENDING status immediately.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ raw->clear_pending(raw);
+ return 0;
+ } else {
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
+ U64_HI(raw->rdata_mapping),
+ U64_LO(raw->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+ }
+}
+
+static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
+{
+ return o->registry.exact_match.num_macs_set;
+}
+
+static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
+{
+ return o->registry.aprox_match.num_bins_set;
+}
+
+static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
+ int n)
+{
+ o->registry.exact_match.num_macs_set = n;
+}
+
+static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
+ int n)
+{
+ o->registry.aprox_match.num_bins_set = n;
+}
+
+int bnx2x_config_mcast(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc = 0, old_reg_size;
+
+	/* This is needed to recover the number of currently configured
+	 * mcast MACs in case of failure.
+	 */
+ old_reg_size = o->get_registry_size(o);
+
+ /* Do some calculations and checks */
+ rc = o->validate(bp, p, cmd);
+ if (rc)
+ return rc;
+
+ /* Return if there is no work to do */
+ if ((!p->mcast_list_len) && (!o->check_sched(o)))
+ return 0;
+
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
+ o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
+
+ /* Enqueue the current command to the pending list if we can't complete
+ * it in the current iteration
+ */
+ if (r->check_pending(r) ||
+ ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+ rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
+ if (rc < 0)
+ goto error_exit1;
+
+ /* As long as the current command is in a command list we
+ * don't need to handle it separately.
+ */
+ p->mcast_list_len = 0;
+ }
+
+ if (!r->check_pending(r)) {
+
+ /* Set 'pending' state */
+ r->set_pending(r);
+
+ /* Configure the new classification in the chip */
+ rc = o->config_mcast(bp, p, cmd);
+ if (rc < 0)
+ goto error_exit2;
+
+		/* Wait for a ramrod completion if it was requested */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = o->wait_comp(bp, o);
+ }
+
+ return rc;
+
+error_exit2:
+ r->clear_pending(r);
+
+error_exit1:
+ o->revert(bp, p, old_reg_size, cmd);
+
+ return rc;
+}
+
+static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
+{
+ smp_mb__before_atomic();
+ clear_bit(o->sched_state, o->raw.pstate);
+ smp_mb__after_atomic();
+}
+
+static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
+{
+ smp_mb__before_atomic();
+ set_bit(o->sched_state, o->raw.pstate);
+ smp_mb__after_atomic();
+}
+
+static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
+{
+ return !!test_bit(o->sched_state, o->raw.pstate);
+}
+
+static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
+{
+ return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *mcast_obj,
+ u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+ u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate, bnx2x_obj_type type)
+{
+ memset(mcast_obj, 0, sizeof(*mcast_obj));
+
+ bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+ rdata, rdata_mapping, state, pstate, type);
+
+ mcast_obj->engine_id = engine_id;
+
+ INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
+
+ mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
+ mcast_obj->check_sched = bnx2x_mcast_check_sched;
+ mcast_obj->set_sched = bnx2x_mcast_set_sched;
+ mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
+
+ if (CHIP_IS_E1(bp)) {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
+ mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore =
+ bnx2x_mcast_handle_restore_cmd_e1;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+ if (CHIP_REV_IS_SLOW(bp))
+ mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
+ else
+ mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
+
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
+ mcast_obj->validate = bnx2x_mcast_validate_e1;
+ mcast_obj->revert = bnx2x_mcast_revert_e1;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_exact;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_exact;
+
+ /* 57710 is the only chip that uses the exact match for mcast
+ * at the moment.
+ */
+ INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
+
+ } else if (CHIP_IS_E1H(bp)) {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
+ mcast_obj->enqueue_cmd = NULL;
+ mcast_obj->hdl_restore = NULL;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+ /* 57711 doesn't send a ramrod, so it has unlimited credit
+ * for one command.
+ */
+ mcast_obj->max_cmd_len = -1;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = NULL;
+ mcast_obj->validate = bnx2x_mcast_validate_e1h;
+ mcast_obj->revert = bnx2x_mcast_revert_e1h;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ } else {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
+ mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore =
+ bnx2x_mcast_handle_restore_cmd_e2;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+ /* TODO: There should be a proper HSI define for this number!!!
+ */
+ mcast_obj->max_cmd_len = 16;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
+ mcast_obj->validate = bnx2x_mcast_validate_e2;
+ mcast_obj->revert = bnx2x_mcast_revert_e2;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ }
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * __atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...if (v + a) is less than u.
+ *
+ * returns true if (v + a) was less than u, and false otherwise.
+ *
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c + a >= u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c + a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
+
+/**
+ * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to dec from v...
+ * @u: ...if (v - a) is greater than or equal to u.
+ *
+ * returns true if (v - a) was greater than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c - a < u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c - a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
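+
+/* Usage sketch (illustrative only): the two helpers above implement a
+ * classic compare-and-swap retry loop. A caller that wants to consume
+ * `cnt' units of credit without ever driving the counter below zero
+ * would do something like:
+ *
+ *	if (!__atomic_dec_ifmoe(&pool->credit, cnt, 0))
+ *		return -ENOMEM;		// not enough credit left
+ *	// ... use the credit ...
+ *	__atomic_add_ifless(&pool->credit, cnt, pool->pool_sz + 1);
+ *
+ * which is exactly the pattern the pool get()/put() callbacks below use.
+ */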
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+ rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+ smp_mb();
+
+ return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+
+	/* Don't allow a refill if credit + cnt > pool_sz */
+ rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+ smp_mb();
+
+ return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+ int cur_credit;
+
+ smp_mb();
+ cur_credit = atomic_read(&o->credit);
+
+ return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+ int cnt)
+{
+ return true;
+}
+
+static bool bnx2x_credit_pool_get_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ int idx, vec, i;
+
+ *offset = -1;
+
+ /* Find "internal cam-offset" then add to base for this object... */
+ for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+ /* Skip the current vector if there are no free entries in it */
+ if (!o->pool_mirror[vec])
+ continue;
+
+ /* If we've got here we are going to find a free entry */
+ for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+ i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+ if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+ /* Got one!! */
+ BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+ *offset = o->base_pool_offset + idx;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool bnx2x_credit_pool_put_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ if (offset < o->base_pool_offset)
+ return false;
+
+ offset -= o->base_pool_offset;
+
+ if (offset >= o->pool_sz)
+ return false;
+
+ /* Return the entry to the pool */
+ BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+ return true;
+}
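+
+/* Illustrative example (not part of the driver): get_entry()/put_entry()
+ * above translate between a global CAM offset and a bit index in the
+ * local mirror. With base_pool_offset == 32, a successful get_entry()
+ * that clears mirror bit 5 reports *offset == 37, and put_entry(o, 37)
+ * later sets mirror bit 37 - 32 == 5 again.
+ */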
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ *offset = -1;
+ return true;
+}
+
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p: credit pool
+ * @base: Base entry in the CAM to use.
+ * @credit: pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit)
+{
+ /* Zero the object first */
+ memset(p, 0, sizeof(*p));
+
+ /* Set the table to all 1s */
+ memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+ /* Init a pool as full */
+ atomic_set(&p->credit, credit);
+
+	/* The total pool size */
+ p->pool_sz = credit;
+
+ p->base_pool_offset = base;
+
+ /* Commit the change */
+ smp_mb();
+
+ p->check = bnx2x_credit_pool_check;
+
+ /* if pool credit is negative - disable the checks */
+ if (credit >= 0) {
+ p->put = bnx2x_credit_pool_put;
+ p->get = bnx2x_credit_pool_get;
+ p->put_entry = bnx2x_credit_pool_put_entry;
+ p->get_entry = bnx2x_credit_pool_get_entry;
+ } else {
+ p->put = bnx2x_credit_pool_always_true;
+ p->get = bnx2x_credit_pool_always_true;
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+
+ /* If base is negative - disable entries handling */
+ if (base < 0) {
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+}
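+
+/* Usage sketch (illustrative only), showing how negative `base' and
+ * `credit' arguments select the degenerate behaviours described above:
+ *
+ *	bnx2x_init_credit_pool(p, 0, 0);    // blocked: get() never succeeds
+ *	bnx2x_init_credit_pool(p, 0, -1);   // unlimited: all ops succeed
+ *	bnx2x_init_credit_pool(p, -1, 100); // 100 credits, no CAM handling
+ */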
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+ int cam_sz;
+
+ if (CHIP_IS_E1(bp)) {
+ /* In E1, Multicast is saved in cam... */
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+ } else if (CHIP_IS_E1H(bp)) {
+		/* CAM credit is equally divided between all active functions
+		 * on the PORT.
+		 */
+		if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+
+ } else {
+
+		/* CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+ if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+ /* No need for CAM entries handling for 57712 and
+ * newer.
+ */
+ bnx2x_init_credit_pool(p, -1, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+ }
+}
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p,
+ u8 func_id,
+ u8 func_num)
+{
+ if (CHIP_IS_E1x(bp)) {
+		/* There is no VLAN credit in HW on 57710 and 57711; only
+		 * MAC / MAC-VLAN can be set
+		 */
+ bnx2x_init_credit_pool(p, 0, -1);
+ } else {
+ /* CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+ bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
+ } else
+ /* this should never happen! Block VLAN operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp: driver handle
+ * @p: pointer to rss configuration
+ *
+ * Prints it when NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int i;
+
+ DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+ DP(BNX2X_MSG_SP, "0x0000: ");
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+ DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+ /* Print 4 bytes in a line */
+ if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+ (((i + 1) & 0x3) == 0)) {
+ DP_CONT(BNX2X_MSG_SP, "\n");
+ DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+ }
+ }
+
+ DP_CONT(BNX2X_MSG_SP, "\n");
+}
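+
+/* Sample output (illustrative), assuming a 128-entry indirection table:
+ * the helper above prints four entries per line, each line prefixed with
+ * the index of its first entry:
+ *
+ *	0x0000: 0x00 0x01 0x02 0x03
+ *	0x0004: 0x04 0x05 0x06 0x07
+ *	...
+ */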
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp: device handle
+ * @p: rss configuration
+ *
+ * Sends an UPDATE ramrod for that matter.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct eth_rss_update_ramrod_data *data =
+ (struct eth_rss_update_ramrod_data *)(r->rdata);
+ u16 caps = 0;
+ u8 rss_mode = 0;
+ int rc;
+
+ memset(data, 0, sizeof(*data));
+
+ DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+ /* Set an echo field */
+ data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT));
+
+ /* RSS mode */
+ if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_DISABLED;
+ else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_REGULAR;
+
+ data->rss_mode = rss_mode;
+
+ DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+ /* RSS capabilities */
+ if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
+
+ /* RSS keys */
+ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+ u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
+ const u8 *src = (const u8 *)p->rss_key;
+ int i;
+
+		/* Apparently, bnx2x reads this array in reverse order.
+		 * We need to byte-swap rss_key to comply with the Toeplitz
+		 * specs.
+		 */
+ for (i = 0; i < sizeof(data->rss_key); i++)
+ *--dst = *src++;
+
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+ }
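+
+	/* Illustrative note (not part of the driver): assuming a 40-byte
+	 * key, the loop above stores src[0] into dst[39], src[1] into
+	 * dst[38] and so on, i.e. data->rss_key ends up holding the
+	 * caller's key reversed byte by byte, end to end.
+	 */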
+
+ data->capabilities = cpu_to_le16(caps);
+
+ /* Hashing mask */
+ data->rss_result_mask = p->rss_result_mask;
+
+ /* RSS engine ID */
+ data->rss_engine_id = o->engine_id;
+
+ DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+ /* Indirection table */
+ memcpy(data->indirection_table, p->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Remember the last configuration */
+ memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Print the indirection table */
+ if (netif_msg_ifup(bp))
+ bnx2x_debug_print_ind_table(bp, p);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+ U64_HI(r->rdata_mapping),
+ U64_LO(r->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+
+ if (rc < 0)
+ return rc;
+
+ return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+ u8 *ind_table)
+{
+ memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int rc;
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ /* Do nothing if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ p->ramrod_flags);
+ return 0;
+ }
+
+ r->set_pending(r);
+
+ rc = o->config_rss(bp, p);
+ if (rc < 0) {
+ r->clear_pending(r);
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = r->wait_comp(bp, r);
+
+ return rc;
+}
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+ struct bnx2x_rss_config_obj *rss_obj,
+ u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+ void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type)
+{
+ bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type);
+
+ rss_obj->engine_id = engine_id;
+ rss_obj->config_rss = bnx2x_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transition
+ *
+ * returns 0 in case of a successfully completed transition, a negative
+ * error code in case of failure, a positive (EBUSY) value if there is
+ * a completion that is still pending (possible only if RAMROD_COMP_WAIT
+ * is not set in params->ramrod_flags for asynchronous commands).
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ int rc, pending_bit;
+ unsigned long *pending = &o->pending;
+
+ /* Check that the requested transition is legal */
+ rc = o->check_transition(bp, o, params);
+ if (rc) {
+ BNX2X_ERR("check transition returned an error. rc %d\n", rc);
+ return -EINVAL;
+ }
+
+ /* Set "pending" bit */
+ DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
+ pending_bit = o->set_pending(o, params);
+ DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+ o->complete_cmd(bp, o, pending_bit);
+ else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+ if (rc) {
+ o->next_state = BNX2X_Q_STATE_MAX;
+ clear_bit(pending_bit, pending);
+ smp_mb__after_atomic();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, pending_bit);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(pending_bit, pending);
+}
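+
+/* Usage sketch (illustrative only): a typical synchronous caller sets
+ * RAMROD_COMP_WAIT so that a return value of 0 means the transition has
+ * fully completed by the time the call returns:
+ *
+ *	struct bnx2x_queue_state_params q_params = {0};
+ *
+ *	q_params.q_obj = q_obj;		// the queue's bnx2x_queue_sp_obj
+ *	q_params.cmd = BNX2X_Q_CMD_ACTIVATE;
+ *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ *	rc = bnx2x_queue_state_change(bp, &q_params);
+ */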
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+ /* ACTIVATE and DEACTIVATE commands are implemented on top of
+ * UPDATE command.
+ */
+ if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+ (cmd == BNX2X_Q_CMD_DEACTIVATE))
+ bit = BNX2X_Q_CMD_UPDATE;
+ else
+ bit = cmd;
+
+ set_bit(bit, &obj->pending);
+ return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp: device handle
+ * @o: queue info
+ * @cmd: command to exec
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
+ cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ if (o->next_tx_only >= o->max_cos)
+		/* >= because the number of tx-only connections must always be
+		 * smaller than max_cos, since the primary connection supports
+		 * COS 0
+		 */
+ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
+ o->next_tx_only, o->max_cos);
+
+ DP(BNX2X_MSG_SP,
+ "Completing command %d for queue %d, setting state to %d\n",
+ cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+
+ if (o->next_tx_only) /* print num tx-only if any exist */
+ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+
+ o->state = o->next_state;
+ o->num_tx_only = o->next_tx_only;
+ o->next_state = BNX2X_Q_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_atomic();
+
+ return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+ /* Rx data */
+
+ /* IPv6 TPA supported for E2 and above only */
+ data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_general_setup_params *params,
+ struct client_init_general_data *gen_data,
+ unsigned long *flags)
+{
+ gen_data->client_id = o->cl_id;
+
+ if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
+ gen_data->statistics_counter_id =
+ params->stat_id;
+ gen_data->statistics_en_flg = 1;
+ gen_data->statistics_zero_flg =
+ test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
+ } else
+ gen_data->statistics_counter_id =
+ DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+ gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
+ gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
+ gen_data->sp_client_id = params->spcl_id;
+ gen_data->mtu = cpu_to_le16(params->mtu);
+ gen_data->func_id = o->func_id;
+
+ gen_data->cos = params->cos;
+
+ gen_data->traffic_type =
+ test_bit(BNX2X_Q_FLG_FCOE, flags) ?
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+ gen_data->fp_hsi_ver = params->fp_hsi;
+
+ DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
+ gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
+}
+
+static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_txq_setup_params *params,
+ struct client_init_tx_data *tx_data,
+ unsigned long *flags)
+{
+ tx_data->enforce_security_flg =
+ test_bit(BNX2X_Q_FLG_TX_SEC, flags);
+ tx_data->default_vlan =
+ cpu_to_le16(params->default_vlan);
+ tx_data->default_vlan_flg =
+ test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
+ tx_data->tx_switching_flg =
+ test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
+ tx_data->anti_spoofing_flg =
+ test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+ tx_data->force_default_pri_flg =
+ test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->refuse_outband_vlan_flg =
+ test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
+ tx_data->tunnel_lso_inc_ip_id =
+ test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
+ CSUM_ON_BD;
+
+ tx_data->tx_status_block_id = params->fw_sb_id;
+ tx_data->tx_sb_index_number = params->sb_cq_index;
+ tx_data->tss_leading_client_id = params->tss_leading_cl_id;
+
+ tx_data->tx_bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->dscr_map));
+ tx_data->tx_bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->dscr_map));
+
+ /* Don't configure any Tx switching mode during queue SETUP */
+ tx_data->state = 0;
+}
+
+static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
+ struct rxq_pause_params *params,
+ struct client_init_rx_data *rx_data)
+{
+ /* flow control data */
+ rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
+ rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
+ rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
+ rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
+ rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
+ rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
+ rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
+}
+
+static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_rxq_setup_params *params,
+ struct client_init_rx_data *rx_data,
+ unsigned long *flags)
+{
+ rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+ rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
+ CLIENT_INIT_RX_DATA_TPA_MODE;
+ rx_data->vmqueue_mode_en_flg = 0;
+
+ rx_data->cache_line_alignment_log_size =
+ params->cache_line_log;
+ rx_data->enable_dynamic_hc =
+ test_bit(BNX2X_Q_FLG_DHC, flags);
+ rx_data->max_sges_for_packet = params->max_sges_pkt;
+ rx_data->client_qzone_id = params->cl_qzone_id;
+ rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
+
+ /* Always start in DROP_ALL mode */
+ rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+ CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+ /* We don't set drop flags */
+ rx_data->drop_ip_cs_err_flg = 0;
+ rx_data->drop_tcp_cs_err_flg = 0;
+ rx_data->drop_ttl0_flg = 0;
+ rx_data->drop_udp_cs_err_flg = 0;
+ rx_data->inner_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_FLG_VLAN, flags);
+ rx_data->outer_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_FLG_OV, flags);
+ rx_data->status_block_id = params->fw_sb_id;
+ rx_data->rx_sb_index_number = params->sb_cq_index;
+ rx_data->max_tpa_queues = params->max_tpa_queues;
+ rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
+ rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
+ rx_data->bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->dscr_map));
+ rx_data->bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->dscr_map));
+ rx_data->sge_page_base.lo =
+ cpu_to_le32(U64_LO(params->sge_map));
+ rx_data->sge_page_base.hi =
+ cpu_to_le32(U64_HI(params->sge_map));
+ rx_data->cqe_page_base.lo =
+ cpu_to_le32(U64_LO(params->rcq_map));
+ rx_data->cqe_page_base.hi =
+ cpu_to_le32(U64_HI(params->rcq_map));
+ rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
+
+ if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
+ rx_data->approx_mcast_engine_id = params->mcast_engine_id;
+ rx_data->is_approx_mcast = 1;
+ }
+
+ rx_data->rss_engine_id = params->rss_engine_id;
+
+ /* silent vlan removal */
+ rx_data->silent_vlan_removal_flg =
+ test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
+ rx_data->silent_vlan_value =
+ cpu_to_le16(params->silent_removal_value);
+ rx_data->silent_vlan_mask =
+ cpu_to_le16(params->silent_removal_mask);
+}
+
+/* initialize the general, tx and rx parts of a queue object */
+static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+ &cmd_params->params.setup.gen_params,
+ &data->general,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+ &cmd_params->params.setup.txq_params,
+ &data->tx,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
+ &cmd_params->params.setup.rxq_params,
+ &data->rx,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
+ &cmd_params->params.setup.pause_params,
+ &data->rx);
+}
+
+/* initialize the general and tx parts of a tx-only queue object */
+static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct tx_queue_init_ramrod_data *data)
+{
+ bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+ &cmd_params->params.tx_only.gen_params,
+ &data->general,
+ &cmd_params->params.tx_only.flags);
+
+ bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+ &cmd_params->params.tx_only.txq_params,
+ &data->tx,
+ &cmd_params->params.tx_only.flags);
+
+ DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
+ cmd_params->q_obj->cids[0],
+ data->tx.tx_bd_page_base.lo,
+ data->tx.tx_bd_page_base.hi);
+}
+
+/**
+ * bnx2x_q_init - init HW/FW queue
+ *
+ * @bp: device handle
+ * @params: queue state parameters (init command)
+ *
+ * HW/FW initial Queue configuration:
+ * - HC: Rx and Tx
+ * - CDU context validation
+ *
+ */
+static inline int bnx2x_q_init(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct bnx2x_queue_init_params *init = &params->params.init;
+ u16 hc_usec;
+ u8 cos;
+
+ /* Tx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
+ hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
+
+ bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+ init->tx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+ hc_usec);
+ }
+
+ /* Rx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+ hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+ bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+ init->rx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+ hc_usec);
+ }
+
+ /* Set CDU context validation values */
+ for (cos = 0; cos < o->max_cos; cos++) {
+ DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
+ o->cids[cos], cos);
+ DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
+ bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
+ }
+
+ /* As no ramrod is sent, complete the command immediately */
+ o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+ smp_mb();
+
+ return 0;
+}
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+ bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tx_queue_init_ramrod_data *rdata =
+ (struct tx_queue_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &params->params.tx_only;
+ u8 cid_index = tx_only_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
+ tx_only_params->gen_params.cos,
+ tx_only_params->gen_params.spcl_id);
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_tx_only(bp, params, rdata);
+
+ DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
+ o->cids[cid_index], rdata->general.client_id,
+ rdata->general.sp_client_id, rdata->general.cos);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_params *params,
+ struct client_update_ramrod_data *data)
+{
+ /* Client ID of the client to update */
+ data->client_id = obj->cl_id;
+
+ /* Function ID of the client to update */
+ data->func_id = obj->func_id;
+
+ /* Default VLAN value */
+ data->default_vlan = cpu_to_le16(params->def_vlan);
+
+ /* Inner VLAN stripping */
+ data->inner_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+ data->inner_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Outer VLAN stripping */
+ data->outer_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+ data->outer_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Drop packets that have source MAC that doesn't belong to this
+ * Queue.
+ */
+ data->anti_spoofing_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+ data->anti_spoofing_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+ /* Activate/Deactivate */
+ data->activate_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+ data->activate_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+ /* Enable default VLAN */
+ data->default_vlan_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+ data->default_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &params->update_flags);
+
+ /* silent vlan removal */
+ data->silent_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &params->update_flags);
+ data->silent_vlan_removal_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+ data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+ data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+ /* tx switching */
+ data->tx_switching_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+ data->tx_switching_change_flg =
+ test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &params->update_flags);
+
+ /* PTP */
+ data->handle_ptp_pkts_flg =
+ test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
+ data->handle_ptp_pkts_change_flg =
+ test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_update_ramrod_data *rdata =
+ (struct client_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 cid_index = update_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_data(bp, o, update_params, rdata);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ o->cids[cid_index], U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp: device handle
+ * @params: queue state parameters
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp: device handle
+ * @params: queue state parameters
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
+
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_tpa_params *params,
+ struct tpa_update_ramrod_data *data)
+{
+ data->client_id = obj->cl_id;
+ data->complete_on_both_clients = params->complete_on_both_clients;
+ data->dont_verify_rings_pause_thr_flg =
+ params->dont_verify_thr;
+ data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+ data->max_sges_for_packet = params->max_sges_pkt;
+ data->max_tpa_queues = params->max_tpa_queues;
+ data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+ data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+ data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+ data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+ data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+ data->tpa_mode = params->tpa_mode;
+ data->update_ipv4 = params->update_ipv4;
+ data->update_ipv6 = params->update_ipv6;
+}
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tpa_update_ramrod_data *rdata =
+ (struct tpa_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_tpa_params *update_tpa_params =
+ &params->params.update_tpa;
+ u16 type;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+	/* Add the function id inside the type, so that the sp post function
+	 * doesn't automatically add the PF func-id; this is required
+	 * for operations done by PFs on behalf of their VFs.
+	 */
+ type = ETH_CONNECTION_TYPE |
+ ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), type);
+}
+
+static inline int bnx2x_q_send_halt(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
+ ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ u8 cid_idx = params->params.cfc_del.cid_index;
+
+ if (cid_idx >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_idx);
+ return -EINVAL;
+ }
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ u8 cid_index = params->params.terminate.cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
+ o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_empty(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
+ ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_INIT:
+ return bnx2x_q_init(bp, params);
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ return bnx2x_q_send_setup_tx_only(bp, params);
+ case BNX2X_Q_CMD_DEACTIVATE:
+ return bnx2x_q_send_deactivate(bp, params);
+ case BNX2X_Q_CMD_ACTIVATE:
+ return bnx2x_q_send_activate(bp, params);
+ case BNX2X_Q_CMD_UPDATE:
+ return bnx2x_q_send_update(bp, params);
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ return bnx2x_q_send_update_tpa(bp, params);
+ case BNX2X_Q_CMD_HALT:
+ return bnx2x_q_send_halt(bp, params);
+ case BNX2X_Q_CMD_CFC_DEL:
+ return bnx2x_q_send_cfc_del(bp, params);
+ case BNX2X_Q_CMD_TERMINATE:
+ return bnx2x_q_send_terminate(bp, params);
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_q_send_empty(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_SETUP:
+ return bnx2x_q_send_setup_e1x(bp, params);
+ case BNX2X_Q_CMD_INIT:
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ case BNX2X_Q_CMD_DEACTIVATE:
+ case BNX2X_Q_CMD_ACTIVATE:
+ case BNX2X_Q_CMD_UPDATE:
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ case BNX2X_Q_CMD_HALT:
+ case BNX2X_Q_CMD_CFC_DEL:
+ case BNX2X_Q_CMD_TERMINATE:
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_queue_send_cmd_cmn(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_SETUP:
+ return bnx2x_q_send_setup_e2(bp, params);
+ case BNX2X_Q_CMD_INIT:
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ case BNX2X_Q_CMD_DEACTIVATE:
+ case BNX2X_Q_CMD_ACTIVATE:
+ case BNX2X_Q_CMD_UPDATE:
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ case BNX2X_Q_CMD_HALT:
+ case BNX2X_Q_CMD_CFC_DEL:
+ case BNX2X_Q_CMD_TERMINATE:
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_queue_send_cmd_cmn(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+/**
+ * bnx2x_queue_chk_transition - check state machine of a regular (not Forwarding) Queue
+ *
+ * @bp: device handle
+ * @o: queue info
+ * @params: queue state
+ *
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+ enum bnx2x_queue_cmd cmd = params->cmd;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 next_tx_only = o->num_tx_only;
+
+ /* Forget all pending for completion commands if a driver only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_Q_STATE_MAX;
+ }
+
+ /* Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending) {
+ BNX2X_ERR("Blocking transition since pending was %lx\n",
+ o->pending);
+ return -EBUSY;
+ }
+
+ switch (state) {
+ case BNX2X_Q_STATE_RESET:
+ if (cmd == BNX2X_Q_CMD_INIT)
+ next_state = BNX2X_Q_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_Q_STATE_INITIALIZED:
+ if (cmd == BNX2X_Q_CMD_SETUP) {
+ if (test_bit(BNX2X_Q_FLG_ACTIVE,
+ &params->params.setup.flags))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_ACTIVE:
+ if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = 1;
+ }
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MULTI_COS:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = o->num_tx_only + 1;
+ }
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL) {
+ next_tx_only = o->num_tx_only - 1;
+ if (next_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_INACTIVE:
+ if (cmd == BNX2X_Q_CMD_ACTIVATE)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags)){
+ if (o->num_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else /* tx only queues exist for this queue */
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ } else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_STOPPED:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_TERMINATED;
+
+ break;
+ case BNX2X_Q_STATE_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL)
+ next_state = BNX2X_Q_STATE_RESET;
+
+ break;
+ default:
+ BNX2X_ERR("Illegal state: %d\n", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != BNX2X_Q_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ o->next_tx_only = next_tx_only;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+ return -EINVAL;
+}
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
+ void *rdata,
+ dma_addr_t rdata_mapping, unsigned long type)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
+ BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
+
+ memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
+ obj->max_cos = cid_cnt;
+ obj->cl_id = cl_id;
+ obj->func_id = func_id;
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+ obj->type = type;
+ obj->next_state = BNX2X_Q_STATE_MAX;
+
+ if (CHIP_IS_E1x(bp))
+ obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+ else
+ obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+ obj->check_transition = bnx2x_queue_chk_transition;
+
+ obj->complete_cmd = bnx2x_queue_comp_cmd;
+ obj->wait_comp = bnx2x_queue_wait_comp;
+ obj->set_pending = bnx2x_queue_set_pending;
+}
+
+/* Return a queue object's logical state */
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj)
+{
+ switch (obj->state) {
+ case BNX2X_Q_STATE_ACTIVE:
+ case BNX2X_Q_STATE_MULTI_COS:
+ return BNX2X_Q_LOGICAL_STATE_ACTIVE;
+ case BNX2X_Q_STATE_RESET:
+ case BNX2X_Q_STATE_INITIALIZED:
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ case BNX2X_Q_STATE_INACTIVE:
+ case BNX2X_Q_STATE_STOPPED:
+ case BNX2X_Q_STATE_TERMINATED:
+ case BNX2X_Q_STATE_FLRED:
+ return BNX2X_Q_LOGICAL_STATE_STOPPED;
+ default:
+ return -EINVAL;
+ }
+}
+
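+/* Illustrative sketch: callers normally gate per-queue work on the logical
+ * state rather than on the detailed one (bp/q_obj assumed from the caller):
+ *
+ *	bool active =
+ *		bnx2x_get_q_logical_state(bp, q_obj) ==
+ *		BNX2X_Q_LOGICAL_STATE_ACTIVE;
+ */
+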
+/********************** Function state object *********************************/
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o)
+{
+	/* In the middle of a transaction - return INVALID state */
+ if (o->pending)
+ return BNX2X_F_STATE_MAX;
+
+	/* Ensure the ordering of reading o->pending and o->state:
+	 * o->pending must be read first.
+	 */
+ rmb();
+
+ return o->state;
+}
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp: device handle
+ * @o: function info
+ * @cmd: command that was completed
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
+ cmd, BP_FUNC(bp), o->state,
+ cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP,
+ "Completing command %d for func %d, setting state to %d\n",
+ cmd, BP_FUNC(bp), o->next_state);
+
+ o->state = o->next_state;
+ o->next_state = BNX2X_F_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_atomic();
+
+ return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp: device handle
+ * @o: function info
+ * @cmd: command whose completion arrived
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+	/* Complete the state machine part first and check whether this
+	 * is a legal completion.
+ */
+ int rc = bnx2x_func_state_change_comp(bp, o, cmd);
+ return rc;
+}
+
+/**
+ * bnx2x_func_chk_transition - perform function state machine transition
+ *
+ * @bp: device handle
+ * @o: function info
+ * @params: state parameters
+ *
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ struct bnx2x_func_state_params *params)
+{
+ enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+ enum bnx2x_func_cmd cmd = params->cmd;
+
+ /* Forget all pending for completion commands if a driver only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_F_STATE_MAX;
+ }
+
+ /* Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return -EBUSY;
+
+ switch (state) {
+ case BNX2X_F_STATE_RESET:
+ if (cmd == BNX2X_F_CMD_HW_INIT)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_F_STATE_INITIALIZED:
+ if (cmd == BNX2X_F_CMD_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ else if (cmd == BNX2X_F_CMD_HW_RESET)
+ next_state = BNX2X_F_STATE_RESET;
+
+ break;
+ case BNX2X_F_STATE_STARTED:
+ if (cmd == BNX2X_F_CMD_STOP)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+ /* afex ramrods can be sent only in started mode, and only
+		 * if a function_stop ramrod completion is not pending;
+		 * for these events the next state remains STARTED.
+ */
+ else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_STARTED;
+
+ else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_STARTED;
+
+ /* Switch_update ramrod can be sent in either started or
+ * tx_stopped state, and it doesn't change the state.
+ */
+ else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_STARTED;
+
+ else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_STARTED;
+
+ else if (cmd == BNX2X_F_CMD_TX_STOP)
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ break;
+ case BNX2X_F_STATE_TX_STOPPED:
+ if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ else if (cmd == BNX2X_F_CMD_TX_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown state: %d\n", state);
+ }
+
+	/* A legal transition was found */
+ if (next_state != BNX2X_F_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+ state, cmd);
+
+ return -EINVAL;
+}
+
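+/* For reference, the legal command walk encoded above is:
+ *
+ *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
+ *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
+ *	STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
+ *
+ * AFEX_UPDATE, AFEX_VIFLISTS, SWITCH_UPDATE and SET_TIMESYNC keep the
+ * current state and are refused while a STOP is pending.
+ */
+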
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_port(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn_chip(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 load_code = params->params.hw_init.load_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+ int rc = 0;
+
+ DP(BNX2X_MSG_SP, "function %d load_code %x\n",
+ BP_ABS_FUNC(bp), load_code);
+
+ /* Prepare buffers for unzipping the FW */
+ rc = drv->gunzip_init(bp);
+ if (rc)
+ return rc;
+
+ /* Prepare FW */
+ rc = drv->init_fw(bp);
+ if (rc) {
+ BNX2X_ERR("Error loading firmware\n");
+ goto init_err;
+ }
+
+	/* Handle the beginning of COMMON_XXX phases separately... */
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ rc = bnx2x_func_init_cmn_chip(bp, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ rc = bnx2x_func_init_cmn(bp, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = bnx2x_func_init_port(bp, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = bnx2x_func_init_func(bp, drv);
+ if (rc)
+ goto init_err;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ rc = -EINVAL;
+ }
+
+init_err:
+ drv->gunzip_end(bp);
+
+ /* In case of success, complete the command immediately: no ramrods
+ * have been sent.
+ */
+ if (!rc)
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+ return rc;
+}
+
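+/* Illustrative sketch: how a load flow might request the HW_INIT command.
+ * load_code is assumed to be the value returned by the MCP LOAD_REQ
+ * handshake:
+ *
+ *	struct bnx2x_func_state_params func_params = {0};
+ *
+ *	func_params.f_obj = &bp->func_obj;
+ *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+ *	func_params.params.hw_init.load_phase = load_code;
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ */
+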
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ * !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func(): the last thing
+ * reset_func() does is pf_disable(), thus disabling PGLUE_B, which
+ * makes any further DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_port(bp);
+ bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ bnx2x_func_reset_port(bp, drv);
+ drv->reset_hw_cmn(bp);
+}
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 reset_phase = params->params.hw_reset.reset_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+ DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
+ reset_phase);
+
+ switch (reset_phase) {
+ case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+ bnx2x_func_reset_cmn(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_PORT:
+ bnx2x_func_reset_port(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+ bnx2x_func_reset_func(bp, drv);
+ break;
+ default:
+ BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+ reset_phase);
+ break;
+ }
+
+ /* Complete the command immediately: no ramrods have been sent. */
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+ return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_start_data *rdata =
+ (struct function_start_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_start_params *start_params = &params->params.start;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->dmae_cmd_id = BNX2X_FW_DMAE_C;
+
+ rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
+ rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
+ rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
+ rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+ rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
+ rdata->inner_rss = start_params->inner_rss;
+
+ rdata->sd_accept_mf_clss_fail = start_params->class_fail;
+ if (start_params->class_fail_ethtype) {
+ rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
+ rdata->sd_accept_mf_clss_fail_ethtype =
+ cpu_to_le16(start_params->class_fail_ethtype);
+ }
+
+ rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
+ rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
+ if (start_params->sd_vlan_eth_type)
+ rdata->sd_vlan_eth_type =
+ cpu_to_le16(start_params->sd_vlan_eth_type);
+ else
+ rdata->sd_vlan_eth_type =
+ cpu_to_le16(0x8100);
+
+ rdata->no_added_tags = start_params->no_added_tags;
+
+ rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+ if (rdata->c2s_pri_tt_valid) {
+ memcpy(rdata->c2s_pri_trans_table.val,
+ start_params->c2s_pri,
+ MAX_VLAN_PRIORITIES);
+ rdata->c2s_pri_default = start_params->c2s_pri_default;
+ }
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_update_data *rdata =
+ (struct function_update_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_switch_update_params *switch_update_params =
+ &params->params.switch_update;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+ &switch_update_params->changes)) {
+ rdata->tx_switch_suspend_change_flg = 1;
+ rdata->tx_switch_suspend =
+ test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+ &switch_update_params->changes);
+ }
+
+ if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+ &switch_update_params->changes)) {
+ rdata->sd_vlan_tag_change_flg = 1;
+ rdata->sd_vlan_tag =
+ cpu_to_le16(switch_update_params->vlan);
+ }
+
+ if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
+ &switch_update_params->changes)) {
+ rdata->sd_vlan_eth_type_change_flg = 1;
+ rdata->sd_vlan_eth_type =
+ cpu_to_le16(switch_update_params->vlan_eth_type);
+ }
+
+ if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
+ &switch_update_params->changes)) {
+ rdata->sd_vlan_force_pri_change_flg = 1;
+		if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
+			     &switch_update_params->changes))
+			rdata->sd_vlan_force_pri_flg = 1;
+		/* The forced priority itself goes in the value field */
+		rdata->sd_vlan_force_pri_val =
+			switch_update_params->vlan_force_prio;
+	}
+
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ &switch_update_params->changes)) {
+ rdata->update_tunn_cfg_flg = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ &switch_update_params->changes))
+ rdata->inner_clss_l2gre = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ &switch_update_params->changes))
+ rdata->inner_clss_vxlan = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+ &switch_update_params->changes))
+ rdata->inner_clss_l2geneve = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+ &switch_update_params->changes))
+ rdata->inner_rss = 1;
+ rdata->vxlan_dst_port =
+ cpu_to_le16(switch_update_params->vxlan_dst_port);
+ rdata->geneve_dst_port =
+ cpu_to_le16(switch_update_params->geneve_dst_port);
+ }
+
+ rdata->echo = SWITCH_UPDATE;
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
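+/* Illustrative sketch: pushing a new VXLAN UDP destination port to FW via
+ * the SWITCH_UPDATE command (bp/port assumed from the caller):
+ *
+ *	struct bnx2x_func_state_params params = {0};
+ *	struct bnx2x_func_switch_update_params *sw =
+ *		&params.params.switch_update;
+ *
+ *	params.f_obj = &bp->func_obj;
+ *	params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+ *	__set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &sw->changes);
+ *	sw->vxlan_dst_port = port;
+ *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+ *	rc = bnx2x_func_state_change(bp, &params);
+ */
+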
+static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_update_data *rdata =
+ (struct function_update_data *)o->afex_rdata;
+ dma_addr_t data_mapping = o->afex_rdata_mapping;
+ struct bnx2x_func_afex_update_params *afex_update_params =
+ &params->params.afex_update;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->vif_id_change_flg = 1;
+ rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
+ rdata->afex_default_vlan_change_flg = 1;
+ rdata->afex_default_vlan =
+ cpu_to_le16(afex_update_params->afex_default_vlan);
+ rdata->allowed_priorities_change_flg = 1;
+ rdata->allowed_priorities = afex_update_params->allowed_priorities;
+ rdata->echo = AFEX_UPDATE;
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ DP(BNX2X_MSG_SP,
+ "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
+ rdata->vif_id,
+ rdata->afex_default_vlan, rdata->allowed_priorities);
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static
+inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct afex_vif_list_ramrod_data *rdata =
+ (struct afex_vif_list_ramrod_data *)o->afex_rdata;
+ struct bnx2x_func_afex_viflists_params *afex_vif_params =
+ &params->params.afex_viflists;
+ u64 *p_rdata = (u64 *)rdata;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
+ rdata->func_bit_map = afex_vif_params->func_bit_map;
+ rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
+ rdata->func_to_clear = afex_vif_params->func_to_clear;
+
+ /* send in echo type of sub command */
+ rdata->echo = afex_vif_params->afex_vif_list_command;
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+
+ DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
+ rdata->afex_vif_list_command, rdata->vif_list_index,
+ rdata->func_bit_map, rdata->func_to_clear);
+
+ /* this ramrod sends data directly and not through DMA mapping */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
+ U64_HI(*p_rdata), U64_LO(*p_rdata),
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct flow_control_configuration *rdata =
+ (struct flow_control_configuration *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_tx_start_params *tx_start_params =
+ &params->params.tx_start;
+ int i;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ rdata->dcb_enabled = tx_start_params->dcb_enabled;
+ rdata->dcb_version = tx_start_params->dcb_version;
+ rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
+
+ for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+ rdata->traffic_type_to_priority_cos[i] =
+ tx_start_params->traffic_type_to_priority_cos[i];
+
+ for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+ rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline
+int bnx2x_func_send_set_timesync(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct set_timesync_ramrod_data *rdata =
+ (struct set_timesync_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &params->params.set_timesync;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
+ rdata->offset_cmd = set_timesync_params->offset_cmd;
+ rdata->add_sub_drift_adjust_value =
+ set_timesync_params->add_sub_drift_adjust_value;
+ rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
+ rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
+ rdata->offset_delta.lo =
+ cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
+ rdata->offset_delta.hi =
+ cpu_to_le32(U64_HI(set_timesync_params->offset_delta));
+
+ DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
+ rdata->drift_adjust_cmd, rdata->offset_cmd,
+ rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
+ rdata->drift_adjust_period, rdata->offset_delta.lo,
+ rdata->offset_delta.hi);
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_F_CMD_HW_INIT:
+ return bnx2x_func_hw_init(bp, params);
+ case BNX2X_F_CMD_START:
+ return bnx2x_func_send_start(bp, params);
+ case BNX2X_F_CMD_STOP:
+ return bnx2x_func_send_stop(bp, params);
+ case BNX2X_F_CMD_HW_RESET:
+ return bnx2x_func_hw_reset(bp, params);
+ case BNX2X_F_CMD_AFEX_UPDATE:
+ return bnx2x_func_send_afex_update(bp, params);
+ case BNX2X_F_CMD_AFEX_VIFLISTS:
+ return bnx2x_func_send_afex_viflists(bp, params);
+ case BNX2X_F_CMD_TX_STOP:
+ return bnx2x_func_send_tx_stop(bp, params);
+ case BNX2X_F_CMD_TX_START:
+ return bnx2x_func_send_tx_start(bp, params);
+ case BNX2X_F_CMD_SWITCH_UPDATE:
+ return bnx2x_func_send_switch_update(bp, params);
+ case BNX2X_F_CMD_SET_TIMESYNC:
+ return bnx2x_func_send_set_timesync(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+void bnx2x_init_func_obj(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *obj,
+ void *rdata, dma_addr_t rdata_mapping,
+ void *afex_rdata, dma_addr_t afex_rdata_mapping,
+ struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ mutex_init(&obj->one_pending_mutex);
+
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+ obj->afex_rdata = afex_rdata;
+ obj->afex_rdata_mapping = afex_rdata_mapping;
+ obj->send_cmd = bnx2x_func_send_cmd;
+ obj->check_transition = bnx2x_func_chk_transition;
+ obj->complete_cmd = bnx2x_func_comp_cmd;
+ obj->wait_comp = bnx2x_func_wait_comp;
+
+ obj->drv = drv_iface;
+}
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transaction
+ *
+ * returns 0 in case of successfully completed transition,
+ * negative error code in case of failure, positive
+ * (EBUSY) value if there is a completion that is
+ * still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous
+ * commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ int rc, cnt = 300;
+ enum bnx2x_func_cmd cmd = params->cmd;
+ unsigned long *pending = &o->pending;
+
+ mutex_lock(&o->one_pending_mutex);
+
+ /* Check that the requested transition is legal */
+ rc = o->check_transition(bp, o, params);
+ if ((rc == -EBUSY) &&
+ (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
+ while ((rc == -EBUSY) && (--cnt > 0)) {
+ mutex_unlock(&o->one_pending_mutex);
+ msleep(10);
+ mutex_lock(&o->one_pending_mutex);
+ rc = o->check_transition(bp, o, params);
+ }
+ if (rc == -EBUSY) {
+ mutex_unlock(&o->one_pending_mutex);
+ BNX2X_ERR("timeout waiting for previous ramrod completion\n");
+ return rc;
+ }
+ } else if (rc) {
+ mutex_unlock(&o->one_pending_mutex);
+ return rc;
+ }
+
+ /* Set "pending" bit */
+ set_bit(cmd, pending);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ bnx2x_func_state_change_comp(bp, o, cmd);
+ mutex_unlock(&o->one_pending_mutex);
+ } else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+
+ mutex_unlock(&o->one_pending_mutex);
+
+ if (rc) {
+ o->next_state = BNX2X_F_STATE_MAX;
+ clear_bit(cmd, pending);
+ smp_mb__after_atomic();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, cmd);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(cmd, pending);
+}
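+
+/* Illustrative sketch: a synchronous caller sets RAMROD_COMP_WAIT so the
+ * call above only returns once the completion has arrived, and may add
+ * RAMROD_RETRY (sleepable context only) to re-poll a busy object:
+ *
+ *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ *	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ */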
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
new file mode 100644
index 0000000000..bacc8552bc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1546 @@
+/* bnx2x_sp.h: QLogic Everest network driver.
+ *
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+ RAMROD_TX,
+ RAMROD_RX,
+ /* Wait until all pending commands complete */
+ RAMROD_COMP_WAIT,
+ /* Don't send a ramrod, only update a registry */
+ RAMROD_DRV_CLR_ONLY,
+ /* Configure HW according to the current object state */
+ RAMROD_RESTORE,
+ /* Execute the next command now */
+ RAMROD_EXEC,
+	/* Don't add a new command; continue execution of postponed
+	 * commands instead. If not set, a new command will be added to
+	 * the pending commands list.
+ */
+ RAMROD_CONT,
+ /* If there is another pending ramrod, wait until it finishes and
+	 * retry to submit this one. This flag can only be set from sleepable
+	 * context, and must not be set from the context that completes the
+	 * ramrods, as a deadlock would occur.
+ */
+ RAMROD_RETRY,
+};
+
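+/* Illustrative sketch: ramrod_flags is an ordinary bitmask manipulated
+ * with the bitops, e.g. for a synchronous request:
+ *
+ *	unsigned long ramrod_flags = 0;
+ *
+ *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ */
+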
+typedef enum {
+ BNX2X_OBJ_TYPE_RX,
+ BNX2X_OBJ_TYPE_TX,
+ BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Public slow path states */
+enum {
+ BNX2X_FILTER_MAC_PENDING,
+ BNX2X_FILTER_VLAN_PENDING,
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ BNX2X_FILTER_RX_MODE_PENDING,
+ BNX2X_FILTER_RX_MODE_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ BNX2X_FILTER_FCOE_ETH_START_SCHED,
+ BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+ BNX2X_FILTER_MCAST_PENDING,
+ BNX2X_FILTER_MCAST_SCHED,
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
+ BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
+};
+
+struct bnx2x_raw_obj {
+ u8 func_id;
+
+ /* Queue params */
+ u8 cl_id;
+ u32 cid;
+
+ /* Ramrod data buffer params */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Ramrod state params */
+ int state; /* "ramrod is pending" state bit */
+ unsigned long *pstate; /* pointer to state buffer */
+
+ bnx2x_obj_type obj_type;
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_raw_obj *o);
+
+ bool (*check_pending)(struct bnx2x_raw_obj *o);
+ void (*clear_pending)(struct bnx2x_raw_obj *o);
+ void (*set_pending)(struct bnx2x_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
+};
+
+struct bnx2x_vlan_ramrod_data {
+ u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
+ u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+ struct bnx2x_mac_ramrod_data mac;
+ struct bnx2x_vlan_ramrod_data vlan;
+ struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+ BNX2X_VLAN_MAC_ADD,
+ BNX2X_VLAN_MAC_DEL,
+ BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+ /* Requested command: BNX2X_VLAN_MAC_XX */
+ enum bnx2x_vlan_mac_cmd cmd;
+	/* Used to carry the data-related vlan_mac_flags bits from the
+	 * ramrod parameters.
+ */
+ unsigned long vlan_mac_flags;
+
+ /* Needed for MOVE command */
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+ struct bnx2x_vlan_mac_data vlan_mac;
+
+ struct {
+ /* TODO */
+ } mcast;
+};
+
+struct bnx2x_exeq_elem {
+ struct list_head link;
+
+ /* Length of this element in the exe_chunk. */
+ int cmd_len;
+
+ union bnx2x_exe_queue_cmd_data cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+ union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+typedef int (*exe_q_remove)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct list_head *exe_chunk,
+ unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+ (*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+	/* Commands pending for execution. */
+ struct list_head exe_queue;
+
+	/* Commands pending for a completion. */
+ struct list_head pending_comp;
+
+ spinlock_t lock;
+
+ /* Maximum length of commands' list for one execution */
+ int exe_chunk_len;
+
+ union bnx2x_qable_obj *owner;
+
+ /****** Virtual functions ******/
+ /**
+	 * Called before command execution, for commands that will really
+	 * be executed (after 'optimize').
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_validate validate;
+
+ /**
+ * Called before removing pending commands, cleaning allocated
+ * resources (e.g., credits from validate)
+ */
+ exe_q_remove remove;
+
+ /**
+ * This will try to cancel the current pending commands list
+ * considering the new command.
+ *
+ * Returns the number of optimized commands or a negative error code
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_optimize optimize;
+
+ /**
+ * Run the next commands chunk (owner specific).
+ */
+ exe_q_execute execute;
+
+ /**
+ * Return the exe_queue element containing the specific command
+ * if any. Otherwise return NULL.
+ */
+ exe_q_get get;
+};
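+
+/* The virtual functions above are driven in a fixed order by the generic
+ * execution-queue code in bnx2x_sp.c: 'optimize' may cancel a new element
+ * against pending ones, 'validate' reserves resources (e.g. CAM credits)
+ * for elements that survive, 'execute' posts the next chunk and 'remove'
+ * releases resources when pending elements are dropped.
+ */
+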
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+ struct list_head link;
+
+ /* Used to store the cam offset used for the mac/vlan/vlan-mac.
+ * Relevant for 57710 and 57711 only. VLANs and MACs share the
+ * same CAM for these chips.
+ */
+ int cam_offset;
+
+ /* Needed for DEL and RESTORE flows */
+ unsigned long vlan_mac_flags;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+ BNX2X_UC_LIST_MAC,
+ BNX2X_ETH_MAC,
+ BNX2X_ISCSI_ETH_MAC,
+ BNX2X_NETQ_ETH_MAC,
+ BNX2X_VLAN,
+ BNX2X_DONT_CONSUME_CAM_CREDIT,
+ BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+ 1 << BNX2X_NETQ_ETH_MAC | \
+ 1 << BNX2X_VLAN)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
+
+struct bnx2x_vlan_mac_ramrod_params {
+ /* Object to run the command from */
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+ /* General command flags: COMP_WAIT, etc. */
+ unsigned long ramrod_flags;
+
+ /* Command specific configuration request */
+ struct bnx2x_vlan_mac_data user_req;
+};
+
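+/* Illustrative sketch: adding a unicast MAC classification rule, mirroring
+ * the driver's MAC configuration helpers (bp/mac_obj/mac assumed from the
+ * caller):
+ *
+ *	struct bnx2x_vlan_mac_ramrod_params ramrod_param = {0};
+ *
+ *	ramrod_param.vlan_mac_obj = mac_obj;
+ *	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *	memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
+ *	__set_bit(BNX2X_ETH_MAC, &ramrod_param.user_req.vlan_mac_flags);
+ *	__set_bit(RAMROD_COMP_WAIT, &ramrod_param.ramrod_flags);
+ *	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ */
+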
+struct bnx2x_vlan_mac_obj {
+ struct bnx2x_raw_obj raw;
+
+ /* Bookkeeping list: will prevent the addition of already existing
+ * entries.
+ */
+ struct list_head head;
+ /* Implement a simple reader/writer lock on the head list.
+ * all these fields should only be accessed under the exe_queue lock
+ */
+ u8 head_reader; /* Num. of readers accessing head list */
+ bool head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
+
+	/* TODO: Add its initialization in the init functions */
+ struct bnx2x_exe_queue_obj exe_queue;
+
+ /* MACs credit pool */
+ struct bnx2x_credit_pool_obj *macs_pool;
+
+ /* VLANs credit pool */
+ struct bnx2x_credit_pool_obj *vlans_pool;
+
+ /* RAMROD command to be used */
+ int ramrod_cmd;
+
+	/* Copy the first n elements into a preallocated buffer.
+ *
+ * @param n number of elements to get
+ * @param buf buffer preallocated by caller into which elements
+ * will be copied. Note elements are 4-byte aligned
+ * so buffer size must be able to accommodate the
+ * aligned elements.
+ *
+ * @return number of copied bytes
+ */
+ int (*get_n_elements)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+ u8 stride, u8 size);
+
+ /**
+ * Checks if ADD-ramrod with the given params may be performed.
+ *
+ * @return zero if the element may be added
+ */
+
+ int (*check_add)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Checks if DEL-ramrod with the given params may be performed.
+ *
+	 * @return the matching registry element if it may be deleted,
+	 *	   NULL otherwise
+ */
+ struct bnx2x_vlan_mac_registry_elem *
+ (*check_del)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+	 * Checks if MOVE-ramrod with the given params may be performed.
+	 *
+	 * @return true if the element may be moved
+ */
+ bool (*check_move)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Update the relevant credit object(s) (consume/return
+ * correspondingly).
+ */
+ bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+ bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
+
+ /**
+ * Configures one rule in the ramrod data buffer.
+ */
+ void (*set_one_rule)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset);
+
+ /**
+ * Delete all configured elements having the given
+	 * vlan_mac_flags specification. Assumes no commands are pending
+	 * for execution. Will schedule all currently
+ * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+ * specification for deletion and will use the given
+ * ramrod_flags for the last DEL operation.
+ *
+ * @param bp
+ * @param o
+ * @param ramrod_flags RAMROD_XX flags
+ *
+ * @return 0 if the last operation has completed successfully
+ * and there are no more elements left, positive value
+ * if there are pending for completion commands,
+ * negative value in case of failure.
+ */
+ int (*delete_all)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+ * configured elements list.
+ *
+ * @param bp
+ * @param p Command parameters (RAMROD_COMP_WAIT bit in
+ * ramrod_flags is only taken into an account)
+ * @param ppos a pointer to the cookie that should be given back in the
+ * next call to make function handle the next element. If
+ * *ppos is set to NULL it will restart the iterator.
+ * If returned *ppos == NULL this means that the last
+ * element has been handled.
+ *
+ * @return int
+ */
+ int (*restore)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_registry_elem **ppos);
+
+ /**
+ * Should be called on a completion arrival.
+ *
+ * @param bp
+ * @param o
+ * @param cqe Completion element we are handling
+ * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+ * pending commands will be executed.
+ * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+ * may also be set if needed.
+ *
+ * @return 0 if there are neither pending nor waiting for
+ * completion commands. Positive value if there are
+ * pending for execution or for completion commands.
+ * Negative value in case of an error (including an
+ * error in the cqe).
+ */
+ int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Wait for completion of all commands. Don't schedule new ones,
+ * just wait. It assumes that the completion code will schedule
+ * for new commands.
+ */
+ int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
+
+enum {
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0,
+ BNX2X_LLH_CAM_ETH_LINE,
+ BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+/** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+ BNX2X_RX_MODE_FCOE_ETH,
+ BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+ BNX2X_ACCEPT_UNICAST,
+ BNX2X_ACCEPT_MULTICAST,
+ BNX2X_ACCEPT_ALL_UNICAST,
+ BNX2X_ACCEPT_ALL_MULTICAST,
+ BNX2X_ACCEPT_BROADCAST,
+ BNX2X_ACCEPT_UNMATCHED,
+ BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+ struct bnx2x_rx_mode_obj *rx_mode_obj;
+ unsigned long *pstate;
+ int state;
+ u8 cl_id;
+ u32 cid;
+ u8 func_id;
+ unsigned long ramrod_flags;
+ unsigned long rx_mode_flags;
+
+ /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
+ * a tstorm_eth_mac_filter_config (e1x).
+ */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Rx mode settings */
+ unsigned long rx_accept_flags;
+
+ /* internal switching settings */
+ unsigned long tx_accept_flags;
+};
+
+struct bnx2x_rx_mode_obj {
+ int (*config_rx_mode)(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct bnx2x_mcast_list_elem {
+ struct list_head link;
+ u8 *mac;
+};
+
+union bnx2x_mcast_config_data {
+ u8 *mac;
+ u8 bin; /* used in a RESTORE flow */
+};
+
+struct bnx2x_mcast_ramrod_params {
+ struct bnx2x_mcast_obj *mcast_obj;
+
+ /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+ unsigned long ramrod_flags;
+
+ struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
+ /** TODO:
+ * - rename it to macs_num.
+ * - Add a new command type for handling pending commands
+ * (remove "zero semantics").
+ *
+	 * Length of mcast_list. If zero with an ADD_CONT command, post
+	 * the pending commands.
+ */
+ int mcast_list_len;
+};
+
+enum bnx2x_mcast_cmd {
+ BNX2X_MCAST_CMD_ADD,
+ BNX2X_MCAST_CMD_CONT,
+ BNX2X_MCAST_CMD_DEL,
+ BNX2X_MCAST_CMD_RESTORE,
+
+	/* Following this command, the multicast configuration should
+	 * approximately equal the set of MACs provided [i.e., remove
+	 * everything else]. The two sub-commands are used internally to
+	 * decide whether a given bin is to be added or removed.
+ */
+ BNX2X_MCAST_CMD_SET,
+ BNX2X_MCAST_CMD_SET_ADD,
+ BNX2X_MCAST_CMD_SET_DEL,
+};
+
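+/* Illustrative sketch: posting an approximate-match multicast configuration
+ * (bp and the caller-built element list are assumed):
+ *
+ *	struct bnx2x_mcast_ramrod_params rparam = {0};
+ *
+ *	rparam.mcast_obj = &bp->mcast_obj;
+ *	INIT_LIST_HEAD(&rparam.mcast_list);
+ *	... add bnx2x_mcast_list_elem entries to rparam.mcast_list ...
+ *	rparam.mcast_list_len = mc_count;
+ *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
+ */
+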
+struct bnx2x_mcast_obj {
+ struct bnx2x_raw_obj raw;
+
+ union {
+ struct {
+ #define BNX2X_MCAST_BINS_NUM 256
+ #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64)
+ u64 vec[BNX2X_MCAST_VEC_SZ];
+
+ /** Number of BINs to clear. Should be updated
+ * immediately when a command arrives in order to
+ * properly create DEL commands.
+ */
+ int num_bins_set;
+ } aprox_match;
+
+ struct {
+ struct list_head macs;
+ int num_macs_set;
+ } exact_match;
+ } registry;
+
+ /* Pending commands */
+ struct list_head pending_cmds_head;
+
+ /* A state that is set in raw.pstate, when there are pending commands */
+ int sched_state;
+
+ /* Maximal number of mcast MACs configured in one command */
+ int max_cmd_len;
+
+ /* Total number of currently pending MACs to configure: both
+ * in the pending commands list and in the current command.
+ */
+ int total_pending_num;
+
+ u8 engine_id;
+
+ /**
+ * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
+ */
+ int (*config_mcast)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
+
+ /**
+ * Fills the ramrod data during the RESTORE flow.
+ *
+ * @param bp
+ * @param o
+ * @param start_idx Registry index to start from
+ * @param rdata_idx Index in the ramrod data to start from
+ *
+ * @return -1 if we handled the whole registry or index of the last
+ * handled registry element.
+ */
+ int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+ int start_bin, int *rdata_idx);
+
+ int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
+
+ void (*set_one_rule)(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data,
+ enum bnx2x_mcast_cmd cmd);
+
+ /** Checks if there are more mcast MACs to be set or a previous
+ * command is still pending.
+ */
+ bool (*check_pending)(struct bnx2x_mcast_obj *o);
+
+ /**
+ * Set/Clear/Check SCHEDULED state of the object
+ */
+ void (*set_sched)(struct bnx2x_mcast_obj *o);
+ void (*clear_sched)(struct bnx2x_mcast_obj *o);
+ bool (*check_sched)(struct bnx2x_mcast_obj *o);
+
+ /* Wait until all pending commands complete */
+ int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
+
+ /**
+ * Handle the internal object counters needed for proper
+ * commands handling. Checks that the provided parameters are
+ * feasible.
+ */
+ int (*validate)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
+
+ /**
+ * Restore the values of internal counters in case of a failure.
+ */
+ void (*revert)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins,
+ enum bnx2x_mcast_cmd cmd);
+
+ int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+ void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+ /* Current amount of credit in the pool */
+ atomic_t credit;
+
+ /* Maximum allowed credit. put() will check against it. */
+ int pool_sz;
+
+ /* Allocate a pool table statically.
+ *
+	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272).
+ *
+ * The set bit in the table will mean that the entry is available.
+ */
+#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
+ u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+	/* Base pool offset (initialized differently) */
+ int base_pool_offset;
+
+ /**
+ * Get the next free pool entry.
+ *
+ * @return true if there was a free entry in the pool
+ */
+ bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+ /**
+ * Return the entry back to the pool.
+ *
+ * @return true if entry is legal and has been successfully
+ * returned to the pool.
+ */
+ bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+ /**
+ * Get the requested amount of credit from the pool.
+ *
+ * @param cnt Amount of requested credit
+ * @return true if the operation is successful
+ */
+ bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+ /**
+ * Returns the credit to the pool.
+ *
+ * @param cnt Amount of credit to return
+ * @return true if the operation is successful
+ */
+ bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+ /**
+ * Reads the current amount of credit.
+ */
+ int (*check)(struct bnx2x_credit_pool_obj *o);
+};
+
+/*************************** RSS configuration ********************************/
+enum {
+ /* RSS_MODE bits are mutually exclusive */
+ BNX2X_RSS_MODE_DISABLED,
+ BNX2X_RSS_MODE_REGULAR,
+
+ BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+ BNX2X_RSS_IPV4,
+ BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV4_UDP,
+ BNX2X_RSS_IPV6,
+ BNX2X_RSS_IPV6_TCP,
+ BNX2X_RSS_IPV6_UDP,
+
+ BNX2X_RSS_IPV4_VXLAN,
+ BNX2X_RSS_IPV6_VXLAN,
+ BNX2X_RSS_TUNN_INNER_HDRS,
+};
+
+struct bnx2x_config_rss_params {
+ struct bnx2x_rss_config_obj *rss_obj;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* BNX2X_RSS_X bits */
+ unsigned long rss_flags;
+
+	/* Number of hash bits to take into account */
+ u8 rss_result_mask;
+
+ /* Indirection table */
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ /* RSS hash values */
+ u32 rss_key[10];
+
+	/* valid iff BNX2X_RSS_UPDATE_TOE is set */
+ u16 toe_rss_bitmap;
+};
+
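+/* Illustrative sketch: enabling regular RSS with a 4-tuple hash for TCP
+ * over IPv4/IPv6 (bp/rss_obj and the key/indirection table are assumed
+ * from the caller):
+ *
+ *	struct bnx2x_config_rss_params params = {0};
+ *
+ *	params.rss_obj = rss_obj;
+ *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+ *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ *	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ *	memcpy(params.rss_key, rss_key, sizeof(params.rss_key));
+ *	rc = bnx2x_config_rss(bp, &params);
+ */
+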
+struct bnx2x_rss_config_obj {
+ struct bnx2x_raw_obj raw;
+
+ /* RSS engine to use */
+ u8 engine_id;
+
+ /* Last configured indirection table */
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* flags for enabling 4-tuple hash on UDP */
+ u8 udp_rss_v4;
+ u8 udp_rss_v6;
+
+ int (*config_rss)(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p);
+};
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+ BNX2X_Q_UPDATE_IN_VLAN_REM,
+ BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_OUT_VLAN_REM,
+ BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_ANTI_SPOOF,
+ BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+ BNX2X_Q_UPDATE_ACTIVATE,
+ BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ BNX2X_Q_UPDATE_TX_SWITCHING,
+ BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+ BNX2X_Q_UPDATE_PTP_PKTS,
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+ BNX2X_Q_STATE_RESET,
+ BNX2X_Q_STATE_INITIALIZED,
+ BNX2X_Q_STATE_ACTIVE,
+ BNX2X_Q_STATE_MULTI_COS,
+ BNX2X_Q_STATE_MCOS_TERMINATED,
+ BNX2X_Q_STATE_INACTIVE,
+ BNX2X_Q_STATE_STOPPED,
+ BNX2X_Q_STATE_TERMINATED,
+ BNX2X_Q_STATE_FLRED,
+ BNX2X_Q_STATE_MAX,
+};
+
+/* Queue logical states */
+enum bnx2x_q_logical_state {
+ BNX2X_Q_LOGICAL_STATE_ACTIVE,
+ BNX2X_Q_LOGICAL_STATE_STOPPED,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+ BNX2X_Q_CMD_INIT,
+ BNX2X_Q_CMD_SETUP,
+ BNX2X_Q_CMD_SETUP_TX_ONLY,
+ BNX2X_Q_CMD_DEACTIVATE,
+ BNX2X_Q_CMD_ACTIVATE,
+ BNX2X_Q_CMD_UPDATE,
+ BNX2X_Q_CMD_UPDATE_TPA,
+ BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_CFC_DEL,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_EMPTY,
+ BNX2X_Q_CMD_MAX,
+};
+
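+/* Per the transition checker in bnx2x_sp.c, a queue's typical lifecycle is:
+ *
+ *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE
+ *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS (one tx-only connection per
+ *	additional CoS)
+ *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
+ *	TERMINATED --CFC_DEL--> RESET
+ */
+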
+/* queue SETUP + INIT flags */
+enum {
+ BNX2X_Q_FLG_TPA,
+ BNX2X_Q_FLG_TPA_IPV6,
+ BNX2X_Q_FLG_TPA_GRO,
+ BNX2X_Q_FLG_STATS,
+ BNX2X_Q_FLG_ZERO_STATS,
+ BNX2X_Q_FLG_ACTIVE,
+ BNX2X_Q_FLG_OV,
+ BNX2X_Q_FLG_VLAN,
+ BNX2X_Q_FLG_COS,
+ BNX2X_Q_FLG_HC,
+ BNX2X_Q_FLG_HC_EN,
+ BNX2X_Q_FLG_DHC,
+ BNX2X_Q_FLG_FCOE,
+ BNX2X_Q_FLG_LEADING_RSS,
+ BNX2X_Q_FLG_MCAST,
+ BNX2X_Q_FLG_DEF_VLAN,
+ BNX2X_Q_FLG_TX_SWITCH,
+ BNX2X_Q_FLG_TX_SEC,
+ BNX2X_Q_FLG_ANTI_SPOOF,
+ BNX2X_Q_FLG_SILENT_VLAN_REM,
+ BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
+ BNX2X_Q_FLG_PCSUM_ON_PKT,
+ BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
+};
+
+/* Queue type options: queue type may be a combination of below. */
+enum bnx2x_q_type {
+ /** TODO: Consider moving both these flags into the init()
+ * ramrod params.
+ */
+ BNX2X_Q_TYPE_HAS_RX,
+ BNX2X_Q_TYPE_HAS_TX,
+};
+
+#define BNX2X_PRIMARY_CID_INDEX 0
+#define BNX2X_MULTI_TX_COS_E1X 3 /* QM only */
+#define BNX2X_MULTI_TX_COS_E2_E3A0 2
+#define BNX2X_MULTI_TX_COS_E3B0 3
+#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
+
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+/* DMAE channel to be used by FW for the timesync workaround. A driver
+ * that sends timesync-related ramrods must not use this DMAE command ID.
+ */
+#define FW_DMAE_CMD_ID 6
+
+struct bnx2x_queue_init_params {
+ struct {
+ unsigned long flags;
+ u16 hc_rate;
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ } tx;
+
+ struct {
+ unsigned long flags;
+ u16 hc_rate;
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ } rx;
+
+ /* CID context in the host memory */
+ struct eth_context *cxts[BNX2X_MULTI_TX_COS];
+
+ /* maximum number of cos supported by hardware */
+ u8 max_cos;
+};
+
+struct bnx2x_queue_terminate_params {
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_cfc_del_params {
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_update_params {
+ unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */
+ u16 def_vlan;
+ u16 silent_removal_value;
+ u16 silent_removal_mask;
+	/* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_update_tpa_params {
+ dma_addr_t sge_map;
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_pkt;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u8 _pad;
+
+ u16 sge_buff_sz;
+ u16 max_agg_sz;
+
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+};
+
+struct rxq_pause_params {
+ u16 bd_th_lo;
+ u16 bd_th_hi;
+ u16 rcq_th_lo;
+ u16 rcq_th_hi;
+ u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+ u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+ u16 pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+ /* valid iff BNX2X_Q_FLG_STATS */
+ u8 stat_id;
+
+ u8 spcl_id;
+ u16 mtu;
+ u8 cos;
+
+ u8 fp_hsi;
+};
+
+struct bnx2x_rxq_setup_params {
+ /* dma */
+ dma_addr_t dscr_map;
+ dma_addr_t sge_map;
+ dma_addr_t rcq_map;
+ dma_addr_t rcq_np_map;
+
+ u16 drop_flags;
+ u16 buf_sz;
+ u8 fw_sb_id;
+ u8 cl_qzone_id;
+
+ /* valid iff BNX2X_Q_FLG_TPA */
+ u16 tpa_agg_sz;
+ u16 sge_buf_sz;
+ u8 max_sges_pkt;
+ u8 max_tpa_queues;
+ u8 rss_engine_id;
+
+ /* valid iff BNX2X_Q_FLG_MCAST */
+ u8 mcast_engine_id;
+
+ u8 cache_line_log;
+
+ u8 sb_cq_index;
+
+	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+ u16 silent_removal_value;
+ u16 silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+ /* dma */
+ dma_addr_t dscr_map;
+
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ u8 cos; /* valid iff BNX2X_Q_FLG_COS */
+ u16 traffic_type;
+	/* equals the leading RSS client id, used for Tx classification */
+ u8 tss_leading_cl_id;
+
+ /* valid iff BNX2X_Q_FLG_DEF_VLAN */
+ u16 default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+ struct bnx2x_general_setup_params gen_params;
+ struct bnx2x_txq_setup_params txq_params;
+ struct bnx2x_rxq_setup_params rxq_params;
+ struct rxq_pause_params pause_params;
+ unsigned long flags;
+};
+
+struct bnx2x_queue_setup_tx_only_params {
+ struct bnx2x_general_setup_params gen_params;
+ struct bnx2x_txq_setup_params txq_params;
+ unsigned long flags;
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_state_params {
+ struct bnx2x_queue_sp_obj *q_obj;
+
+ /* Current command */
+ enum bnx2x_queue_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_update_tpa_params update_tpa;
+ struct bnx2x_queue_setup_params setup;
+ struct bnx2x_queue_init_params init;
+ struct bnx2x_queue_setup_tx_only_params tx_only;
+ struct bnx2x_queue_terminate_params terminate;
+ struct bnx2x_queue_cfc_del_params cfc_del;
+ } params;
+};
+
+struct bnx2x_viflist_params {
+ u8 echo_res;
+ u8 func_bit_map_res;
+};
+
+struct bnx2x_queue_sp_obj {
+ u32 cids[BNX2X_MULTI_TX_COS];
+ u8 cl_id;
+ u8 func_id;
+
+ /* number of traffic classes supported by queue.
+ * The primary connection of the queue supports the first traffic
+ * class. Any further traffic class is supported by a tx-only
+ * connection.
+ *
+ * Therefore max_cos is also a number of valid entries in the cids
+ * array.
+ */
+ u8 max_cos;
+ u8 num_tx_only, next_tx_only;
+
+ enum bnx2x_q_state state, next_state;
+
+ /* bits from enum bnx2x_q_type */
+ unsigned long type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements a "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+ * more convenient to have different bits for different
+ * commands.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Sets the pending bit according to the requested transition.
+ */
+ int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+			    enum bnx2x_queue_cmd cmd);
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+
+/* UPDATE command options */
+enum {
+ BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+ BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+ BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+ BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
+ BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
+ BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
+ BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+};
+
+/* Allowed Function states */
+enum bnx2x_func_state {
+ BNX2X_F_STATE_RESET,
+ BNX2X_F_STATE_INITIALIZED,
+ BNX2X_F_STATE_STARTED,
+ BNX2X_F_STATE_TX_STOPPED,
+ BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+ BNX2X_F_CMD_HW_INIT,
+ BNX2X_F_CMD_START,
+ BNX2X_F_CMD_STOP,
+ BNX2X_F_CMD_HW_RESET,
+ BNX2X_F_CMD_AFEX_UPDATE,
+ BNX2X_F_CMD_AFEX_VIFLISTS,
+ BNX2X_F_CMD_TX_STOP,
+ BNX2X_F_CMD_TX_START,
+ BNX2X_F_CMD_SWITCH_UPDATE,
+ BNX2X_F_CMD_SET_TIMESYNC,
+ BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+	/* An unload phase returned by MCP.
+	 *
+	 * May be:
+	 * FW_MSG_CODE_DRV_UNLOAD_COMMON
+	 * FW_MSG_CODE_DRV_UNLOAD_PORT
+	 * FW_MSG_CODE_DRV_UNLOAD_FUNCTION
+ */
+ u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+ /* Multi Function mode:
+ * - Single Function
+ * - Switch Dependent
+ * - Switch Independent
+ */
+ u16 mf_mode;
+
+ /* Switch Dependent mode outer VLAN tag */
+ u16 sd_vlan_tag;
+
+ /* Function cos mode */
+ u8 network_cos_mode;
+
+ /* UDP dest port for VXLAN */
+ u16 vxlan_dst_port;
+
+ /* UDP dest port for Geneve */
+ u16 geneve_dst_port;
+
+ /* Enable inner Rx classifications for L2GRE packets */
+ u8 inner_clss_l2gre;
+
+ /* Enable inner Rx classifications for L2-Geneve packets */
+ u8 inner_clss_l2geneve;
+
+ /* Enable inner Rx classification for vxlan packets */
+ u8 inner_clss_vxlan;
+
+ /* Enable RSS according to inner header */
+ u8 inner_rss;
+
+ /* Allow accepting packets that fail MF classification, possibly
+ * only those matching a given ethertype
+ */
+ u8 class_fail;
+ u16 class_fail_ethtype;
+
+ /* Override priority of output packets */
+ u8 sd_vlan_force_pri;
+ u8 sd_vlan_force_pri_val;
+
+ /* Replace vlan's ethertype */
+ u16 sd_vlan_eth_type;
+
+ /* Prevent inner vlans from being added by FW */
+ u8 no_added_tags;
+
+ /* Inner-to-Outer vlan priority mapping */
+ u8 c2s_pri[MAX_VLAN_PRIORITIES];
+ u8 c2s_pri_default;
+ u8 c2s_pri_valid;
+};
+
+struct bnx2x_func_switch_update_params {
+ unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
+ u16 vlan;
+ u16 vlan_eth_type;
+ u8 vlan_force_prio;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
+};
+
+struct bnx2x_func_afex_update_params {
+ u16 vif_id;
+ u16 afex_default_vlan;
+ u8 allowed_priorities;
+};
+
+struct bnx2x_func_afex_viflists_params {
+ u16 vif_list_index;
+ u8 func_bit_map;
+ u8 afex_vif_list_command;
+ u8 func_to_clear;
+};
+
+struct bnx2x_func_tx_start_params {
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+ u8 dcb_enabled;
+ u8 dcb_version;
+ u8 dont_add_pri_0_en;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
+};
+
+struct bnx2x_func_set_timesync_params {
+ /* Reset, set or keep the current drift value */
+ u8 drift_adjust_cmd;
+
+ /* Dec, inc or keep the current offset */
+ u8 offset_cmd;
+
+ /* Drift value direction */
+ u8 add_sub_drift_adjust_value;
+
+ /* Drift, period and offset values to be used according to the commands
+ * above.
+ */
+ u8 drift_adjust_value;
+ u32 drift_adjust_period;
+ u64 offset_delta;
+};
+
+struct bnx2x_func_state_params {
+ struct bnx2x_func_sp_obj *f_obj;
+
+ /* Current command */
+ enum bnx2x_func_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct bnx2x_func_hw_init_params hw_init;
+ struct bnx2x_func_hw_reset_params hw_reset;
+ struct bnx2x_func_start_params start;
+ struct bnx2x_func_switch_update_params switch_update;
+ struct bnx2x_func_afex_update_params afex_update;
+ struct bnx2x_func_afex_viflists_params afex_viflists;
+ struct bnx2x_func_tx_start_params tx_start;
+ struct bnx2x_func_set_timesync_params set_timesync;
+ } params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+ /* Init tool + runtime initialization:
+ * - Common Chip
+ * - Common (per Path)
+ * - Port
+ * - Function phases
+ */
+ int (*init_hw_cmn_chip)(struct bnx2x *bp);
+ int (*init_hw_cmn)(struct bnx2x *bp);
+ int (*init_hw_port)(struct bnx2x *bp);
+ int (*init_hw_func)(struct bnx2x *bp);
+
+ /* Reset Function HW: Common, Port, Function phases. */
+ void (*reset_hw_cmn)(struct bnx2x *bp);
+ void (*reset_hw_port)(struct bnx2x *bp);
+ void (*reset_hw_func)(struct bnx2x *bp);
+
+ /* Init/Free GUNZIP resources */
+ int (*gunzip_init)(struct bnx2x *bp);
+ void (*gunzip_end)(struct bnx2x *bp);
+
+ /* Prepare/Release FW resources */
+ int (*init_fw)(struct bnx2x *bp);
+ void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+ enum bnx2x_func_state state, next_state;
+
+ /* BNX2X_FUNC_CMD_XX bits. This object implements the "one
+ * pending" paradigm, but for debug and tracing purposes it's
+ * more convenient to have a different bit for each
+ * command.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as ramrod data, and its mapping */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Buffer to use as afex ramrod data, and its mapping.
+ * This can't be the same rdata as above because afex ramrod requests
+ * can arrive at the object in parallel with other ramrod requests.
+ */
+ void *afex_rdata;
+ dma_addr_t afex_rdata_mapping;
+
+ /* this mutex ensures that when the pending flag is taken, the next
+ * ramrod to be sent will be the one that set the pending bit
+ */
+ struct mutex one_pending_mutex;
+
+ /* Driver interface */
+ struct bnx2x_func_sp_drv_ops *drv;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ struct bnx2x_func_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd);
+
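+ /**
+ * Waits for the completion of the pending command.
+ */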
+ int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+ struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *obj,
+ void *rdata, dma_addr_t rdata_mapping,
+ void *afex_rdata, dma_addr_t afex_rdata_mapping,
+ struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params);
+
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o);
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
+ u8 cid_cnt, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params);
+
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+ struct bnx2x_rx_mode_obj *o);
+
+/**
+ * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @p: Command parameters
+ *
+ * Return: 0 - if the operation was successful and there are no pending completions,
+ * positive number - if there are pending completions,
+ * negative - if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *mcast_obj,
+ u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+ u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type);
+
+/**
+ * bnx2x_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
+ * configuration, or continue executing the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If a previous command is still pending, or if the number of MACs to
+ * configure is more than the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * Return: 0 if the operation was successful and there are no pending
+ * completions, negative if there were errors, positive if there are
+ * pending completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit);
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+ struct bnx2x_rss_config_obj *rss_obj,
+ u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+ void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type);
+
+/**
+ * bnx2x_config_rss - Updates RSS configuration according to provided parameters
+ *
+ * Return: 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p);
+
+/**
+ * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
+ *
+ * @ind_table: buffer to fill with the current indirection
+ * table content. Should be at least
+ * T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+ u8 *ind_table);
+
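+/* Per-PF MAC credit: the MAC credits that remain after reserving
+ * VF_MAC_CREDIT_CNT for every VF on the path are split evenly between
+ * the PFs, and each PF gets back the credits of its own VFs.
+ */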
+#define PF_MAC_CREDIT_E2(bp, func_num) \
+ ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define BNX2X_VFS_VLAN_CREDIT(bp) \
+ (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num) \
+ ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
+#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
new file mode 100644
index 0000000000..77d4cb4ad7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -0,0 +1,3201 @@
+/* bnx2x_sriov.c: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
+ *
+ */
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin,
+ bool test_queue);
+
+/* General service functions */
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
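+ /* the VF-to-PF mapping must be mirrored in all four STORM memories */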
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ int idx;
+
+ for_each_vf(bp, idx)
+ if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
+ break;
+ return idx;
+}
+
+static
+struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
+ return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
+}
+
+static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u8 igu_sb_id, u8 segment, u16 index, u8 op,
+ u8 update)
+{
+ /* acking a VF sb through the PF - use the GRC */
+ u32 ctl;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 func_encode = vf->abs_vfid;
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
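+ /* encode the target IGU address, the VF's fid and a write opcode */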
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr_data);
+ REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
+ barrier();
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ barrier();
+}
+
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ bool print_err)
+{
+ if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+ if (print_err)
+ BNX2X_ERR("Slowpath objects not yet initialized!\n");
+ else
+ DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+ return false;
+ }
+ return true;
+}
+
+/* VFOP operations states */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->tx.sb_cq_index,
+ init_params->tx.hc_rate,
+ setup_params->flags,
+ setup_params->txq_params.traffic_type);
+}
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
+ "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->rx.sb_cq_index,
+ init_params->rx.hc_rate,
+ setup_params->gen_params.mtu,
+ rxq_params->buf_sz,
+ rxq_params->sge_buf_sz,
+ rxq_params->max_sges_pkt,
+ rxq_params->tpa_agg_sz,
+ setup_params->flags,
+ rxq_params->drop_flags,
+ rxq_params->cache_line_log);
+}
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vf_queue_construct_params *p,
+ unsigned long q_type)
+{
+ struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
+ struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
+
+ /* INIT */
+
+ /* Enable host coalescing in the transition to INIT state */
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
+
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
+
+ /* FW SB ID */
+ init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+
+ /* context */
+ init_p->cxts[0] = q->cxt;
+
+ /* SETUP */
+
+ /* Setup-op general parameters */
+ setup_p->gen_params.spcl_id = vf->sp_cl_id;
+ setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+ setup_p->gen_params.fp_hsi = vf->fp_hsi;
+
+ /* Setup-op flags:
+ * collect statistics, zero statistics, local-switching, security,
+ * OV for Flex10, RSS and MCAST for leading
+ */
+ if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
+
+ /* for VFs, enable tx switching, bd coherency, and mac address
+ * anti-spoofing
+ */
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
+ if (vf->spoofchk)
+ __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+ else
+ __clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+
+ /* Setup-op rx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
+ struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
+
+ rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
+ rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
+ rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
+ }
+
+ /* Setup-op tx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
+ setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
+ setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ }
+}
+
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
+{
+ struct bnx2x_queue_state_params *q_params;
+ int rc = 0;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ /* Prepare ramrod information */
+ q_params = &qctor->qstate;
+ q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
+
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+ goto out;
+ }
+
+ /* Run Queue 'construction' ramrods */
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
+
+ memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+ sizeof(struct bnx2x_queue_setup_params));
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
+
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+ return rc;
+}
+
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
+{
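+ /* queues are destroyed with a fixed HALT -> TERMINATE -> CFC_DEL sequence */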
+ enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_CFC_DEL};
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Prepare ramrod information */
+ memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+ q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+ goto out;
+ }
+
+ /* Run Queue 'destruction' ramrods */
+ for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+ q_params.cmd = cmds[i];
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+ return rc;
+ }
+ }
+out:
+ /* Clean Context */
+ if (bnx2x_vfq(vf, qid, cxt)) {
+ bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+ bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
+ }
+
+ return 0;
+}
+
+static void
+bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
+{
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ if (vf) {
+ /* the first igu entry belonging to VFs of this PF */
+ if (!BP_VFDB(bp)->first_vf_igu_entry)
+ BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+ /* the first igu entry belonging to this VF */
+ if (!vf_sb_count(vf))
+ vf->igu_base_id = igu_sb_id;
+
+ ++vf_sb_count(vf);
+ ++vf->sb_count;
+ }
+ BP_VFDB(bp)->vf_sbs_pool++;
+}
+
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, bool drv_only, int type)
+{
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
+
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ } else if (type == BNX2X_VF_FILTER_MAC) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ } else {
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ }
+ ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+ /* Start deleting */
+ rc = ramrod.vlan_mac_obj->delete_all(bp,
+ ramrod.vlan_mac_obj,
+ &ramrod.user_req.vlan_mac_flags,
+ &ramrod.ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("Failed to delete all %s\n",
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_mac_vlan_filter *filter,
+ bool drv_only)
+{
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
+ vf->abs_vfid, filter->add ? "Adding" : "Deleting",
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
+
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ } else {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ }
+ ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+ /* Add/Remove the filter */
+ rc = bnx2x_config_vlan_mac(bp, &ramrod);
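+ /* a filter that already exists is not treated as an error */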
+ if (rc == -EEXIST)
+ return 0;
+ if (rc) {
+ BNX2X_ERR("Failed to %s %s\n",
+ filter->add ? "add" : "delete",
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+ "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ?
+ "MAC" : "VLAN");
+ return rc;
+ }
+
+ filter->applied = true;
+
+ return 0;
+}
+
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only)
+{
+ int rc = 0, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* Prepare ramrod params */
+ for (i = 0; i < filters->count; i++) {
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i], drv_only);
+ if (rc)
+ break;
+ }
+
+ /* Rollback if needed */
+ if (i != filters->count) {
+ BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+ i, filters->count);
+ while (--i >= 0) {
+ if (!filters->filters[i].applied)
+ continue;
+ filters->filters[i].add = !filters->filters[i].add;
+ bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i],
+ drv_only);
+ }
+ }
+
+ /* It's our responsibility to free the filters */
+ kfree(filters);
+
+ return rc;
+}
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+ if (rc)
+ goto op_err;
+
+ /* Schedule the configuration of any pending vlan filters */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_MSG_IOV);
+ return 0;
+op_err:
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
+}
+
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ /* If needed, clean the filtering data base */
+ if ((qid == LEADING_IDX) &&
+ bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_MAC);
+ if (rc)
+ goto op_err;
+ }
+
+ /* Terminate queue */
+ if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
+ struct bnx2x_queue_state_params qstate;
+
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate.cmd = BNX2X_Q_CMD_TERMINATE;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc)
+ goto op_err;
+ }
+
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
+}
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
+{
+ struct bnx2x_mcast_list_elem *mc = NULL;
+ struct bnx2x_mcast_ramrod_params mcast;
+ int rc, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Prepare Multicast command */
+ memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+ mcast.mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+ if (mc_num) {
+ mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
+ GFP_KERNEL);
+ if (!mc) {
+ BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (mc_num) {
+ INIT_LIST_HEAD(&mcast.mcast_list);
+ for (i = 0; i < mc_num; i++) {
+ mc[i].mac = mcasts[i];
+ list_add_tail(&mc[i].link,
+ &mcast.mcast_list);
+ }
+
+ /* add new mcasts */
+ mcast.mcast_list_len = mc_num;
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
+ if (rc)
+ BNX2X_ERR("Failed to set multicasts\n");
+ } else {
+ /* clear existing mcasts */
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+ if (rc)
+ BNX2X_ERR("Failed to remove multicasts\n");
+ }
+
+ kfree(mc);
+
+ return rc;
+}
+
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ struct bnx2x_rx_mode_ramrod_params *ramrod,
+ struct bnx2x_virtf *vf,
+ unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
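+ /* apply the same accept flags on both the Rx and Tx sides */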
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags)
+{
+ struct bnx2x_rx_mode_ramrod_params ramrod;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+ vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+ return bnx2x_config_rx_mode(bp, &ramrod);
+}
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
+
+ /* Remove all classification configuration for leading queue */
+ if (qid == LEADING_IDX) {
+ rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+ if (rc)
+ goto op_err;
+
+ /* Remove filtering if feasible */
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false,
+ BNX2X_VF_FILTER_VLAN);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false,
+ BNX2X_VF_FILTER_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+ if (rc)
+ goto op_err;
+ }
+ }
+
+ /* Destroy queue */
+ rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+ if (rc)
+ goto op_err;
+ return rc;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, rc);
+ return rc;
+}
+
+/* VF enable primitives.
+ * When pretend is required, the caller is responsible
+ * for calling pretend prior to calling these routines.
+ */
+
+/* internal vf enable - until vf is enabled internally all transactions
+ * are blocked. This routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
+}
+
+/* clears vf error in all semi blocks */
+static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
+}
+
+static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
+ u32 was_err_reg = 0;
+
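+ /* each WAS_ERROR register covers 32 VFs; the group index selects the
+ * register and the low 5 bits of the vfid select the bit within it
+ */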
+ switch (was_err_group) {
+ case 0:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
+ break;
+ case 1:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
+ break;
+ case 2:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
+ break;
+ case 3:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
+ break;
+ }
+ REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
+}
+
+static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+ u32 val;
+
+ /* Set VF masks and configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
+ val &= ~IGU_VF_CONF_PARENT_MASK;
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+
+ DP(BNX2X_MSG_IOV,
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
+
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf_sb_count(vf); i++) {
+ u8 igu_sb_id = vf_igu_sb(vf, i);
+
+ /* zero prod memory */
+ REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
+
+ /* clear sb state machine */
+ bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
+ false /* VF */);
+
+ /* disable + update */
+ bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 1);
+ }
+}
+
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
+{
+ u16 abs_fid;
+
+ abs_fid = FW_VF_HANDLE(abs_vfid);
+
+ /* set the VF-PF association in the FW */
+ storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
+ storm_memset_func_en(bp, abs_fid, 1);
+
+ /* Invalidate fp_hsi version for vfs */
+ if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);
+
+ /* clear vf errors */
+ bnx2x_vf_semi_clear_err(bp, abs_vfid);
+ bnx2x_vf_pglue_clear_err(bp, abs_vfid);
+
+ /* internal vf-enable - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+ DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
+ bnx2x_vf_enable_internal(bp, true);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* Reset the VF in the IGU - interrupts are still disabled */
+ bnx2x_vf_igu_reset(bp, vf);
+
+ /* pretend to enable the vf with the PBF */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ REG_WR(bp, PBF_REG_DISABLE_VF, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
+{
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ struct pci_dev *dev;
+ bool pending;
+
+ if (!vf)
+ return false;
+
+ dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
+ if (!dev)
+ return false;
+ pending = bnx2x_is_pcie_pending(dev);
+ pci_dev_put(dev);
+
+ return pending;
+}
+
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* Verify no pending pci transactions */
+ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ return 0;
+}
+
+/* must be called after the number of PF queues and the number of VFs are
+ * both known
+ */
+static void
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct vf_pf_resc_request *resc = &vf->alloc_resc;
+
+ /* will be set only during VF-ACQUIRE */
+ resc->num_rxqs = 0;
+ resc->num_txqs = 0;
+
+ resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+ resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
+
+ /* no real limitation */
+ resc->num_mc_filters = 0;
+
+ /* num_sbs already set */
+ resc->num_sbs = vf->sb_count;
+}
+
+/* FLR routines: */
+static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* reset the state variables */
+ bnx2x_iov_static_resc(bp, vf);
+ vf->state = VF_FREE;
+}
+
+static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ /* DQ usage counter */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
+ "DQ VF usage counter timed out",
+ poll_cnt);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* FW cleanup command - poll for the results */
+ if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
+ poll_cnt))
+ BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
+
+ /* verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+}
+
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_flr(bp, vf, i);
+ if (rc)
+ goto out;
+ }
+
+ /* remove multicasts */
+ bnx2x_vf_mcast(bp, vf, NULL, 0, true);
+
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
+
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
+
+ vf->malicious = false;
+
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ return;
+out:
+ BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+ vf->abs_vfid, i, rc);
+}
+
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
+{
+ struct bnx2x_virtf *vf;
+ int i;
+
+ for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+ /* VF should be RESET & in FLR cleanup states */
+ if (bnx2x_vf(bp, i, state) != VF_RESET ||
+ !bnx2x_vf(bp, i, flr_clnup_stage))
+ continue;
+
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+ i, BNX2X_NR_VIRTFN(bp));
+
+ vf = BP_VF(bp, i);
+
+ /* lock the vf pf channel */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+ /* invoke the VF FLR SM */
+ bnx2x_vf_flr(bp, vf);
+
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = false;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+ }
+
+ /* Acknowledge the handled VFs.
+ * We acknowledge all the VFs for which an FLR was requested, even
+ * those we never opened, since the MCP will immediately interrupt
+ * us again if we only ack some of the bits, resulting in an endless
+ * loop. This can happen, for example, in KVM, where an 'all ones'
+ * FLR request is sometimes issued by the hypervisor.
+ */
+ DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+ bp->vfdb->flrd_vfs[i]);
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+ /* clear the acked bits - better yet if the MCP implemented
+ * write to clear semantics
+ */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+ int i;
+
+ /* Read FLR'd VFs */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+ DP(BNX2X_MSG_MCP,
+ "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+ u32 reset = 0;
+
+ if (vf->abs_vfid < 32)
+ reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+ else
+ reset = bp->vfdb->flrd_vfs[1] &
+ (1 << (vf->abs_vfid - 32));
+
+ if (reset) {
+ /* set as reset and ready for cleanup */
+ vf->state = VF_RESET;
+ vf->flr_clnup_stage = true;
+
+ DP(BNX2X_MSG_IOV,
+ "Initiating Final cleanup for VF %d\n",
+ vf->abs_vfid);
+ }
+ }
+
+ /* do the FLR cleanup for all marked VFs */
+ bnx2x_vf_flr_clnup(bp);
+}
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* Set the DQ such that the CID reflect the abs_vfid */
+ REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+ REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+ /* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong
+ * to the PF L2 queues
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+ /* The VF window size is the log2 of the max number of CIDs per VF */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
+
+ /* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+ * the PF doorbell size, although the two are independent.
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
+
+ /* No security checks for now -
+ * configure a single rule (out of 16): mask = 0x1, value = 0x0,
+ * CID range 0 - 0x1ffff
+ */
+ REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+ REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+ /* set the VF doorbell threshold. This threshold represents the number
+ * of doorbells allowed in the main DORQ FIFO for a specific VF.
+ */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
+}
+
+void bnx2x_iov_init_dmae(struct bnx2x *bp)
+{
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+}
+
+static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+
+ return pci_domain_nr(dev->bus);
+}
+
+static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return dev->bus->number + ((dev->devfn + iov->offset +
+ iov->stride * vfid) >> 8);
+}
+
+static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
+}
+
+static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i, n;
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
+ u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
+ u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
+
+ size /= iov->total;
+ vf->bars[n].bar = start + size * vf->abs_vfid;
+ vf->bars[n].size = size;
+ }
+}
+
+static int
+bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+{
+ int sb_id;
+ u32 val;
+ u8 fid, current_pf = 0;
+
+ /* IGU in normal mode - read CAM */
+ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
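+ /* a PF's CAM entry precedes those of its VFs, so track the last PF
+ * seen and attribute the VF entries that follow to it
+ */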
+ if (fid & IGU_FID_ENCODE_IS_PF)
+ current_pf = fid & IGU_FID_PF_NUM_MASK;
+ else if (current_pf == BP_FUNC(bp))
+ bnx2x_vf_set_igu_info(bp, sb_id,
+ (fid & IGU_FID_VF_NUM_MASK));
+ DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
+ ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
+ ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
+ (fid & IGU_FID_VF_NUM_MASK)), sb_id,
+ GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+ }
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
+ return BP_VFDB(bp)->vf_sbs_pool;
+}
+
+static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+{
+ if (bp->vfdb) {
+ kfree(bp->vfdb->vfqs);
+ kfree(bp->vfdb->vfs);
+ kfree(bp->vfdb);
+ }
+ bp->vfdb = NULL;
+}
+
+static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ int pos;
+ struct pci_dev *dev = bp->pdev;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ BNX2X_ERR("failed to find SRIOV capability in device\n");
+ return -ENODEV;
+ }
+
+ iov->pos = pos;
+ DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
+ pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+ pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ return 0;
+}
+
+static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ u32 val;
+
+ /* read the SRIOV capability structure
+ * The fields can be read via configuration read or
+ * directly from the device (starting at offset PCICFG_OFFSET)
+ */
+ if (bnx2x_sriov_pci_cfg_info(bp, iov))
+ return -ENODEV;
+
+ /* get the number of SRIOV bars */
+ iov->nres = 0;
+
+ /* read the first_vfid */
+ val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
+ iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
+ * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
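+ /* (the GRC field counts VFs in units of 8 chip-wide; subtracting the
+ * preceding path's VF range converts it to a path-relative vfid)
+ */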
+
+ DP(BNX2X_MSG_IOV,
+ "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ BP_FUNC(bp),
+ iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
+ iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ return 0;
+}
+
+/* must be called after PF bars are mapped */
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param)
+{
+ int err, i;
+ struct bnx2x_sriov *iov;
+ struct pci_dev *dev = bp->pdev;
+
+ bp->vfdb = NULL;
+
+ /* verify this is a PF */
+ if (IS_VF(bp))
+ return 0;
+
+ /* verify sriov capability is present in configuration space */
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
+ return 0;
+
+ /* verify chip revision */
+ if (CHIP_IS_E1x(bp))
+ return 0;
+
+ /* check if SRIOV support is turned off */
+ if (!num_vfs_param)
+ return 0;
+
+ /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
+ if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
+ BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
+ BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
+ return 0;
+ }
+
+ /* SRIOV can be enabled only with MSIX */
+ if (int_mode_param == BNX2X_INT_MODE_MSI ||
+ int_mode_param == BNX2X_INT_MODE_INTX) {
+ BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+ return 0;
+ }
+
+ /* verify ari is enabled */
+ if (!pci_ari_enabled(bp->pdev->bus)) {
+ BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
+ return 0;
+ }
+
+ /* verify igu is in normal mode */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
+ return 0;
+ }
+
+ /* allocate the vfs database */
+ bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+ if (!bp->vfdb) {
+ BNX2X_ERR("failed to allocate vf database\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* get the sriov info - Linux already collected all the pertinent
+ * information; however, the sriov structure is for the private use
+ * of the pci module. Also, we want this information regardless
+ * of the hypervisor.
+ */
+ iov = &(bp->vfdb->sriov);
+ err = bnx2x_sriov_info(bp, iov);
+ if (err)
+ goto failed;
+
+ /* SR-IOV capability was enabled but there are no VFs */
+ if (iov->total == 0) {
+ err = 0;
+ goto failed;
+ }
+
+ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+ DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+ num_vfs_param, iov->nr_virtfn);
+
+ /* allocate the vf array */
+ bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
+ sizeof(struct bnx2x_virtf),
+ GFP_KERNEL);
+ if (!bp->vfdb->vfs) {
+ BNX2X_ERR("failed to allocate vf array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
+ for_each_vf(bp, i) {
+ bnx2x_vf(bp, i, index) = i;
+ bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
+ bnx2x_vf(bp, i, state) = VF_FREE;
+ mutex_init(&bnx2x_vf(bp, i, op_mutex));
+ bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+ /* enable spoofchk by default */
+ bnx2x_vf(bp, i, spoofchk) = 1;
+ }
+
+ /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
+ if (!bnx2x_get_vf_igu_cam_info(bp)) {
+ BNX2X_ERR("No entries in IGU CAM for vfs\n");
+ err = -EINVAL;
+ goto failed;
+ }
+
+ /* allocate the queue arrays for all VFs */
+ bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
+ sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+
+ if (!bp->vfdb->vfqs) {
+ BNX2X_ERR("failed to allocate vf queue array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Prepare the VFs event synchronization mechanism */
+ mutex_init(&bp->vfdb->event_mutex);
+
+ mutex_init(&bp->vfdb->bulletin_mutex);
+
+ if (SHMEM2_HAS(bp, sriov_switch_mode))
+ SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
+ return 0;
+failed:
+ DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
+ __bnx2x_iov_free_vfdb(bp);
+ return err;
+}
+
+void bnx2x_iov_remove_one(struct bnx2x *bp)
+{
+ int vf_idx;
+
+ /* if SRIOV is not enabled there's nothing to do */
+ if (!IS_SRIOV(bp))
+ return;
+
+ bnx2x_disable_sriov(bp);
+
+ /* disable access to all VFs */
+ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
+ bnx2x_pretend_func(bp,
+ HW_VF_HANDLE(bp,
+ bp->vfdb->sriov.first_vf_in_pf +
+ vf_idx));
+ DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
+ bp->vfdb->sriov.first_vf_in_pf + vf_idx);
+ bnx2x_vf_enable_internal(bp, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+
+ /* free vf database */
+ __bnx2x_iov_free_vfdb(bp);
+}
+
+void bnx2x_iov_free_mem(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* free vfs hw contexts */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = &bp->vfdb->context[i];
+ BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
+ }
+
+ BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
+ BP_VFDB(bp)->sp_dma.mapping,
+ BP_VFDB(bp)->sp_dma.size);
+
+ BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
+ BP_VF_MBX_DMA(bp)->mapping,
+ BP_VF_MBX_DMA(bp)->size);
+
+ BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
+ BP_VF_BULLETIN_DMA(bp)->mapping,
+ BP_VF_BULLETIN_DMA(bp)->size);
+}
+
+int bnx2x_iov_alloc_mem(struct bnx2x *bp)
+{
+ size_t tot_size;
+ int i, rc = 0;
+
+ if (!IS_SRIOV(bp))
+ return rc;
+
+ /* allocate vfs hw contexts */
+ tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
+ BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
+
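+ /* carve the total context area into CDU ILT-page-sized chunks; the
+ * last chunk may be smaller than a full page
+ */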
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
+ cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
+
+ if (cxt->size) {
+ cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+ if (!cxt->addr)
+ goto alloc_mem_err;
+ } else {
+ cxt->addr = NULL;
+ cxt->mapping = 0;
+ }
+ tot_size -= cxt->size;
+ }
+
+ /* allocate vfs ramrods dma memory - client_init and set_mac */
+ tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
+ BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ if (!BP_VFDB(bp)->sp_dma.addr)
+ goto alloc_mem_err;
+ BP_VFDB(bp)->sp_dma.size = tot_size;
+
+ /* allocate mailboxes */
+ tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
+ BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_MBX_DMA(bp)->addr)
+ goto alloc_mem_err;
+
+ BP_VF_MBX_DMA(bp)->size = tot_size;
+
+ /* allocate local bulletin boards */
+ tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
+ BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_BULLETIN_DMA(bp)->addr)
+ goto alloc_mem_err;
+
+ BP_VF_BULLETIN_DMA(bp)->size = tot_size;
+
+ return 0;
+
+alloc_mem_err:
+ return -ENOMEM;
+}
+
+static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+ unsigned long q_type = 0;
+
+ set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+ set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Queue State object */
+ bnx2x_init_queue_obj(bp, &q->sp_obj,
+ cl_id, &q->cid, 1, func_id,
+ bnx2x_vf_sp(bp, vf, q_data),
+ bnx2x_vf_sp_map(bp, vf, q_data),
+ q_type);
+
+ /* sp indication is set only when vlan/mac/etc. are initialized */
+ q->sp_initialized = false;
+
+ DP(BNX2X_MSG_IOV,
+ "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+ vf->abs_vfid, q->sp_obj.func_id, q->cid);
+}
+
+static int bnx2x_max_speed_cap(struct bnx2x *bp)
+{
+ u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
+
+ if (supported &
+ (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
+ return 20000;
+
+ return 10000; /* assume lowest supported speed is 10G */
+}
+
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
+{
+ struct bnx2x_link_report_data *state = &bp->last_reported_link;
+ struct pf_vf_bulletin_content *bulletin;
+ struct bnx2x_virtf *vf;
+ bool update = true;
+ int rc = 0;
+
+ /* sanity and init */
+ rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
+ if (rc)
+ return rc;
+
+ mutex_lock(&bp->vfdb->bulletin_mutex);
+
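+ /* AUTO mirrors the PF's last reported link state; DISABLE forces the
+ * VF link down; ENABLE forces it up at the maximal supported speed.
+ */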
+ if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
+ bulletin->valid_bitmap |= 1 << LINK_VALID;
+
+ bulletin->link_speed = state->line_speed;
+ bulletin->link_flags = 0;
+ if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &state->link_report_flags))
+ bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+ if (test_bit(BNX2X_LINK_REPORT_FD,
+ &state->link_report_flags))
+ bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
+ if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &state->link_report_flags))
+ bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
+ if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &state->link_report_flags))
+ bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
+ } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
+ !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+ bulletin->valid_bitmap |= 1 << LINK_VALID;
+ bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+ } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
+ (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+ bulletin->valid_bitmap |= 1 << LINK_VALID;
+ bulletin->link_speed = bnx2x_max_speed_cap(bp);
+ bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
+ } else {
+ update = false;
+ }
+
+ if (update) {
+ DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
+ "vf %d mode %u speed %d flags %x\n", idx,
+ vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, idx);
+ if (rc) {
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&bp->vfdb->bulletin_mutex);
+ return rc;
+}
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = BP_VF(bp, idx);
+
+ if (!vf)
+ return -EINVAL;
+
+ if (vf->link_cfg == link_state)
+ return 0; /* nothing to do */
+
+ vf->link_cfg = link_state;
+
+ return bnx2x_iov_link_update_vf(bp, idx);
+}
+
+void bnx2x_iov_link_update(struct bnx2x *bp)
+{
+ int vfid;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ for_each_vf(bp, vfid)
+ bnx2x_iov_link_update_vf(bp, vfid);
+}
+
+/* called by bnx2x_nic_load */
+int bnx2x_iov_nic_init(struct bnx2x *bp)
+{
+ int vfid;
+
+ if (!IS_SRIOV(bp)) {
+ DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
+ return 0;
+ }
+
+ DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
+
+ /* let FLR complete ... */
+ msleep(100);
+
+ /* initialize vf database */
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
+ vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
+ BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
+
+ /* init statically provisioned resources */
+ bnx2x_iov_static_resc(bp, vf);
+
+ /* queues are initialized during VF-ACQUIRE */
+ vf->filter_state = 0;
+ vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+ vf_vlan_rules_cnt(vf));
+ bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+ vf_mac_rules_cnt(vf));
+
+ /* init mcast object - This object will be re-initialized
+ * during VF-ACQUIRE with the proper cl_id and cid.
+ * It needs to be initialized here so that it can be safely
+ * handled by a subsequent FLR flow.
+ */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
+ 0xFF, 0xFF, 0xFF,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* set the mailbox message addresses */
+ BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
+ (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
+ MBX_MSG_ALIGNED_SIZE);
+
+ BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
+ vfid * MBX_MSG_ALIGNED_SIZE;
+
+ /* Enable vf mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ }
+
+ /* Final VF init */
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+ /* fill in the BDF and bars */
+ vf->domain = bnx2x_vf_domain(bp, vfid);
+ vf->bus = bnx2x_vf_bus(bp, vfid);
+ vf->devfn = bnx2x_vf_devfn(bp, vfid);
+ bnx2x_vf_set_bars(bp, vf);
+
+ DP(BNX2X_MSG_IOV,
+ "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
+ vf->abs_vfid, vf->bus, vf->devfn,
+ (unsigned)vf->bars[0].bar, vf->bars[0].size,
+ (unsigned)vf->bars[1].bar, vf->bars[1].size,
+ (unsigned)vf->bars[2].bar, vf->bars[2].size);
+ }
+
+ return 0;
+}
+
+/* called by bnx2x_chip_cleanup */
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return 0;
+
+ /* release all the VFs */
+ for_each_vf(bp, i)
+ bnx2x_vf_release(bp, BP_VF(bp, i));
+
+ return 0;
+}
+
+/* called by bnx2x_init_hw_func, returns the next ilt line */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
+{
+ int i;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ if (!IS_SRIOV(bp))
+ return line;
+
+ /* set vfs ilt lines */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
+
+ ilt->lines[line+i].page = hw_cxt->addr;
+ ilt->lines[line+i].page_mapping = hw_cxt->mapping;
+ ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
+ }
+ return line + i;
+}
+
+static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
+{
+ return ((cid >= BNX2X_FIRST_VF_CID) &&
+ ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
+}
+
+static
+void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
+ struct bnx2x_vf_queue *vfq,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+ u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
+
+ /* Always push next commands out, don't wait here */
+ set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+ rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
+ &ramrod_flags);
+ break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
+ &ramrod_flags);
+ break;
+ default:
+ BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
+ return;
+ }
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
+}
+
+static
+void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ int rc;
+
+ rparam.mcast_obj = &vf->mcast_obj;
+ vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+}
+
+static
+void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ smp_mb__before_atomic();
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ smp_mb__after_atomic();
+}
+
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
+}
+
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
+{
+ struct bnx2x_virtf *vf;
+ int qidx = 0, abs_vfid;
+ u8 opcode;
+ u16 cid = 0xffff;
+
+ if (!IS_SRIOV(bp))
+ return 1;
+
+ /* first get the cid - the only events we handle here are cfc-delete
+ * and set-mac completion
+ */
+ opcode = elem->message.opcode;
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+ DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+ cid = SW_CID(elem->message.data.eth_event.echo);
+ DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ abs_vfid = elem->message.data.vf_flr_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ abs_vfid = elem->message.data.malicious_vf_event.vf_id;
+ BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid,
+ elem->message.data.malicious_vf_event.err_id);
+ goto get_vf;
+ default:
+ return 1;
+ }
+
+	/* check if the cid is in the VF range */
+ if (!bnx2x_iov_is_vf_cid(bp, cid)) {
+ DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
+ return 1;
+ }
+
+ /* extract vf and rxq index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. The max number of VFs (per path) is 64
+ */
+ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
+ abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
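+	/* Worked example (assuming BNX2X_VF_CID_WND is 4, i.e. 16 CIDs per
+	 * VF): cid 0x2a3 yields qidx = 0x2a3 & 0xf = 3 and
+	 * abs_vfid = (0x2a3 >> 4) & 63 = 42.
+	 */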
+get_vf:
+ vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf) {
+ BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
+ cid, abs_vfid);
+ return 0;
+ }
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
+ vf->abs_vfid, qidx);
+ vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
+ &vfq_get(vf,
+ qidx)->sp_obj,
+ BNX2X_Q_CMD_CFC_DEL);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
+ break;
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_mcast_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_filters_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_rss_update_eqe(bp, vf);
+ fallthrough;
+ case EVENT_RING_OPCODE_VF_FLR:
+ /* Do nothing for now */
+ return 0;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ vf->malicious = true;
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
+{
+ /* extract the vf from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. The max number of VFs (per path) is 64
+ */
+ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+ return bnx2x_vf_by_abs_fid(bp, abs_vfid);
+}
+
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj)
+{
+ struct bnx2x_virtf *vf;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+
+ if (vf) {
+ /* extract queue index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. The max number of VFs (per path) is 64
+ */
+ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
+ *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
+ } else {
+ BNX2X_ERR("No vf matching cid %d\n", vf_cid);
+ }
+}
+
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
+{
+ int i;
+ int first_queue_query_index, num_queues_req;
+ struct stats_query_entry *cur_query_entry;
+ u8 stats_count = 0;
+ bool is_fcoe = false;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ if (!NO_FCOE(bp))
+ is_fcoe = true;
+
+ /* fcoe adds one global request and one queue request */
+ num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
+ (is_fcoe ? 0 : 1);
+
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
+
+ cur_query_entry = &bp->fw_stats_req->
+ query[first_queue_query_index + num_queues_req];
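+	/* Query array sketch (illustrative): the request starts with the
+	 * PF's own entries - the non-virtual queries plus one per ETH queue
+	 * (and FCoE when present) - and the VF queue entries are appended
+	 * right after them, which is where cur_query_entry now points.
+	 */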
+
+ for_each_vf(bp, i) {
+ int j;
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (vf->state != VF_ENABLED) {
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
+ continue;
+ }
+
+ if (vf->malicious) {
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d malicious so no stats for it\n",
+ vf->abs_vfid);
+ continue;
+ }
+
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "add addresses for vf %d\n", vf->abs_vfid);
+ for_each_vfq(vf, j) {
+ struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+
+ dma_addr_t q_stats_addr =
+ vf->fw_stat_map + j * vf->stats_stride;
+
+			/* collect stats from active queues only */
+ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED)
+ continue;
+
+ /* create stats query entry for this queue */
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = vfq_stat_id(vf, rxq);
+ cur_query_entry->funcID =
+ cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(q_stats_addr));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(q_stats_addr));
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo,
+ cur_query_entry->funcID,
+ j, cur_query_entry->index);
+ cur_query_entry++;
+ stats_count++;
+
+ /* all stats are coalesced to the leading queue */
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ break;
+ }
+ }
+ bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
+}
+
+/* VF API helpers */
+static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
+ u8 enable)
+{
+ u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
+ u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
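+	/* bit 6 appears to act as the entry-valid flag (an illustrative
+	 * reading, not stated in the original source); writing 0 both clears
+	 * the vfid and invalidates the entry
+	 */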
+
+ REG_WR(bp, reg, val);
+}
+
+static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), false);
+}
+
+static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 val;
+
+ /* clear the VF configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
+ IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
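+	/* Worked example (illustrative): a VF with 8 status blocks, 16 CIDs
+	 * per VF and a 16-queue cap gets min(8, 16, 16) = 8 queues.
+	 */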
+ return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+ BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *req_resc)
+{
+ u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+ return ((req_resc->num_rxqs <= rxq_cnt) &&
+ (req_resc->num_txqs <= txq_cnt) &&
+ (req_resc->num_sbs <= vf_sb_count(vf)) &&
+ (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+ (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc)
+{
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+ int i;
+
+	/* if state is 'acquired' the VF was not released or FLR'd; in
+	 * this case the returned resources match the already acquired
+	 * resources. Verify that the requested numbers do not exceed
+	 * the already acquired numbers.
+ */
+ if (vf->state == VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+ vf->abs_vfid);
+
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
+ vf->abs_vfid);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Otherwise vf state must be 'free' or 'reset' */
+ if (vf->state != VF_FREE && vf->state != VF_RESET) {
+ BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+
+ /* static allocation:
+	 * the global maximum numbers are fixed per VF. Fail the request if
+	 * the requested numbers exceed these globals
+ */
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ DP(BNX2X_MSG_IOV,
+ "cannot fulfill vf resource request. Placing maximal available values in response\n");
+ /* set the max resource in the vf */
+ return -ENOMEM;
+ }
+
+ /* Set resources counters - 0 request means max available */
+ vf_sb_count(vf) = resc->num_sbs;
+ vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+ DP(BNX2X_MSG_IOV,
+ "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
+ vf_sb_count(vf), vf_rxq_count(vf),
+ vf_txq_count(vf), vf_mac_rules_cnt(vf),
+ vf_vlan_rules_cnt(vf));
+
+ /* Initialize the queues */
+ if (!vf->vfqs) {
+ DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
+ return -EINVAL;
+ }
+
+ for_each_vfq(vf, i) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, i);
+
+ if (!q) {
+ BNX2X_ERR("q number %d was not allocated\n", i);
+ return -EINVAL;
+ }
+
+ q->index = i;
+ q->cxt = &((base_cxt + i)->eth);
+ q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
+
+ DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
+ vf->abs_vfid, i, q->index, q->cid, q->cxt);
+
+ /* init SP objects */
+ bnx2x_vfq_init(bp, vf, q);
+ }
+ vf->state = VF_ACQUIRED;
+ return 0;
+}
+
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ int i;
+
+ /* the sb resources are initialized at this point, do the
+ * FW/HW initializations
+ */
+ for_each_vf_sb(vf, i)
+ bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
+ vf_igu_sb(vf, i), vf_igu_sb(vf, i));
+
+ /* Sanity checks */
+ if (vf->state != VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+
+ /* let FLR complete ... */
+ msleep(100);
+
+ /* FLR cleanup epilogue */
+ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
+ return -EBUSY;
+
+ /* reset IGU VF statistics: MSIX */
+	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
+
+ /* function setup */
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
+ bnx2x_func_init(bp, &func_init);
+
+ /* Enable the vf */
+ bnx2x_vf_enable_access(bp, vf->abs_vfid);
+ bnx2x_vf_enable_traffic(bp, vf);
+
+ /* queue protection table */
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), true);
+
+ vf->state = VF_ENABLED;
+
+ /* update vf bulletin board */
+ bnx2x_post_vf_bulletin(bp, vf->index);
+
+ return 0;
+}
+
+struct set_vf_state_cookie {
+ struct bnx2x_virtf *vf;
+ u8 state;
+};
+
+static void bnx2x_set_vf_state(void *cookie)
+{
+ struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+ p->vf->state = p->state;
+}
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc = 0, i;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Close all queues */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_teardown(bp, vf, i);
+ if (rc)
+ goto op_err;
+ }
+
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
+
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
+
+ /* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer, which the VF will
+	 * free as soon as we return from the close flow.
+ */
+ {
+ struct set_vf_state_cookie cookie;
+
+ cookie.vf = vf;
+ cookie.state = VF_ACQUIRED;
+ rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+ if (rc)
+ goto op_err;
+ }
+
+ DP(BNX2X_MSG_IOV, "set state to acquired\n");
+
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
+}
+
+/* VF release can be called in either of two cases:
+ * 1. The VF was acquired but not enabled.
+ * 2. The VF was enabled or was in the process of being enabled.
+ */
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
+ vf->state == VF_FREE ? "Free" :
+ vf->state == VF_ACQUIRED ? "Acquired" :
+ vf->state == VF_ENABLED ? "Enabled" :
+ vf->state == VF_RESET ? "Reset" :
+ "Unknown");
+
+ switch (vf->state) {
+ case VF_ENABLED:
+ rc = bnx2x_vf_close(bp, vf);
+ if (rc)
+ goto op_err;
+ fallthrough; /* to release resources */
+ case VF_ACQUIRED:
+ DP(BNX2X_MSG_IOV, "about to free resources\n");
+ bnx2x_vf_free_resc(bp, vf);
+ break;
+
+ case VF_FREE:
+ case VF_RESET:
+ default:
+ break;
+ }
+ return 0;
+op_err:
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
+}
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss)
+{
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+ set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+ return bnx2x_config_rss(bp, rss);
+}
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params)
+{
+ aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+ struct bnx2x_queue_state_params qstate;
+ int qid, rc = 0;
+
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Set ramrod params */
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ memcpy(&qstate.params.update_tpa, params,
+ sizeof(struct bnx2x_queue_update_tpa_params));
+ qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+
+ for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.params.update_tpa.sge_map = sge_addr[qid];
+ DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+ vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+ U64_LO(sge_addr[qid]));
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc) {
+ BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+ U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+ vf->abs_vfid, qid);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* VF release ~ VF close + VF release-resources
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+
+ rc = bnx2x_vf_free(bp, vf);
+ if (rc)
+ WARN(rc,
+		     "VF[%d] Failed to free resources for release op - rc=%d\n",
+ vf->abs_vfid, rc);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ return rc;
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv)
+{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(tlv)) {
+ BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+ return;
+ }
+
+ /* lock the channel */
+ mutex_lock(&vf->op_mutex);
+
+ /* record the locking op */
+ vf->op_current = tlv;
+
+ /* log the lock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+ vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv)
+{
+ enum channel_tlvs current_tlv;
+
+ if (!vf) {
+ BNX2X_ERR("VF was %p\n", vf);
+ return;
+ }
+
+ current_tlv = vf->op_current;
+
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(expected_tlv))
+ return;
+
+ WARN(expected_tlv != vf->op_current,
+ "lock mismatch: expected %d found %d", expected_tlv,
+ vf->op_current);
+
+	/* clear the locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+
+	/* unlock the channel */
+ mutex_unlock(&vf->op_mutex);
+
+ /* log the unlock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+ vf->abs_vfid, current_tlv);
+}
+
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+ struct bnx2x_queue_state_params q_params;
+ u32 prev_flags;
+ int i, rc;
+
+ /* Verify changes are needed and record current Tx switching state */
+ prev_flags = bp->flags;
+ if (enable)
+ bp->flags |= TX_SWITCHING;
+ else
+ bp->flags &= ~TX_SWITCHING;
+ if (prev_flags == bp->flags)
+ return 0;
+
+ /* Verify state enables the sending of queue ramrods */
+ if ((bp->state != BNX2X_STATE_OPEN) ||
+ (bnx2x_get_q_logical_state(bp,
+ &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE))
+ return 0;
+
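+	/* Background note (an assumption, not from the original source): Tx
+	 * switching makes the device deliver traffic addressed to another
+	 * local function (e.g. a VF) internally instead of sending it to the
+	 * wire, which is why it is toggled together with SR-IOV.
+	 */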
+ /* send q. update ramrod to configure Tx switching */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+ &q_params.params.update.update_flags);
+ if (enable)
+ __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+ else
+ __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int tx_idx;
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ for (tx_idx = FIRST_TX_COS_INDEX;
+ tx_idx < fp->max_cos; tx_idx++) {
+ q_params.params.update.cid_index = tx_idx;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+ return 0;
+}
+
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
+{
+ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+ /* HW channel is only operational when PF is up */
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
+ return -EINVAL;
+ }
+
+ /* we are always bound by the total_vfs in the configuration space */
+ if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+ num_vfs_param = BNX2X_NR_VIRTFN(bp);
+ }
+
+ bp->requested_nr_virtfn = num_vfs_param;
+ if (num_vfs_param == 0) {
+ bnx2x_set_pf_tx_switching(bp, false);
+ bnx2x_disable_sriov(bp);
+ return 0;
+ } else {
+ return bnx2x_enable_sriov(bp);
+ }
+}
+
+#define IGU_ENTRY_SIZE 4
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0, req_vfs = bp->requested_nr_virtfn;
+ int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+ u32 igu_entry, address;
+ u16 num_vf_queues;
+
+ if (req_vfs == 0)
+ return 0;
+
+ first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+ /* statically distribute vf sb pool between VFs */
+ num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+ BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+ /* zero previous values learned from igu cam */
+ for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ vf->sb_count = 0;
+ vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+ }
+ bp->vfdb->vf_sbs_pool = 0;
+
+ /* prepare IGU cam */
+ sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+ address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
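+	/* Entry format sketch (illustrative): each 4-byte CAM entry encodes
+	 * the owning function id, the vector within that function and a
+	 * valid flag; e.g. vf_idx 3 / vfq_idx 1 yields
+	 * (3 << FID_SHIFT) | (1 << VECTOR_SHIFT) | VALID using the
+	 * IGU_REG_MAPPING_MEMORY_* shifts below.
+	 */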
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+ igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+ vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+ IGU_REG_MAPPING_MEMORY_VALID;
+ DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+ sb_idx, vf_idx);
+ REG_WR(bp, address, igu_entry);
+ sb_idx++;
+ address += IGU_ENTRY_SIZE;
+ }
+ }
+
+ /* Reinitialize vf database according to igu cam */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+ BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+ qcount = 0;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += vf_sb_count(vf);
+ bnx2x_iov_static_resc(bp, vf);
+ }
+
+ /* prepare msix vectors in VF configuration space - the value in the
+ * PCI configuration space should be the index of the last entry,
+ * namely one less than the actual size of the table
+ */
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+ REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+ num_vf_queues - 1);
+ DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+ vf_idx, num_vf_queues - 1);
+ }
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
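+	/* Note (illustrative): bnx2x_pretend_func() redirects subsequent GRC
+	 * accesses to the pretended function, which is how the PF writes each
+	 * VF's config space above; the call above restores the PF's own view.
+	 */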
+
+	/* enable sriov. This will probe all the VFs, and consequently cause
+ * the "acquire" messages to appear on the VF PF channel.
+ */
+ DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+ bnx2x_disable_sriov(bp);
+
+ rc = bnx2x_set_pf_tx_switching(bp, true);
+ if (rc)
+ return rc;
+
+ rc = pci_enable_sriov(bp->pdev, req_vfs);
+ if (rc) {
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ return rc;
+ }
+ DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+ return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+ int vfidx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+ for_each_vf(bp, vfidx) {
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
+ htons(ETH_P_8021Q));
+ }
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
+ if (pci_vfs_assigned(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+ "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return;
+ }
+
+ pci_disable_sriov(bp->pdev);
+}
+
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin,
+ bool test_queue)
+{
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
+ return -EINVAL;
+ }
+
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
+ return -EINVAL;
+ }
+
+ if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+ vfidx, BNX2X_NR_VIRTFN(bp));
+ return -EINVAL;
+ }
+
+ /* init members */
+ *vf = BP_VF(bp, vfidx);
+ *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ if (!*vf) {
+ BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
+ return -EINVAL;
+ }
+
+ if (test_queue && !(*vf)->vfqs) {
+ BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!*bulletin) {
+ BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ int rc;
+
+ /* sanity and init */
+ rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+ if (rc)
+ return rc;
+
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ if (!mac_obj || !vlan_obj) {
+ BNX2X_ERR("VF partially initialized\n");
+ return -EINVAL;
+ }
+
+ ivi->vf = vfidx;
+ ivi->qos = 0;
+ ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->min_tx_rate = 0;
+ ivi->spoofchk = vf->spoofchk ? 1 : 0;
+ ivi->linkstate = vf->link_cfg;
+ if (vf->state == VF_ENABLED) {
+ /* mac and vlan are in vlan_mac objects */
+ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1,
+ (u8 *)&ivi->vlan, 0,
+ VLAN_HLEN);
+ }
+ } else {
+ mutex_lock(&bp->vfdb->bulletin_mutex);
+ /* mac */
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+			/* mac configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+ else
+ /* function has not been loaded yet. Show mac as 0s */
+ eth_zero_addr(ivi->mac);
+
+ /* vlan */
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+			/* vlan configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ else
+ /* function has not been loaded yet. Show vlans as 0s */
+ memset(&ivi->vlan, 0, VLAN_HLEN);
+
+ mutex_unlock(&bp->vfdb->bulletin_mutex);
+ }
+
+ return 0;
+}
+
+/* New mac for VF. Consider these cases:
+ * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
+ * supply at acquire.
+ * 2. VF has already been acquired but has not yet initialized - store in local
+ * bulletin board. mac will be posted on VF bulletin board after VF init. VF
+ * will configure this mac when it is ready.
+ * 3. VF has already initialized but has not yet setup a queue - post the new
+ * mac on VF's bulletin board right now. VF will configure this mac when it
+ * is ready.
+ * 4. VF has already set a queue - delete any macs already configured for this
+ * queue and manually config the new mac.
+ * In any event, once this function has been called refuse any attempts by the
+ * VF to configure any mac for itself except for this mac. In case of a race
+ * where the VF fails to see the new post on its bulletin board before sending a
+ * mac configuration request, the PF will simply fail the request and the VF
+ * can try again after consulting its bulletin board.
+ */
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state;
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+
+ if (!is_valid_ether_addr(mac)) {
+ BNX2X_ERR("mac address invalid\n");
+ return -EINVAL;
+ }
+
+ /* sanity and init */
+ rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&bp->vfdb->bulletin_mutex);
+
+	/* update PF's copy of the VF's bulletin. The PF will no longer accept
+	 * mac configuration requests from the vf unless they match this mac
+ */
+ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
+ memcpy(bulletin->mac, mac, ETH_ALEN);
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+
+ /* release lock before checking return code */
+ mutex_unlock(&bp->vfdb->bulletin_mutex);
+
+ if (rc) {
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+ return rc;
+ }
+
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the mac in device on this vf's queue */
+ unsigned long ramrod_flags = 0;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+
+ /* User should be able to see failure reason in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+
+ /* remove existing eth macs */
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete eth macs\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* remove existing uc list macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete uc_list macs\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* configure the new mac to device */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
+ BNX2X_ETH_MAC, &ramrod_flags);
+
+out:
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+ }
+
+ return rc;
+}
+
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, bool accept)
+{
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+ unsigned long accept_flags;
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (accept)
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u16 vlan, bool add)
+{
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+ : BNX2X_VLAN_MAC_DEL;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ unsigned long vlan_mac_flags = 0;
+ unsigned long ramrod_flags = 0;
+ struct bnx2x_virtf *vf = NULL;
+ int i, rc;
+
+ if (vlan > 4095) {
+ BNX2X_ERR("illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+ vfidx, vlan, 0);
+
+ /* sanity and init */
+ rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+ if (rc)
+ return rc;
+
+ /* update PF's copy of the VF's bulletin. No point in posting the vlan
+	 * to the VF since it doesn't have anything to do with it. But it is
+	 * useful to store it here in case the VF is not up yet and we can
+	 * only configure the vlan later when it comes up. Treat vlan id 0 as
+	 * a request to remove the host tag.
+ */
+ mutex_lock(&bp->vfdb->bulletin_mutex);
+
+ if (vlan > 0)
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ else
+ bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
+ bulletin->vlan = vlan;
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc)
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+ mutex_unlock(&bp->vfdb->bulletin_mutex);
+
+ /* is vf initialized and queue set up? */
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
+
+ /* User should be able to see error in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* clear accept_any_vlan when HV forces vlan, otherwise
+ * according to VF capabilities
+ */
+ if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+ bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
+
+ rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+ if (rc)
+ goto out;
+
+ /* send queue update ramrods to configure default vlan and
+ * silent vlan removal
+ */
+ for_each_vfq(vf, i) {
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+ /* validate the Q is UP */
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ continue;
+
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+ i);
+ goto out;
+ }
+ }
+out:
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+	if (!rc)
+ DP(BNX2X_MSG_IOV,
+ "updated VF[%d] vlan configuration (vlan = %d)\n",
+ vfidx, vlan);
+
+ return rc;
+}
+
+int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf;
+ int i, rc = 0;
+
+ vf = BP_VF(bp, idx);
+ if (!vf)
+ return -EINVAL;
+
+ /* nothing to do */
+ if (vf->spoofchk == val)
+ return 0;
+
+ vf->spoofchk = val ? 1 : 0;
+
+ DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n",
+ val ? "enabling" : "disabling", idx);
+
+ /* is vf initialized and queue set up? */
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
+
+ /* User should be able to see error in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* send queue update ramrods to configure spoofchk */
+ for_each_vfq(vf, i) {
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+ /* validate the Q is UP */
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ continue;
+
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+ &update_params->update_flags);
+ if (val) {
+ __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
+ &update_params->update_flags);
+ } else {
+ __clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
+ &update_params->update_flags);
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n",
+ val ? "enable" : "disable", idx, i);
+ goto out;
+ }
+ }
+out:
+ if (!rc)
+ DP(BNX2X_MSG_IOV,
+ "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled",
+ idx);
+
+ return rc;
+}
+
+/* crc is the first field in the bulletin board. Compute the crc over the
+ * entire bulletin board excluding the crc field itself. Use the length field
+ * because the Bulletin Board may have been posted by a PF with a different
+ * version from the vf which samples it. Therefore, the length is computed by the
+ * PF and then used blindly by the VF.
+ */
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
+{
+ return crc32(BULLETIN_CRC_SEED,
+ ((u8 *)bulletin) + sizeof(bulletin->crc),
+ bulletin->length - sizeof(bulletin->crc));
+}
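+
+/* Usage sketch (illustrative): the VF side samples the board and compares
+ * the stored crc against a fresh computation, e.g.
+ *	crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
+ *	valid = (bp->shadow_bulletin.content.crc == crc);
+ * which is exactly the retry loop in bnx2x_sample_bulletin() below.
+ */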
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ struct pf_vf_bulletin_content *bulletin;
+ int attempts;
+
+	/* sampling the structure mid-post may result in corrupted data;
+	 * validate the crc to ensure coherency.
+ */
+ for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+ u32 crc;
+
+ /* sample the bulletin board */
+ memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
+ sizeof(union pf_vf_bulletin));
+
+ crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
+
+ if (bp->shadow_bulletin.content.crc == crc)
+ break;
+
+ BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
+ bp->shadow_bulletin.content.crc, crc);
+ }
+
+ if (attempts >= BULLETIN_ATTEMPTS) {
+ BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+ attempts);
+ return PFVF_BULLETIN_CRC_ERR;
+ }
+ bulletin = &bp->shadow_bulletin.content;
+
+ /* bulletin board hasn't changed since last sample */
+ if (bp->old_bulletin.version == bulletin->version)
+ return PFVF_BULLETIN_UNCHANGED;
+
+ /* the mac address in bulletin board is valid and is new */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
+ !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
+ /* update new mac to net device */
+ eth_hw_addr_set(bp->dev, bulletin->mac);
+ }
+
+ if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
+ DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
+ bulletin->link_speed, bulletin->link_flags);
+
+ bp->vf_link_vars.line_speed = bulletin->link_speed;
+ bp->vf_link_vars.link_report_flags = 0;
+ /* Link is down */
+ if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->vf_link_vars.link_report_flags);
+ /* Full DUPLEX */
+ if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
+ __set_bit(BNX2X_LINK_REPORT_FD,
+ &bp->vf_link_vars.link_report_flags);
+ /* Rx Flow Control is ON */
+ if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
+ __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &bp->vf_link_vars.link_report_flags);
+ /* Tx Flow Control is ON */
+ if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
+ __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &bp->vf_link_vars.link_report_flags);
+ __bnx2x_link_report(bp);
+ }
+
+ /* copy new bulletin board to bp */
+ memcpy(&bp->old_bulletin, bulletin,
+ sizeof(struct pf_vf_bulletin_content));
+
+ return PFVF_BULLETIN_UPDATED;
+}
+
+void bnx2x_timer_sriov(struct bnx2x *bp)
+{
+ bnx2x_sample_bulletin(bp);
+
+	/* if the channel is down we need to self-destruct */
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_MSG_IOV);
+}
+
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ /* vf doorbells are embedded within the regview */
+ return bp->regview + PXP_VF_ADDR_DB_START;
+}
+
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+}
+
+int bnx2x_vf_pci_alloc(struct bnx2x *bp)
+{
+ mutex_init(&bp->vf2pf_mutex);
+
+ /* allocate vf2pf mailbox for vf to pf channel */
+ bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ if (!bp->vf2pf_mbox)
+ goto alloc_mem_err;
+
+ /* allocate pf 2 vf bulletin board */
+ bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ if (!bp->pf2vf_bulletin)
+ goto alloc_mem_err;
+
+ bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
+
+ return 0;
+
+alloc_mem_err:
+ bnx2x_vf_pci_dealloc(bp);
+ return -ENOMEM;
+}
+
+void bnx2x_iov_channel_down(struct bnx2x *bp)
+{
+ int vf_idx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ for_each_vf(bp, vf_idx) {
+ /* locate this VFs bulletin board and update the channel down
+ * bit
+ */
+ bulletin = BP_VF_BULLETIN(bp, vf_idx);
+ bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
+
+ /* update vf bulletin board */
+ bnx2x_post_vf_bulletin(bp, vf_idx);
+ }
+}
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+ &bp->iov_task_state))
+ bnx2x_vf_handle_flr_event(bp);
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+ &bp->iov_task_state))
+ bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &bp->iov_task_state);
+ smp_mb__after_atomic();
+ DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
new file mode 100644
index 0000000000..02a4e557e1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -0,0 +1,633 @@
+/* bnx2x_sriov.h: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
+ */
+#ifndef BNX2X_SRIOV_H
+#define BNX2X_SRIOV_H
+
+#include "bnx2x_vfpf.h"
+#include "bnx2x.h"
+
+enum sample_bulletin_result {
+ PFVF_BULLETIN_UNCHANGED,
+ PFVF_BULLETIN_UPDATED,
+ PFVF_BULLETIN_CRC_ERR
+};
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+extern struct workqueue_struct *bnx2x_iov_wq;
+
+/* The bnx2x device structure holds vfdb structure described below.
+ * The VF array is indexed by the relative vfid.
+ */
+#define BNX2X_VF_MAX_QUEUES 16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8
+
+struct bnx2x_sriov {
+ u32 first_vf_in_pf;
+
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total; /* total VFs associated with the PF */
+ u16 initial; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+};
+
+/* bars */
+struct bnx2x_vf_bar {
+ u64 bar;
+ u32 size;
+};
+
+struct bnx2x_vf_bar_info {
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+ u8 nr_bars;
+};
+
+/* vf queue (used both for rx or tx) */
+struct bnx2x_vf_queue {
+ struct eth_context *cxt;
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
+
+ /* VLAN-MACs object */
+ struct bnx2x_vlan_mac_obj vlan_mac_obj;
+
+ unsigned long accept_flags; /* last accept flags configured */
+
+ /* Queue Slow-path State object */
+ struct bnx2x_queue_sp_obj sp_obj;
+
+ u32 cid;
+ u16 index;
+ u16 sb_idx;
+ bool is_leading;
+ bool sp_initialized;
+};
+
+/* struct bnx2x_vf_queue_construct_params - prepare queue construction
+ * parameters: q-init, q-setup and SB index
+ */
+struct bnx2x_vf_queue_construct_params {
+ struct bnx2x_queue_state_params qstate;
+ struct bnx2x_queue_setup_params prep_qsetup;
+};
+
+/* forward */
+struct bnx2x_virtf;
+
+/* VFOP definitions */
+
+struct bnx2x_vf_mac_vlan_filter {
+ int type;
+#define BNX2X_VF_FILTER_MAC BIT(0)
+#define BNX2X_VF_FILTER_VLAN BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+ (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
+
+ bool add;
+ bool applied;
+ u8 *mac;
+ u16 vid;
+};
+
+struct bnx2x_vf_mac_vlan_filters {
+ int count;
+ struct bnx2x_vf_mac_vlan_filter filters[];
+};
+
+/* vf context */
+struct bnx2x_virtf {
+ u16 cfg_flags;
+#define VF_CFG_STATS_COALESCE 0x1
+#define VF_CFG_EXT_BULLETIN 0x2
+#define VF_CFG_VLAN_FILTER 0x4
+ u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO
+ * IFLA_VF_LINK_STATE_ENABLE
+ * IFLA_VF_LINK_STATE_DISABLE
+ */
+ u8 state;
+#define VF_FREE 0 /* VF ready to be acquired holds no resc */
+#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
+#define VF_ENABLED 2 /* VF Enabled */
+#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+#define VF_LOST 4 /* Recovery while VFs are loaded */
+
+ bool flr_clnup_stage; /* true during flr cleanup */
+ bool malicious; /* true if FW indicated so, until FLR */
+ /* 1(true) if spoof check is enabled */
+ u8 spoofchk;
+
+ /* dma */
+ dma_addr_t fw_stat_map;
+ u16 stats_stride;
+ dma_addr_t bulletin_map;
+
+ /* Allocated resources counters. Before the VF is acquired, the
+ * counters hold the following values:
+ *
+ * - xxq_count = 0 as the queues memory is not allocated yet.
+ *
+ * - sb_count = The number of status blocks configured for this VF in
+ * the IGU CAM. Initially read during probe.
+ *
+ * - xx_rules_count = The number of rules statically and equally
+ * allocated for each VF, during PF load.
+ */
+ struct vf_pf_resc_request alloc_resc;
+#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)
+#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)
+#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs)
+#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
+#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
+#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
+
+ u8 sb_count; /* actual number of SBs */
+ u8 igu_base_id; /* base igu status block id */
+
+ struct bnx2x_vf_queue *vfqs;
+#define LEADING_IDX 0
+#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
+
+ u8 index; /* index in the vf array */
+ u8 abs_vfid;
+ u8 sp_cl_id;
+ u32 error; /* 0 means all's-well */
+
+ /* BDF */
+ unsigned int domain;
+ unsigned int bus;
+ unsigned int devfn;
+
+ /* bars */
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+
+ /* set-mac ramrod state 1-pending, 0-done */
+ unsigned long filter_state;
+
+ /* leading rss client id ~~ the client id of the first rxq, must be
+ * set for each txq.
+ */
+ int leading_rss;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* slow-path operations */
+ struct mutex op_mutex; /* one vfop at a time mutex */
+ enum channel_tlvs op_current;
+
+ u8 fp_hsi;
+
+ struct bnx2x_credit_pool_obj vf_vlans_pool;
+ struct bnx2x_credit_pool_obj vf_macs_pool;
+};
+
+#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
+
+#define for_each_vf(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
+
+#define for_each_vfq(vf, var) \
+ for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
+
+#define for_each_vf_sb(vf, var) \
+ for ((var) = 0; (var) < vf_sb_count(vf); (var)++)
+
+#define is_vf_multi(vf) (vf_rxq_count(vf) > 1)
+
+#define HW_VF_HANDLE(bp, abs_vfid) \
+ (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4))
+
+#define FW_PF_MAX_HANDLE 8
+
+#define FW_VF_HANDLE(abs_vfid) \
+ (abs_vfid + FW_PF_MAX_HANDLE)
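+
+/* Example (illustrative): with FW_PF_MAX_HANDLE 8, abs_vfid 5 maps to FW
+ * handle 13, keeping VF handles clear of the PF handle range 0..7.
+ */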
+
+#define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \
+ : 0)
+#define VF_MAC_CREDIT_CNT 1
+#define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */
+
+/* locking and unlocking the channel mutex */
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv);
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv);
+
+/* VF mail box (aka vf-pf channel) */
+
+/* a container for the bi-directional vf<-->pf messages.
+ * The actual response will be placed according to the offset parameter
+ * provided in the request
+ */
+
+#define MBX_MSG_ALIGN 8
+#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \
+ MBX_MSG_ALIGN))
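+
+/* Illustrative note: rounding the request/response pair up to MBX_MSG_ALIGN
+ * keeps the per-VF mailbox slots (base + vfid * MBX_MSG_ALIGNED_SIZE, as
+ * computed in bnx2x_iov_nic_init()) naturally aligned.
+ */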
+
+struct bnx2x_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
+
+struct bnx2x_vf_mbx {
+ struct bnx2x_vf_mbx_msg *msg;
+ dma_addr_t msg_mapping;
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+};
+
+struct bnx2x_vf_sp {
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_mac_rdata;
+
+ union {
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_data;
+
+ union {
+ struct eth_rss_update_ramrod_data e2;
+ } rss_rdata;
+};
+
+struct hw_dma {
+ void *addr;
+ dma_addr_t mapping;
+ size_t size;
+};
+
+struct bnx2x_vfdb {
+#define BP_VFDB(bp) ((bp)->vfdb)
+ /* vf array */
+ struct bnx2x_virtf *vfs;
+#define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
+ &((bp)->vfdb->vfs[idx]) : NULL)
+#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var)
+
+ /* queue array - for all vfs */
+ struct bnx2x_vf_queue *vfqs;
+
+ /* vf HW contexts */
+ struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
+#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[i])
+
+ /* SR-IOV information */
+ struct bnx2x_sriov sriov;
+ struct hw_dma mbx_dma;
+#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma))
+ struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS];
+#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[vfid]))
+
+ struct hw_dma bulletin_dma;
+#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
+#define BP_VF_BULLETIN(bp, vf) \
+ (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
+ + (vf))
+
+ struct hw_dma sp_dma;
+#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
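+/* Addressing sketch (illustrative): sp_dma holds one struct bnx2x_vf_sp per
+ * VF back to back, so these macros resolve to base + index * sizeof(struct
+ * bnx2x_vf_sp) + offsetof(struct bnx2x_vf_sp, field) in the CPU and DMA
+ * address spaces respectively.
+ */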
+
+#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
+ u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+ /* the number of msix vectors belonging to this PF designated for VFs */
+ u16 vf_sbs_pool;
+ u16 first_vf_igu_entry;
+
+ /* sp_rtnl synchronization */
+ struct mutex event_mutex;
+ u64 event_occur;
+
+ /* bulletin board update synchronization */
+ struct mutex bulletin_mutex;
+};
+
+/* queue access */
+static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
+{
+ return &(vf->vfqs[index]);
+}
+
+/* FW ids */
+static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf->igu_base_id + sb_idx;
+}
+
+static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf_igu_sb(vf, sb_idx);
+}
+
+static inline u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vf->igu_base_id + q->index;
+}
+
+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ return vf->leading_rss;
+ else
+ return vfq_cl_id(vf, q);
+}
+
+static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+/* global iov routines */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
+void bnx2x_iov_remove_one(struct bnx2x *bp);
+void bnx2x_iov_free_mem(struct bnx2x *bp);
+int bnx2x_iov_alloc_mem(struct bnx2x *bp);
+int bnx2x_iov_nic_init(struct bnx2x *bp);
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
+void bnx2x_iov_init_dq(struct bnx2x *bp);
+void bnx2x_iov_init_dmae(struct bnx2x *bp);
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj);
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
+void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
+/* global vf mailbox routines */
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
+
+/* CORE VF API */
+typedef u8 bnx2x_mac_addr_t[ETH_ALEN];
+
+/* acquire */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc);
+/* init */
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ dma_addr_t *sb_map);
+
+/* VFOP queue construction helpers */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vf_queue_construct_params *p,
+ unsigned long q_type);
+
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only);
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor);
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss);
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params);
+
+/* VF release ~ VF close + VF release-resources
+ *
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+/* FLR routines */
+
+/* VF FLR helpers */
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
+
+/* Handles an FLR (or VF_DISABLE) notification from the MCP */
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
+
+bool bnx2x_tlv_supported(u16 tlvtype);
+
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin);
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+ bool support_long);
+
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+
+/* VF side vfpf channel functions */
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_init(struct bnx2x *bp);
+void bnx2x_vfpf_close_vf(struct bnx2x *bp);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid,
+ bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params);
+int bnx2x_vfpf_set_mcast(struct net_device *dev);
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
+
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len)
+{
+ strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+}
+
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ return PXP_VF_ADDR_USDM_QUEUES_START +
+ bp->acquire_resp.resc.hw_qid[fp->index] *
+ sizeof(struct ustorm_queue_zone_data);
+}
+
+void bnx2x_timer_sriov(struct bnx2x *bp);
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
+int bnx2x_vf_pci_alloc(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
+static inline int bnx2x_vf_headroom(struct bnx2x *bp)
+{
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
+}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+void bnx2x_iov_channel_down(struct bnx2x *bp);
+
+void bnx2x_iov_task(struct work_struct *work);
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
+
+void bnx2x_iov_link_update(struct bnx2x *bp);
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
+
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
+#else /* CONFIG_BNX2X_SRIOV */
+
+#define GET_NUM_VFS_PER_PATH(bp) 0
+#define GET_NUM_VFS_PER_PF(bp) 0
+#define VF_MAC_CREDIT_CNT 0
+#define VF_VLAN_CREDIT_CNT 0
+
+static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj) {}
+static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
+static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
+ union event_ring_elem *elem) {return 1; }
+static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
+static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
+static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
+static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
+static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
+static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param) {return 0; }
+static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
+ u8 tx_count, u8 rx_count) {return 0; }
+static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr,
+ u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params) {return 0; }
+static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
+static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len) {}
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp) {return 0; }
+static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ return PFVF_BULLETIN_UNCHANGED;
+}
+static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}
+
+static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ return NULL;
+}
+
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
+static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
+
+static inline void bnx2x_iov_task(struct work_struct *work) {}
+static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+static inline void bnx2x_iov_link_update(struct bnx2x *bp) {}
+static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; }
+
+static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf,
+ int link_state) {return 0; }
+struct pf_vf_bulletin_content;
+static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+ bool support_long) {}
+
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; }
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
new file mode 100644
index 0000000000..2bb133ae61
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -0,0 +1,2007 @@
+/* bnx2x_stats.c: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sriov.h"
+
+extern const u32 dmae_reg_go_c[];
+
+/* Statistics */
+
+/*
+ * General service functions
+ */
+
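+/* HW/FW 64-bit counters are kept as {hi, lo} u32 pairs with the high word
+ * first.  bnx2x_hilo() folds such a pair into a long: e.g. hi = 0x1,
+ * lo = 0x2 yields 0x100000002 on a 64-bit kernel, while a 32-bit kernel
+ * reports only the low word.
+ */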
+static inline long bnx2x_hilo(u32 *hiref)
+{
+ u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+ u32 hi = *hiref;
+
+ return HILO_U64(hi, lo);
+#else
+ return lo;
+#endif
+}
+
+static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+ u16 res = 0;
+
+ /* 'newest' convention - shmem2 contains the size of the port stats */
+ if (SHMEM2_HAS(bp, sizeof_port_stats)) {
+ u32 size = SHMEM2_RD(bp, sizeof_port_stats);
+ if (size)
+ res = size;
+
+ /* prevent newer BC from causing buffer overflow */
+ if (res > sizeof(struct host_port_stats))
+ res = sizeof(struct host_port_stats);
+ }
+
+ /* Older convention - all BCs support the port stats' fields up until
+ * the 'not_used' field
+ */
+ if (!res) {
+ res = offsetof(struct host_port_stats, not_used) + 4;
+
+ /* if PFC stats are supported by the MFW, DMA them as well */
+ if (bp->flags & BC_SUPPORTS_PFC_STATS) {
+ res += offsetof(struct host_port_stats,
+ pfc_frames_rx_lo) -
+ offsetof(struct host_port_stats,
+ pfc_frames_tx_hi) + 4;
+ }
+ }
+
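+ /* DMAE lengths are expressed in dwords - convert from bytes */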
+ res >>= 2;
+
+ WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
+ return res;
+}
+
+/*
+ * Init service functions
+ */
+
+static void bnx2x_dp_stats(struct bnx2x *bp)
+{
+ int i;
+
+ DP(BNX2X_MSG_STATS, "dumping stats:\n"
+ "fw_stats_req\n"
+ " hdr\n"
+ " cmd_num %d\n"
+ " reserved0 %d\n"
+ " drv_stats_counter %d\n"
+ " reserved1 %d\n"
+ " stats_counters_addrs %x %x\n",
+ bp->fw_stats_req->hdr.cmd_num,
+ bp->fw_stats_req->hdr.reserved0,
+ bp->fw_stats_req->hdr.drv_stats_counter,
+ bp->fw_stats_req->hdr.reserved1,
+ bp->fw_stats_req->hdr.stats_counters_addrs.hi,
+ bp->fw_stats_req->hdr.stats_counters_addrs.lo);
+
+ for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
+ DP(BNX2X_MSG_STATS,
+ "query[%d]\n"
+ " kind %d\n"
+ " index %d\n"
+ " funcID %d\n"
+ " reserved %d\n"
+ " address %x %x\n",
+ i, bp->fw_stats_req->query[i].kind,
+ bp->fw_stats_req->query[i].index,
+ bp->fw_stats_req->query[i].funcID,
+ bp->fw_stats_req->query[i].reserved,
+ bp->fw_stats_req->query[i].address.hi,
+ bp->fw_stats_req->query[i].address.lo);
+ }
+}
+
+/* Post the next statistics ramrod. Protect it with the stats lock in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod carries a sequence number in
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
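+/* e.g. the FW echoes drv_stats_counter back through storm_counters, and
+ * bnx2x_storm_stats_validate_counters() compares the echoed values against
+ * the last sent sequence number before trusting the DMAed statistics.
+ */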
+static void bnx2x_storm_stats_post(struct bnx2x *bp)
+{
+ int rc;
+
+ if (bp->stats_pending)
+ return;
+
+ bp->fw_stats_req->hdr.drv_stats_counter =
+ cpu_to_le16(bp->stats_counter++);
+
+ DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+ le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+
+ /* adjust the ramrod to include VF queues statistics */
+ bnx2x_iov_adjust_stats_req(bp);
+ bnx2x_dp_stats(bp);
+
+ /* send FW stats ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping),
+ NONE_CONNECTION_TYPE);
+ if (rc == 0)
+ bp->stats_pending = 1;
+}
+
+static void bnx2x_hw_stats_post(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ *stats_comp = DMAE_COMP_VAL;
+ if (CHIP_REV_IS_SLOW(bp))
+ return;
+
+ /* Update MCP's statistics if possible */
+ if (bp->func_stx)
+ memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
+ sizeof(bp->func_stats));
+
+ /* loader */
+ if (bp->executer_idx) {
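+ /* several DMAE commands were prepared in bp->slowpath->dmae[];
+ * instead of posting each one from the CPU, set up a "loader"
+ * command that DMAs a prepared command into the DMAE command
+ * memory and kicks it through the GO register of the next
+ * channel.  The last command in the chain completes to
+ * stats_comp with DMAE_COMP_VAL, which bnx2x_stats_comp()
+ * polls for.
+ */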
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
+ opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
+
+ memset(dmae, 0, sizeof(struct dmae_command));
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+ dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+ sizeof(struct dmae_command) *
+ (loader_idx + 1)) >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct dmae_command) >> 2;
+ if (CHIP_IS_E1(bp))
+ dmae->len--;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ *stats_comp = 0;
+ bnx2x_post_dmae(bp, dmae, loader_idx);
+
+ } else if (bp->func_stx) {
+ *stats_comp = 0;
+ bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
+ }
+}
+
+static void bnx2x_stats_comp(struct bnx2x *bp)
+{
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+ int cnt = 10;
+
+ might_sleep();
+ while (*stats_comp != DMAE_COMP_VAL) {
+ if (!cnt) {
+ BNX2X_ERR("timeout waiting for stats finished\n");
+ break;
+ }
+ cnt--;
+ usleep_range(1000, 2000);
+ }
+}
+
+/*
+ * Statistics service functions
+ */
+
+/* should be called while holding bp->stats_lock */
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->port.pmf || !bp->port.port_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
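+ /* the port stats area may exceed a single DMAE read of
+ * DMAE_LEN32_RD_MAX dwords, so the copy is split in two: the first
+ * chunk completes to GRC (used for chaining), the second completes
+ * to PCI so the driver can poll stats_comp for the end of transfer
+ */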
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
+ dmae->src_addr_lo = bp->port.port_stx >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->len = DMAE_LEN32_RD_MAX;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_port_stats_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ int port = BP_PORT(bp);
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 mac_addr;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->link_vars.link_up || !bp->port.pmf) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ /* MCP */
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
+
+ if (bp->port.port_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ if (bp->func_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* MAC */
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_GRC);
+
+ /* EMAC is special */
+ if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
+ mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+ /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_RX_STAT_AC_28 */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+ dmae->len = 1;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+ dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ u32 tx_src_addr_lo, rx_src_addr_lo;
+ u16 rx_len, tx_len;
+
+ /* configure the params according to MAC type */
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
+ mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM);
+
+ /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+ * BIGMAC_REGISTER_TX_STAT_GTBYT */
+ if (CHIP_IS_E1x(bp)) {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ } else {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ }
+ break;
+
+ case MAC_TYPE_UMAC: /* handled by MSTAT */
+ case MAC_TYPE_XMAC: /* handled by MSTAT */
+ default:
+ mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+ tx_src_addr_lo = (mac_addr +
+ MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ MSTAT_REG_RX_STAT_GR64_LO) >> 2;
+ tx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_tx) >> 2;
+ rx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_rx) >> 2;
+ break;
+ }
+
+ /* TX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = tx_src_addr_lo;
+ dmae->src_addr_hi = 0;
+ dmae->len = tx_len;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* RX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_hi = 0;
+ dmae->src_addr_lo = rx_src_addr_lo;
+ dmae->dst_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->dst_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->len = rx_len;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* NIG */
+ if (!CHIP_IS_E3(bp)) {
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+ NIG_REG_STAT0_BRB_DISCARD) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
+ dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
+
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+static void bnx2x_func_stats_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->func_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+/* should be called while holding bp->stats_lock */
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (IS_PF(bp)) {
+ if (bp->port.pmf)
+ bnx2x_port_stats_init(bp);
+
+ else if (bp->func_stx)
+ bnx2x_func_stats_init(bp);
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+ }
+}
+
+static void bnx2x_stats_pmf_start(struct bnx2x *bp)
+{
+ bnx2x_stats_comp(bp);
+ bnx2x_stats_pmf_update(bp);
+ bnx2x_stats_start(bp);
+}
+
+static void bnx2x_stats_restart(struct bnx2x *bp)
+{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
+
+ bnx2x_stats_comp(bp);
+ bnx2x_stats_start(bp);
+}
+
+static void bnx2x_bmac_stats_update(struct bnx2x *bp)
+{
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct {
+ u32 lo;
+ u32 hi;
+ } diff;
+
+ if (CHIP_IS_E1x(bp)) {
+ struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
+
+ /* the macros below will use "bmac1_stats" type */
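+ /* UPDATE_STAT64(s, t) (bnx2x_stats.h) takes the delta of the
+ * freshly DMAed counter new->s_hi/lo against its previously
+ * latched value and accumulates it into pstats->mac_stx[1].t
+ */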
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ } else {
+ struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
+
+ /* the macros below will use "bmac2_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ /* collect PFC stats */
+ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+ pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+
+ pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+ pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+ }
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
+}
+
+static void bnx2x_mstat_stats_update(struct bnx2x *bp)
+{
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
+
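+ /* unlike the BMAC path above, the MSTAT path uses ADD_STAT64()
+ * (bnx2x_stats.h), which accumulates the DMAed counters directly
+ * rather than diffing them against an old snapshot
+ */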
+ ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+ ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+ ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+ ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+ ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+ ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+ /* collect pfc stats */
+ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+ pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+ ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+ pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
+
+ ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
+ ADD_STAT64(stats_tx.tx_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ ADD_STAT64(stats_tx.tx_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ ADD_STAT64(stats_tx.tx_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ ADD_STAT64(stats_tx.tx_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ ADD_STAT64(stats_tx.tx_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+ ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+ ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+ ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+ ADD_STAT64(stats_tx.tx_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
+ estats->etherstatspkts1024octetsto1522octets_hi =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ estats->etherstatspkts1024octetsto1522octets_lo =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
+
+ estats->etherstatspktsover1522octets_hi =
+ pstats->mac_stx[1].tx_stat_mac_2047_hi;
+ estats->etherstatspktsover1522octets_lo =
+ pstats->mac_stx[1].tx_stat_mac_2047_lo;
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ pstats->mac_stx[1].tx_stat_mac_4095_hi,
+ estats->etherstatspktsover1522octets_lo,
+ pstats->mac_stx[1].tx_stat_mac_4095_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ pstats->mac_stx[1].tx_stat_mac_9216_hi,
+ estats->etherstatspktsover1522octets_lo,
+ pstats->mac_stx[1].tx_stat_mac_9216_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ pstats->mac_stx[1].tx_stat_mac_16383_hi,
+ estats->etherstatspktsover1522octets_lo,
+ pstats->mac_stx[1].tx_stat_mac_16383_lo);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
+}
+
+static void bnx2x_emac_stats_update(struct bnx2x *bp)
+{
+ struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+ UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+ UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+ UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+ UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+ UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+ UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+ UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+ ADD_64(estats->pause_frames_received_hi,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+ estats->pause_frames_received_lo,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxonsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxonsent_lo;
+ ADD_64(estats->pause_frames_sent_hi,
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+ estats->pause_frames_sent_lo,
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+static int bnx2x_hw_stats_update(struct bnx2x *bp)
+{
+ struct nig_stats *new = bnx2x_sp(bp, nig_stats);
+ struct nig_stats *old = &(bp->port.old_nig_stats);
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct {
+ u32 lo;
+ u32 hi;
+ } diff;
+
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
+ bnx2x_bmac_stats_update(bp);
+ break;
+
+ case MAC_TYPE_EMAC:
+ bnx2x_emac_stats_update(bp);
+ break;
+
+ case MAC_TYPE_UMAC:
+ case MAC_TYPE_XMAC:
+ bnx2x_mstat_stats_update(bp);
+ break;
+
+ case MAC_TYPE_NONE: /* unreached */
+ DP(BNX2X_MSG_STATS,
+ "stats updated by DMAE but no MAC active\n");
+ return -1;
+
+ default: /* unreached */
+ BNX2X_ERR("Unknown MAC type\n");
+ }
+
+ ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+ new->brb_discard - old->brb_discard);
+ ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+ new->brb_truncate - old->brb_truncate);
+
+ if (!CHIP_IS_E3(bp)) {
+ UPDATE_STAT64_NIG(egress_mac_pkt0,
+ etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64_NIG(egress_mac_pkt1,
+ etherstatspktsover1522octets);
+ }
+
+ memcpy(old, new, sizeof(struct nig_stats));
+
+ BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1]));
+ memcpy(&(estats->shared), &(pstats->mac_stx[1]),
+ sizeof(struct mac_stx));
+ estats->brb_drop_hi = pstats->brb_drop_hi;
+ estats->brb_drop_lo = pstats->brb_drop_lo;
+
+ pstats->host_port_stats_counter++;
+
+ if (CHIP_IS_E3(bp)) {
+ u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
+ : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
+ estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
+ }
+
+ if (!BP_NOMCP(bp)) {
+ u32 nig_timer_max =
+ SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ BNX2X_ERR("NIG timer max (%u)\n",
+ estats->nig_timer_max);
+ }
+ }
+
+ return 0;
+}
+
+static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
+{
+ struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
+ u16 cur_stats_counter;
+ /* Make sure we use the value of the counter
+ * used for sending the last stats ramrod.
+ */
+ cur_stats_counter = bp->stats_counter - 1;
+
+ /* are storm stats valid? */
+ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->xstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->ustats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->cstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->tstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+ struct tstorm_per_port_stats *tport =
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = &bp->func_stats;
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
+ int i;
+
+ /* the VFs' stats counter is managed by the PF */
+ if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
+ return -EAGAIN;
+
+ estats->error_bytes_received_hi = 0;
+ estats->error_bytes_received_lo = 0;
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct tstorm_per_queue_stats *tclient =
+ &bp->fw_stats_data->queue_stats[i].
+ tstorm_queue_statistics;
+ struct tstorm_per_queue_stats *old_tclient =
+ &bnx2x_fp_stats(bp, fp)->old_tclient;
+ struct ustorm_per_queue_stats *uclient =
+ &bp->fw_stats_data->queue_stats[i].
+ ustorm_queue_statistics;
+ struct ustorm_per_queue_stats *old_uclient =
+ &bnx2x_fp_stats(bp, fp)->old_uclient;
+ struct xstorm_per_queue_stats *xclient =
+ &bp->fw_stats_data->queue_stats[i].
+ xstorm_queue_statistics;
+ struct xstorm_per_queue_stats *old_xclient =
+ &bnx2x_fp_stats(bp, fp)->old_xclient;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
+
+ u32 diff;
+
+ DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
+ i, xclient->ucast_pkts_sent,
+ xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
+
+ DP(BNX2X_MSG_STATS, "---------------\n");
+
+ UPDATE_QSTAT(tclient->rcv_bcast_bytes,
+ total_broadcast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_mcast_bytes,
+ total_multicast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_ucast_bytes,
+ total_unicast_bytes_received);
+
+ /*
+ * sum all unicast/multicast/broadcast bytes received
+ * into total_bytes_received
+ */
+ qstats->total_bytes_received_hi =
+ qstats->total_broadcast_bytes_received_hi;
+ qstats->total_bytes_received_lo =
+ qstats->total_broadcast_bytes_received_lo;
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_multicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_multicast_bytes_received_lo);
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_unicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_unicast_bytes_received_lo);
+
+ qstats->valid_bytes_received_hi =
+ qstats->total_bytes_received_hi;
+ qstats->valid_bytes_received_lo =
+ qstats->total_bytes_received_lo;
+
+ UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
+ total_unicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
+ total_multicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
+ total_broadcast_packets_received);
+ UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
+ etherstatsoverrsizepkts, 32);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
+
+ SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+ total_unicast_packets_received);
+ SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+ total_multicast_packets_received);
+ SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+ total_broadcast_packets_received);
+ UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+ UPDATE_QSTAT(xclient->bcast_bytes_sent,
+ total_broadcast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->mcast_bytes_sent,
+ total_multicast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->ucast_bytes_sent,
+ total_unicast_bytes_transmitted);
+
+ /*
+ * sum all unicast/multicast/broadcast bytes transmitted
+ * into total_bytes_transmitted
+ */
+ qstats->total_bytes_transmitted_hi =
+ qstats->total_unicast_bytes_transmitted_hi;
+ qstats->total_bytes_transmitted_lo =
+ qstats->total_unicast_bytes_transmitted_lo;
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_broadcast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_broadcast_bytes_transmitted_lo);
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_multicast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_multicast_bytes_transmitted_lo);
+
+ UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
+ total_unicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
+ total_multicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
+ total_broadcast_packets_transmitted);
+
+ UPDATE_EXTEND_TSTAT(checksum_discard,
+ total_packets_received_checksum_discarded);
+ UPDATE_EXTEND_TSTAT(ttl0_discard,
+ total_packets_received_ttl0_discarded);
+
+ UPDATE_EXTEND_XSTAT(error_drop_pkts,
+ total_transmitted_dropped_packets_error);
+
+ /* TPA aggregations completed */
+ UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
+ /* Number of network frames aggregated by TPA */
+ UPDATE_EXTEND_E_USTAT(coalesced_pkts,
+ total_tpa_aggregated_frames);
+ /* Total number of bytes in completed TPA aggregations */
+ UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
+
+ UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
+
+ UPDATE_FSTAT_QSTAT(total_bytes_received);
+ UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(valid_bytes_received);
+ }
+
+ ADD_64(estats->total_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ estats->total_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ ADD_64_LE(estats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ ADD_64_LE(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
+
+ ADD_64(estats->error_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ estats->error_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ if (bp->port.pmf) {
+ struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+ UPDATE_FW_STAT(mac_filter_discard);
+ UPDATE_FW_STAT(mf_tag_discard);
+ UPDATE_FW_STAT(brb_truncate_discard);
+ UPDATE_FW_STAT(mac_discard);
+ }
+
+ fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+ bp->stats_pending = 0;
+
+ return 0;
+}
+
+static void bnx2x_net_stats_update(struct bnx2x *bp)
+{
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct net_device_stats *nstats = &bp->dev->stats;
+ unsigned long tmp;
+ int i;
+
+ nstats->rx_packets =
+ bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+ bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+ bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+ nstats->tx_packets =
+ bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+ bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+ bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+ nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+ nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
+ tmp = estats->mac_discard;
+ for_each_rx_queue(bp, i) {
+ struct tstorm_per_queue_stats *old_tclient =
+ &bp->fp_stats[i].old_tclient;
+ tmp += le32_to_cpu(old_tclient->checksum_discard);
+ }
+ nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
+
+ nstats->tx_dropped = 0;
+
+ nstats->multicast =
+ bnx2x_hilo(&estats->total_multicast_packets_received_hi);
+
+ nstats->collisions =
+ bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
+
+ nstats->rx_length_errors =
+ bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+ bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+ nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+ bnx2x_hilo(&estats->brb_truncate_hi);
+ nstats->rx_crc_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+ nstats->rx_frame_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+ nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
+ nstats->rx_missed_errors = 0;
+
+ nstats->rx_errors = nstats->rx_length_errors +
+ nstats->rx_over_errors +
+ nstats->rx_crc_errors +
+ nstats->rx_frame_errors +
+ nstats->rx_fifo_errors +
+ nstats->rx_missed_errors;
+
+ nstats->tx_aborted_errors =
+ bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+ bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+ nstats->tx_carrier_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
+ nstats->tx_fifo_errors = 0;
+ nstats->tx_heartbeat_errors = 0;
+ nstats->tx_window_errors = 0;
+
+ nstats->tx_errors = nstats->tx_aborted_errors +
+ nstats->tx_carrier_errors +
+ bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bp->fp_stats[i].eth_q_stats_old;
+
+ UPDATE_ESTAT_QSTAT(driver_xoff);
+ UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
+ UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
+ UPDATE_ESTAT_QSTAT(hw_csum_err);
+ UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
+ }
+}
+
+static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
+{
+ u32 val;
+
+ if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
+ val = SHMEM2_RD(bp, edebug_driver_if[1]);
+
+ if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
+ return true;
+ }
+
+ return false;
+}
+
+static void bnx2x_stats_update(struct bnx2x *bp)
+{
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ if (bnx2x_edebug_stats_stopped(bp))
+ return;
+
+ if (IS_PF(bp)) {
+ if (*stats_comp != DMAE_COMP_VAL)
+ return;
+
+ if (bp->port.pmf)
+ bnx2x_hw_stats_update(bp);
+
+ if (bnx2x_storm_stats_update(bp)) {
+ if (bp->stats_pending++ == 3) {
+ BNX2X_ERR("storm stats were not updated for 3 times\n");
+ bnx2x_panic();
+ }
+ return;
+ }
+ } else {
+ /* a VF doesn't collect HW statistics and doesn't get
+ * completions - perform only the update
+ */
+ bnx2x_storm_stats_update(bp);
+ }
+
+ bnx2x_net_stats_update(bp);
+ bnx2x_drv_stats_update(bp);
+
+ /* vf is done */
+ if (IS_VF(bp))
+ return;
+
+ if (netif_msg_timer(bp)) {
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
+ estats->brb_drop_lo, estats->brb_truncate_lo);
+ }
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_port_stats_stop(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ bp->executer_idx = 0;
+
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
+
+ if (bp->port.port_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ if (bp->func_stx)
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_GRC);
+ else
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_PCI);
+
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
+ if (bp->func_stx) {
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ dmae->comp_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+ }
+
+ if (bp->func_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode =
+ bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+}
+
+static void bnx2x_stats_stop(struct bnx2x *bp)
+{
+ bool update = false;
+
+ bnx2x_stats_comp(bp);
+
+ if (bp->port.pmf)
+ update = (bnx2x_hw_stats_update(bp) == 0);
+
+ update |= (bnx2x_storm_stats_update(bp) == 0);
+
+ if (update) {
+ bnx2x_net_stats_update(bp);
+
+ if (bp->port.pmf)
+ bnx2x_port_stats_stop(bp);
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+}
+
+static void bnx2x_stats_do_nothing(struct bnx2x *bp)
+{
+}
+
+static const struct {
+ void (*action)(struct bnx2x *bp);
+ enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+/* state event */
+{
+/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
+/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
+/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
+/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
+},
+{
+/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
+/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
+/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
+/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
+}
+};
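+/* Example walk through the table: in STATS_STATE_ENABLED, STATS_EVENT_STOP
+ * runs bnx2x_stats_stop() and moves the FSM to STATS_STATE_DISABLED; a later
+ * STATS_EVENT_LINK_UP runs bnx2x_stats_start() and re-enables collection.
+ */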
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+{
+ enum bnx2x_stats_state state = bp->stats_state;
+
+ if (unlikely(bp->panic))
+ return;
+
+ /* Statistics updates run from timer context, and we don't want to
+ * block that context in case someone is in the middle of a transition.
+ * For other events, wait a bit for the lock to be taken.
+ */
+ if (down_trylock(&bp->stats_lock)) {
+ if (event == STATS_EVENT_UPDATE)
+ return;
+
+ DP(BNX2X_MSG_STATS,
+ "Unlikely stats' lock contention [event %d]\n", event);
+ if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+ BNX2X_ERR("Failed to take stats lock [event %d]\n",
+ event);
+ return;
+ }
+ }
+
+ bnx2x_stats_stm[state][event].action(bp);
+ bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+
+ up(&bp->stats_lock);
+
+ if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+ DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+ state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->port.pmf || !bp->port.port_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
+/* This function prepares the statistics ramrod data so that afterwards we
+ * only have to increment the statistics counter and
+ * send the ramrod each time we have to.
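+ *
+ * The resulting query array layout: the port query at BNX2X_PORT_QUERY_IDX,
+ * the PF query at BNX2X_PF_QUERY_IDX, an FCoE query at BNX2X_FCOE_QUERY_IDX
+ * (only when FCoE is supported), then one query per ethernet queue and,
+ * last, the FCoE queue query (again only when FCoE is supported).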
+ */
+static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+ int i;
+ int first_queue_query_index;
+ struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+
+ stats_hdr->cmd_num = bp->fw_stats_num;
+ stats_hdr->drv_stats_counter = 0;
+
+ /* The storm_counters struct contains the counters of completed
+ * statistics requests per storm, which the FW increments
+ * each time it finishes handling a statistics ramrod. We
+ * check these counters in the timer handler and discard any
+ * (statistics) ramrod completion whose counters don't match.
+ */
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+ stats_hdr->stats_counters_addrs.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ stats_hdr->stats_counters_addrs.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+ /* prepare for the first stats ramrod (which will be completed with
+ * the counters equal to zero) - init the counters to something different.
+ */
+ memset(&bp->fw_stats_data->storm_counters, 0xff,
+ sizeof(struct stats_counter));
+
+ /**** Port FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, port);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PORT;
+ /* For the port query, index is a DONT CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ /* For the port query, funcID is a DONT CARE */
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** PF FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, pf);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PF;
+ /* For the PF query, index is a DONT CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** FCoE FW statistics data ****/
+ if (!NO_FCOE(bp)) {
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+ cur_query_entry =
+ &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_FCOE;
+ /* For the FCoE query, index is a DONT CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
+
+ /**** Clients' queries ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ /* the first queue query index depends on whether an FCoE offloaded
+ * request will be included in the ramrod
+ */
+ if (!NO_FCOE(bp))
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+ else
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
+ for_each_eth_queue(bp, i) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[first_queue_query_index + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+ cur_data_offset += sizeof(struct per_queue_stats);
+ }
+
+ /* add FCoE queue query if needed */
+ if (!NO_FCOE(bp)) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[first_queue_query_index + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
+}
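+
+/* Illustrative layout (a sketch, not driver code): with FCoE enabled,
+ * bnx2x_prep_fw_stats_req() leaves the query array looking like
+ *
+ *   query[BNX2X_PORT_QUERY_IDX]            -> fw_stats_data->port
+ *   query[BNX2X_PF_QUERY_IDX]              -> fw_stats_data->pf
+ *   query[BNX2X_FCOE_QUERY_IDX]            -> fw_stats_data->fcoe
+ *   query[BNX2X_FIRST_QUEUE_QUERY_IDX + i] -> fw_stats_data->queue_stats[i]
+ *
+ * plus one trailing queue entry for the FCoE L2 queue. Without FCoE,
+ * the queue entries simply start one slot earlier.
+ */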
+
+void bnx2x_memset_stats(struct bnx2x *bp)
+{
+ int i;
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
+ if (bp->stats_init) {
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
+ }
+ }
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+ }
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+ /* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
+void bnx2x_stats_init(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp); /* abs */
+ int mb_idx = BP_FW_MB_IDX(bp);
+
+ if (IS_VF(bp)) {
+ bnx2x_memset_stats(bp);
+ return;
+ }
+
+ bp->stats_pending = 0;
+ bp->executer_idx = 0;
+ bp->stats_counter = 0;
+
+ /* port and func stats for management */
+ if (!BP_NOMCP(bp)) {
+ bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
+ bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+
+ } else {
+ bp->port.port_stx = 0;
+ bp->func_stx = 0;
+ }
+ DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
+ bp->port.port_stx, bp->func_stx);
+
+ /* the pmf should retrieve port statistics from the SP on a non-initial call */
+ if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+
+ port = BP_PORT(bp);
+ /* port stats */
+ memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
+ bp->port.old_nig_stats.brb_discard =
+ REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+ bp->port.old_nig_stats.brb_truncate =
+ REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+ if (!CHIP_IS_E3(bp)) {
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt0), 2);
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt1), 2);
+ }
+
+ /* Prepare statistics ramrod data */
+ bnx2x_prep_fw_stats_req(bp);
+
+ /* Clean SP from previous statistics */
+ if (bp->stats_init) {
+ if (bp->func_stx) {
+ memset(bnx2x_sp(bp, func_stats), 0,
+ sizeof(struct host_func_stats));
+ bnx2x_func_stats_init(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+ }
+
+ bnx2x_memset_stats(bp);
+}
+
+void bnx2x_save_statistics(struct bnx2x *bp)
+{
+ int i;
+ struct net_device_stats *nstats = &bp->dev->stats;
+
+ /* save queue statistics */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
+
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
+ UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
+ }
+
+ /* save net_device_stats statistics */
+ bp->net_stats_old.rx_dropped = nstats->rx_dropped;
+
+ /* store port firmware statistics */
+ if (bp->port.pmf && IS_MF(bp)) {
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+ UPDATE_FW_STAT_OLD(mac_filter_discard);
+ UPDATE_FW_STAT_OLD(mf_tag_discard);
+ UPDATE_FW_STAT_OLD(brb_truncate_discard);
+ UPDATE_FW_STAT_OLD(mac_discard);
+ }
+}
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+ u32 stats_type)
+{
+ int i;
+ struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct per_queue_stats *fcoe_q_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
+
+ struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+ &fcoe_q_stats->tstorm_queue_statistics;
+
+ struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
+ &fcoe_q_stats->ustorm_queue_statistics;
+
+ struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+ &fcoe_q_stats->xstorm_queue_statistics;
+
+ struct fcoe_statistics_params *fw_fcoe_stat =
+ &bp->fw_stats_data->fcoe;
+
+ memset(afex_stats, 0, sizeof(struct afex_stats));
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
+
+ ADD_64(afex_stats->rx_unicast_bytes_hi,
+ qstats->total_unicast_bytes_received_hi,
+ afex_stats->rx_unicast_bytes_lo,
+ qstats->total_unicast_bytes_received_lo);
+
+ ADD_64(afex_stats->rx_broadcast_bytes_hi,
+ qstats->total_broadcast_bytes_received_hi,
+ afex_stats->rx_broadcast_bytes_lo,
+ qstats->total_broadcast_bytes_received_lo);
+
+ ADD_64(afex_stats->rx_multicast_bytes_hi,
+ qstats->total_multicast_bytes_received_hi,
+ afex_stats->rx_multicast_bytes_lo,
+ qstats->total_multicast_bytes_received_lo);
+
+ ADD_64(afex_stats->rx_unicast_frames_hi,
+ qstats->total_unicast_packets_received_hi,
+ afex_stats->rx_unicast_frames_lo,
+ qstats->total_unicast_packets_received_lo);
+
+ ADD_64(afex_stats->rx_broadcast_frames_hi,
+ qstats->total_broadcast_packets_received_hi,
+ afex_stats->rx_broadcast_frames_lo,
+ qstats->total_broadcast_packets_received_lo);
+
+ ADD_64(afex_stats->rx_multicast_frames_hi,
+ qstats->total_multicast_packets_received_hi,
+ afex_stats->rx_multicast_frames_lo,
+ qstats->total_multicast_packets_received_lo);
+
+ /* sum into rx_frames_discarded all packets discarded
+ * due to size, ttl0 and checksum
+ */
+ ADD_64(afex_stats->rx_frames_discarded_hi,
+ qstats->total_packets_received_checksum_discarded_hi,
+ afex_stats->rx_frames_discarded_lo,
+ qstats->total_packets_received_checksum_discarded_lo);
+
+ ADD_64(afex_stats->rx_frames_discarded_hi,
+ qstats->total_packets_received_ttl0_discarded_hi,
+ afex_stats->rx_frames_discarded_lo,
+ qstats->total_packets_received_ttl0_discarded_lo);
+
+ ADD_64(afex_stats->rx_frames_discarded_hi,
+ qstats->etherstatsoverrsizepkts_hi,
+ afex_stats->rx_frames_discarded_lo,
+ qstats->etherstatsoverrsizepkts_lo);
+
+ ADD_64(afex_stats->rx_frames_dropped_hi,
+ qstats->no_buff_discard_hi,
+ afex_stats->rx_frames_dropped_lo,
+ qstats->no_buff_discard_lo);
+
+ ADD_64(afex_stats->tx_unicast_bytes_hi,
+ qstats->total_unicast_bytes_transmitted_hi,
+ afex_stats->tx_unicast_bytes_lo,
+ qstats->total_unicast_bytes_transmitted_lo);
+
+ ADD_64(afex_stats->tx_broadcast_bytes_hi,
+ qstats->total_broadcast_bytes_transmitted_hi,
+ afex_stats->tx_broadcast_bytes_lo,
+ qstats->total_broadcast_bytes_transmitted_lo);
+
+ ADD_64(afex_stats->tx_multicast_bytes_hi,
+ qstats->total_multicast_bytes_transmitted_hi,
+ afex_stats->tx_multicast_bytes_lo,
+ qstats->total_multicast_bytes_transmitted_lo);
+
+ ADD_64(afex_stats->tx_unicast_frames_hi,
+ qstats->total_unicast_packets_transmitted_hi,
+ afex_stats->tx_unicast_frames_lo,
+ qstats->total_unicast_packets_transmitted_lo);
+
+ ADD_64(afex_stats->tx_broadcast_frames_hi,
+ qstats->total_broadcast_packets_transmitted_hi,
+ afex_stats->tx_broadcast_frames_lo,
+ qstats->total_broadcast_packets_transmitted_lo);
+
+ ADD_64(afex_stats->tx_multicast_frames_hi,
+ qstats->total_multicast_packets_transmitted_hi,
+ afex_stats->tx_multicast_frames_lo,
+ qstats->total_multicast_packets_transmitted_lo);
+
+ ADD_64(afex_stats->tx_frames_dropped_hi,
+ qstats->total_transmitted_dropped_packets_error_hi,
+ afex_stats->tx_frames_dropped_lo,
+ qstats->total_transmitted_dropped_packets_error_lo);
+ }
+
+ /* now add FCoE statistics which are collected separately
+ * (both offloaded and non-offloaded)
+ */
+ if (!NO_FCOE(bp)) {
+ ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+ LE32_0,
+ afex_stats->rx_unicast_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+ ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ afex_stats->rx_unicast_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+ ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ afex_stats->rx_broadcast_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+ ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ afex_stats->rx_multicast_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+ ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+ LE32_0,
+ afex_stats->rx_unicast_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+ ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+ LE32_0,
+ afex_stats->rx_unicast_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+ ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
+ LE32_0,
+ afex_stats->rx_broadcast_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+ ADD_64_LE(afex_stats->rx_multicast_frames_hi,
+ LE32_0,
+ afex_stats->rx_multicast_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+ ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+ LE32_0,
+ afex_stats->rx_frames_discarded_lo,
+ fcoe_q_tstorm_stats->checksum_discard);
+
+ ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+ LE32_0,
+ afex_stats->rx_frames_discarded_lo,
+ fcoe_q_tstorm_stats->pkts_too_big_discard);
+
+ ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+ LE32_0,
+ afex_stats->rx_frames_discarded_lo,
+ fcoe_q_tstorm_stats->ttl0_discard);
+
+ ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
+ LE16_0,
+ afex_stats->rx_frames_dropped_lo,
+ fcoe_q_tstorm_stats->no_buff_discard);
+
+ ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+ LE32_0,
+ afex_stats->rx_frames_dropped_lo,
+ fcoe_q_ustorm_stats->ucast_no_buff_pkts);
+
+ ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+ LE32_0,
+ afex_stats->rx_frames_dropped_lo,
+ fcoe_q_ustorm_stats->mcast_no_buff_pkts);
+
+ ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+ LE32_0,
+ afex_stats->rx_frames_dropped_lo,
+ fcoe_q_ustorm_stats->bcast_no_buff_pkts);
+
+ ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+ LE32_0,
+ afex_stats->rx_frames_dropped_lo,
+ fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
+
+ ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+ LE32_0,
+ afex_stats->rx_frames_dropped_lo,
+ fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
+
+ ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+ LE32_0,
+ afex_stats->tx_unicast_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+ ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ afex_stats->tx_unicast_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+ ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ afex_stats->tx_broadcast_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+ ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ afex_stats->tx_multicast_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+ ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+ LE32_0,
+ afex_stats->tx_unicast_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+ ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+ LE32_0,
+ afex_stats->tx_unicast_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+ ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
+ LE32_0,
+ afex_stats->tx_broadcast_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+ ADD_64_LE(afex_stats->tx_multicast_frames_hi,
+ LE32_0,
+ afex_stats->tx_multicast_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
+
+ ADD_64_LE(afex_stats->tx_frames_dropped_hi,
+ LE32_0,
+ afex_stats->tx_frames_dropped_lo,
+ fcoe_q_xstorm_stats->error_drop_pkts);
+ }
+
+ /* if port stats are requested, add them to the PMF
+ * stats, since they will anyway be accumulated by the
+ * MCP before being sent to the switch
+ */
+ if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
+ ADD_64(afex_stats->rx_frames_dropped_hi,
+ 0,
+ afex_stats->rx_frames_dropped_lo,
+ estats->mac_filter_discard);
+ ADD_64(afex_stats->rx_frames_dropped_hi,
+ 0,
+ afex_stats->rx_frames_dropped_lo,
+ estats->brb_truncate_discard);
+ ADD_64(afex_stats->rx_frames_discarded_hi,
+ 0,
+ afex_stats->rx_frames_discarded_lo,
+ estats->mac_discard);
+ }
+}
+
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie)
+{
+ int cnt = 10, rc = 0;
+
+ /* Wait for statistics to end [while blocking further requests],
+ * then run the supplied function 'safely'.
+ */
+ rc = down_timeout(&bp->stats_lock, HZ / 10);
+ if (unlikely(rc)) {
+ BNX2X_ERR("Failed to take statistics lock for safe execution\n");
+ goto out_no_lock;
+ }
+
+ bnx2x_stats_comp(bp);
+ while (bp->stats_pending && cnt--)
+ if (bnx2x_storm_stats_update(bp))
+ usleep_range(1000, 2000);
+ if (bp->stats_pending) {
+ BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ func_to_exec(cookie);
+
+out:
+ /* No need to restart statistics - if they're enabled, the timer
+ * will restart them.
+ */
+ up(&bp->stats_lock);
+out_no_lock:
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
new file mode 100644
index 0000000000..ae93c07870
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -0,0 +1,566 @@
+/* bnx2x_stats.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <linux/types.h>
+
+struct nig_stats {
+ u32 brb_discard;
+ u32 brb_packet;
+ u32 brb_truncate;
+ u32 flow_ctrl_discard;
+ u32 flow_ctrl_octets;
+ u32 flow_ctrl_packet;
+ u32 mng_discard;
+ u32 mng_octet_inp;
+ u32 mng_octet_out;
+ u32 mng_packet_inp;
+ u32 mng_packet_out;
+ u32 pbf_octets;
+ u32 pbf_packet;
+ u32 safc_inp;
+ struct_group(egress_mac_pkt0,
+ u32 egress_mac_pkt0_lo;
+ u32 egress_mac_pkt0_hi;
+ );
+ struct_group(egress_mac_pkt1,
+ u32 egress_mac_pkt1_lo;
+ u32 egress_mac_pkt1_hi;
+ );
+};
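+
+/* Note: struct_group() (include/linux/stddef.h) wraps each _lo/_hi pair
+ * in an anonymous union so the pair can also be addressed as one named
+ * object - e.g. bnx2x_stats_init() fills egress_mac_pkt0/1 with a single
+ * REG_RD_DMAE() per pair.
+ */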
+
+enum bnx2x_stats_event {
+ STATS_EVENT_PMF = 0,
+ STATS_EVENT_LINK_UP,
+ STATS_EVENT_UPDATE,
+ STATS_EVENT_STOP,
+ STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+ STATS_STATE_DISABLED = 0,
+ STATS_STATE_ENABLED,
+ STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 error_bytes_received_hi;
+ u32 error_bytes_received_lo;
+ u32 etherstatsoverrsizepkts_hi;
+ u32 etherstatsoverrsizepkts_lo;
+ u32 no_buff_discard_hi;
+ u32 no_buff_discard_lo;
+
+ struct_group(shared,
+ u32 rx_stat_ifhcinbadoctets_hi;
+ u32 rx_stat_ifhcinbadoctets_lo;
+ u32 tx_stat_ifhcoutbadoctets_hi;
+ u32 tx_stat_ifhcoutbadoctets_lo;
+ u32 rx_stat_dot3statsfcserrors_hi;
+ u32 rx_stat_dot3statsfcserrors_lo;
+ u32 rx_stat_dot3statsalignmenterrors_hi;
+ u32 rx_stat_dot3statsalignmenterrors_lo;
+ u32 rx_stat_dot3statscarriersenseerrors_hi;
+ u32 rx_stat_dot3statscarriersenseerrors_lo;
+ u32 rx_stat_falsecarriererrors_hi;
+ u32 rx_stat_falsecarriererrors_lo;
+ u32 rx_stat_etherstatsundersizepkts_hi;
+ u32 rx_stat_etherstatsundersizepkts_lo;
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+ u32 rx_stat_etherstatsfragments_hi;
+ u32 rx_stat_etherstatsfragments_lo;
+ u32 rx_stat_etherstatsjabbers_hi;
+ u32 rx_stat_etherstatsjabbers_lo;
+ u32 rx_stat_maccontrolframesreceived_hi;
+ u32 rx_stat_maccontrolframesreceived_lo;
+ u32 rx_stat_bmac_xpf_hi;
+ u32 rx_stat_bmac_xpf_lo;
+ u32 rx_stat_bmac_xcf_hi;
+ u32 rx_stat_bmac_xcf_lo;
+ u32 rx_stat_xoffstateentered_hi;
+ u32 rx_stat_xoffstateentered_lo;
+ u32 rx_stat_xonpauseframesreceived_hi;
+ u32 rx_stat_xonpauseframesreceived_lo;
+ u32 rx_stat_xoffpauseframesreceived_hi;
+ u32 rx_stat_xoffpauseframesreceived_lo;
+ u32 tx_stat_outxonsent_hi;
+ u32 tx_stat_outxonsent_lo;
+ u32 tx_stat_outxoffsent_hi;
+ u32 tx_stat_outxoffsent_lo;
+ u32 tx_stat_flowcontroldone_hi;
+ u32 tx_stat_flowcontroldone_lo;
+ u32 tx_stat_etherstatscollisions_hi;
+ u32 tx_stat_etherstatscollisions_lo;
+ u32 tx_stat_dot3statssinglecollisionframes_hi;
+ u32 tx_stat_dot3statssinglecollisionframes_lo;
+ u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+ u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+ u32 tx_stat_dot3statsdeferredtransmissions_hi;
+ u32 tx_stat_dot3statsdeferredtransmissions_lo;
+ u32 tx_stat_dot3statsexcessivecollisions_hi;
+ u32 tx_stat_dot3statsexcessivecollisions_lo;
+ u32 tx_stat_dot3statslatecollisions_hi;
+ u32 tx_stat_dot3statslatecollisions_lo;
+ u32 tx_stat_etherstatspkts64octets_hi;
+ u32 tx_stat_etherstatspkts64octets_lo;
+ u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+ u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+ u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+ u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+ u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+ u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+ u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+ u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ u32 tx_stat_etherstatspktsover1522octets_hi;
+ u32 tx_stat_etherstatspktsover1522octets_lo;
+ u32 tx_stat_bmac_2047_hi;
+ u32 tx_stat_bmac_2047_lo;
+ u32 tx_stat_bmac_4095_hi;
+ u32 tx_stat_bmac_4095_lo;
+ u32 tx_stat_bmac_9216_hi;
+ u32 tx_stat_bmac_9216_lo;
+ u32 tx_stat_bmac_16383_hi;
+ u32 tx_stat_bmac_16383_lo;
+ u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+ u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+ u32 tx_stat_bmac_ufl_hi;
+ u32 tx_stat_bmac_ufl_lo;
+ );
+
+ u32 pause_frames_received_hi;
+ u32 pause_frames_received_lo;
+ u32 pause_frames_sent_hi;
+ u32 pause_frames_sent_lo;
+
+ u32 etherstatspkts1024octetsto1522octets_hi;
+ u32 etherstatspkts1024octetsto1522octets_lo;
+ u32 etherstatspktsover1522octets_hi;
+ u32 etherstatspktsover1522octets_lo;
+
+ u32 brb_drop_hi;
+ u32 brb_drop_lo;
+ u32 brb_truncate_hi;
+ u32 brb_truncate_lo;
+
+ u32 mac_filter_discard;
+ u32 mf_tag_discard;
+ u32 brb_truncate_discard;
+ u32 mac_discard;
+
+ u32 driver_xoff;
+ u32 rx_err_discard_pkt;
+ u32 rx_skb_alloc_failed;
+ u32 hw_csum_err;
+
+ u32 nig_timer_max;
+
+ /* TPA */
+ u32 total_tpa_aggregations_hi;
+ u32 total_tpa_aggregations_lo;
+ u32 total_tpa_aggregated_frames_hi;
+ u32 total_tpa_aggregated_frames_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+
+ /* PFC */
+ u32 pfc_frames_received_hi;
+ u32 pfc_frames_received_lo;
+ u32 pfc_frames_sent_hi;
+ u32 pfc_frames_sent_lo;
+
+ /* Recovery */
+ u32 recoverable_error;
+ u32 unrecoverable_error;
+ u32 driver_filtered_tx_pkt;
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ u32 eee_tx_lpi;
+
+ /* PTP */
+ u32 ptp_skip_tx_ts;
+};
+
+struct bnx2x_eth_q_stats {
+ u32 total_unicast_bytes_received_hi;
+ u32 total_unicast_bytes_received_lo;
+ u32 total_broadcast_bytes_received_hi;
+ u32 total_broadcast_bytes_received_lo;
+ u32 total_multicast_bytes_received_hi;
+ u32 total_multicast_bytes_received_lo;
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_unicast_bytes_transmitted_hi;
+ u32 total_unicast_bytes_transmitted_lo;
+ u32 total_broadcast_bytes_transmitted_hi;
+ u32 total_broadcast_bytes_transmitted_lo;
+ u32 total_multicast_bytes_transmitted_hi;
+ u32 total_multicast_bytes_transmitted_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 etherstatsoverrsizepkts_hi;
+ u32 etherstatsoverrsizepkts_lo;
+ u32 no_buff_discard_hi;
+ u32 no_buff_discard_lo;
+
+ u32 driver_xoff;
+ u32 rx_err_discard_pkt;
+ u32 rx_skb_alloc_failed;
+ u32 hw_csum_err;
+
+ u32 total_packets_received_checksum_discarded_hi;
+ u32 total_packets_received_checksum_discarded_lo;
+ u32 total_packets_received_ttl0_discarded_hi;
+ u32 total_packets_received_ttl0_discarded_lo;
+ u32 total_transmitted_dropped_packets_error_hi;
+ u32 total_transmitted_dropped_packets_error_lo;
+
+ /* TPA */
+ u32 total_tpa_aggregations_hi;
+ u32 total_tpa_aggregations_lo;
+ u32 total_tpa_aggregated_frames_hi;
+ u32 total_tpa_aggregated_frames_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+ u32 driver_filtered_tx_pkt;
+};
+
+struct bnx2x_eth_stats_old {
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+ /* Fields to preserve over fw reset */
+ u32 total_unicast_bytes_received_hi;
+ u32 total_unicast_bytes_received_lo;
+ u32 total_broadcast_bytes_received_hi;
+ u32 total_broadcast_bytes_received_lo;
+ u32 total_multicast_bytes_received_hi;
+ u32 total_multicast_bytes_received_lo;
+ u32 total_unicast_bytes_transmitted_hi;
+ u32 total_unicast_bytes_transmitted_lo;
+ u32 total_broadcast_bytes_transmitted_hi;
+ u32 total_broadcast_bytes_transmitted_lo;
+ u32 total_multicast_bytes_transmitted_hi;
+ u32 total_multicast_bytes_transmitted_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+
+ /* Fields to preserve the last value of */
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 total_tpa_bytes_hi_old;
+ u32 total_tpa_bytes_lo_old;
+
+ u32 driver_xoff_old;
+ u32 rx_err_discard_pkt_old;
+ u32 rx_skb_alloc_failed_old;
+ u32 hw_csum_err_old;
+ u32 driver_filtered_tx_pkt_old;
+};
+
+struct bnx2x_net_stats_old {
+ u32 rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+ u32 mac_filter_discard;
+ u32 mf_tag_discard;
+ u32 brb_truncate_discard;
+ u32 mac_discard;
+};
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+ do { \
+ s_lo += a_lo; \
+ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+ } while (0)
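+
+/* Minimal usage sketch (illustrative only):
+ *
+ *   u32 s_hi = 0, s_lo = 0xffffffff;
+ *   ADD_64(s_hi, 0, s_lo, 1);
+ *
+ * s_lo wraps to 0 and (s_lo < 1) supplies the carry, so s_hi == 1.
+ */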
+
+#define LE32_0 ((__force __le32) 0)
+#define LE16_0 ((__force __le16) 0)
+
+/* The __force cast is for cases where the high value is 0 */
+#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
+ ADD_64(s_hi, le32_to_cpu(a_hi_le), \
+ s_lo, le32_to_cpu(a_lo_le))
+
+#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
+ ADD_64(s_hi, le16_to_cpu(a_hi_le), \
+ s_lo, le16_to_cpu(a_lo_le))
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+ do { \
+ if (m_lo < s_lo) { \
+ /* underflow */ \
+ d_hi = m_hi - s_hi; \
+ if (d_hi > 0) { \
+ /* we can 'loan' 1 */ \
+ d_hi--; \
+ d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+ } else { \
+ /* m_hi <= s_hi */ \
+ d_hi = 0; \
+ d_lo = 0; \
+ } \
+ } else { \
+ /* m_lo >= s_lo */ \
+ if (m_hi < s_hi) { \
+ d_hi = 0; \
+ d_lo = 0; \
+ } else { \
+ /* m_hi >= s_hi */ \
+ d_hi = m_hi - s_hi; \
+ d_lo = m_lo - s_lo; \
+ } \
+ } \
+ } while (0)
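+
+/* Worked example (illustrative only): m = 0x00000002:00000001 minus
+ * s = 0x00000001:00000002. Here m_lo < s_lo and d_hi = 2 - 1 = 1 > 0,
+ * so we 'loan' 1: d_hi becomes 0 and
+ *   d_lo = 1 + (UINT_MAX - 2) + 1 = 0xffffffff,
+ * i.e. d = 0x00000000:ffffffff, the expected difference.
+ */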
+
+#define UPDATE_STAT64(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+ diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+ pstats->mac_stx[0].t##_hi = new->s##_hi; \
+ pstats->mac_stx[0].t##_lo = new->s##_lo; \
+ ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+ pstats->mac_stx[1].t##_lo, diff.lo); \
+ } while (0)
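+
+/* Note (a reading aid): mac_stx[0] holds the last raw HW snapshot and
+ * mac_stx[1] the accumulated total; UPDATE_STAT64 folds the delta since
+ * the previous snapshot into the accumulator, and a snapshot that goes
+ * backwards yields a zero delta (see DIFF_64) instead of corrupting the
+ * total.
+ */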
+
+#define UPDATE_STAT64_NIG(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+ diff.lo, new->s##_lo, old->s##_lo); \
+ ADD_64(estats->t##_hi, diff.hi, \
+ estats->t##_lo, diff.lo); \
+ } while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+ do { \
+ s_lo += a; \
+ s_hi += (s_lo < a) ? 1 : 0; \
+ } while (0)
+
+#define ADD_STAT64(diff, t) \
+ do { \
+ ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+ pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+ } while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+ do { \
+ ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+ pstats->mac_stx[1].s##_lo, \
+ new->s); \
+ } while (0)
+
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
+ do { \
+ diff = le##size##_to_cpu(tclient->s) - \
+ le##size##_to_cpu(old_tclient->s); \
+ old_tclient->s = tclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
+ do { \
+ UPDATE_EXTEND_TSTAT_X(s, t, size); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+ old_uclient->s = uclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_E_USTAT(s, t) \
+ do { \
+ UPDATE_EXTEND_USTAT(s, t); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+ old_xclient->s = xclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_QSTAT(s, t) \
+ do { \
+ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+ qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
+ + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
+ } while (0)
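+
+/* Note (a reading aid): UPDATE_QSTAT rebases the little-endian FW
+ * counter s on top of the pre-reset base kept in qstats_old; the
+ * (qstats->t_lo < qstats_old->t_lo) comparison detects wrap of the
+ * low word and carries 1 into the high word.
+ */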
+
+#define UPDATE_QSTAT_OLD(f) \
+ do { \
+ qstats_old->f = qstats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT_64(s) \
+ do { \
+ ADD_64(estats->s##_hi, qstats->s##_hi, \
+ estats->s##_lo, qstats->s##_lo); \
+ SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
+ estats->s##_lo, qstats_old->s##_lo_old); \
+ qstats_old->s##_hi_old = qstats->s##_hi; \
+ qstats_old->s##_lo_old = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT(s) \
+ do { \
+ estats->s += qstats->s; \
+ estats->s -= qstats_old->s##_old; \
+ qstats_old->s##_old = qstats->s; \
+ } while (0)
+
+#define UPDATE_FSTAT_QSTAT(s) \
+ do { \
+ ADD_64(fstats->s##_hi, qstats->s##_hi, \
+ fstats->s##_lo, qstats->s##_lo); \
+ SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
+ fstats->s##_lo, qstats_old->s##_lo); \
+ estats->s##_hi = fstats->s##_hi; \
+ estats->s##_lo = fstats->s##_lo; \
+ qstats_old->s##_hi = qstats->s##_hi; \
+ qstats_old->s##_lo = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_FW_STAT(s) \
+ do { \
+ estats->s = le32_to_cpu(tport->s) + fwstats->s; \
+ } while (0)
+
+#define UPDATE_FW_STAT_OLD(f) \
+ do { \
+ fwstats->f = estats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT(s, t) \
+ do { \
+ SUB_64(estats->s##_hi, estats_old->t##_hi, \
+ estats->s##_lo, estats_old->t##_lo); \
+ ADD_64(estats->s##_hi, estats->t##_hi, \
+ estats->s##_lo, estats->t##_lo); \
+ estats_old->t##_hi = estats->t##_hi; \
+ estats_old->t##_lo = estats->t##_lo; \
+ } while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+ do { \
+ DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+ } while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+ do { \
+ SUB_64(m_hi, 0, m_lo, s); \
+ } while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+ SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+/* forward */
+struct bnx2x;
+
+void bnx2x_memset_stats(struct bnx2x *bp);
+void bnx2x_stats_init(struct bnx2x *bp);
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie);
+
+/**
+ * bnx2x_save_statistics - save statistics when unloading.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_save_statistics(struct bnx2x *bp);
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+ u32 stats_type);
+#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
new file mode 100644
index 0000000000..8946a931e8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -0,0 +1,2314 @@
+/* bnx2x_vfpf.c: QLogic Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
+ * consent.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
+/* place a given tlv on the tlv buffer at a given offset */
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+ u16 offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl =
+ (struct channel_tlv *)(tlvs_list + offset);
+
+ tl->type = type;
+ tl->length = length;
+}
+
+/* Clear the mailbox and init the header of the first tlv */
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
+{
+ mutex_lock(&bp->vf2pf_mutex);
+
+ DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
+ type);
+
+ /* Clear mailbox */
+ memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* init type and length */
+ bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
+
+ /* init first tlv header */
+ first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
+}
+
+/* releases the mailbox */
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+ struct vfpf_first_tlv *first_tlv)
+{
+ DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
+ first_tlv->tl.type);
+
+ mutex_unlock(&bp->vf2pf_mutex);
+}
+
+/* Find a TLV by type in a TLV buffer; if found, return a pointer to it */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+ enum channel_tlvs req_tlv)
+{
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ do {
+ if (tlv->type == req_tlv)
+ return tlv;
+
+ if (!tlv->length) {
+ BNX2X_ERR("Found TLV with length 0\n");
+ return NULL;
+ }
+
+ tlvs_list += tlv->length;
+ tlv = (struct channel_tlv *)tlvs_list;
+ } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+ DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+ return NULL;
+}
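+
+/* Illustrative buffer layout (a sketch, not driver code): a request
+ * built with bnx2x_add_tlv() is a packed chain of channel_tlv headers,
+ * e.g. for ACQUIRE with a physical port id request appended:
+ *
+ *   offset 0           : { CHANNEL_TLV_ACQUIRE, len0 } + payload
+ *   offset len0        : { CHANNEL_TLV_PHYS_PORT_ID, len1 }
+ *   offset len0 + len1 : { CHANNEL_TLV_LIST_END, ... }
+ *
+ * bnx2x_search_tlv_list() above walks this chain by advancing
+ * tlv->length bytes at a time until it reaches CHANNEL_TLV_LIST_END.
+ */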
+
+/* list the types and lengths of the tlvs on the buffer */
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+{
+ int i = 1;
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ while (tlv->type != CHANNEL_TLV_LIST_END) {
+ /* output tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+
+ /* advance to next tlv */
+ tlvs_list += tlv->length;
+
+ /* cast the general tlv list pointer to a channel tlv header */
+ tlv = (struct channel_tlv *)tlvs_list;
+
+ i++;
+
+ /* break condition for this loop */
+ if (i > MAX_TLVS_IN_LIST) {
+ WARN(true, "corrupt tlvs");
+ return;
+ }
+ }
+
+ /* output last tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+}
+
+/* test whether we support a tlv type */
+bool bnx2x_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static inline int bnx2x_pfvf_status_codes(int rc)
+{
+ switch (rc) {
+ case 0:
+ return PFVF_STATUS_SUCCESS;
+ case -ENOMEM:
+ return PFVF_STATUS_NO_RESOURCE;
+ default:
+ return PFVF_STATUS_FAILURE;
+ }
+}
+
+static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+{
+ struct cstorm_vf_zone_data __iomem *zone_data =
+ REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
+ int tout = 100, interval = 100; /* wait for 10 seconds */
+
+ if (*done) {
+ BNX2X_ERR("done was non-zero before message to pf was sent\n");
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ /* if the PF indicated the channel is down, avoid sending a message.
+ * Return success so the calling flow can continue
+ */
+ bnx2x_sample_bulletin(bp);
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
+ DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
+ *done = PFVF_STATUS_SUCCESS;
+ return -EINVAL;
+ }
+
+ /* Write message address */
+ writel(U64_LO(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
+ writel(U64_HI(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
+
+ /* make sure the address is written before FW accesses it */
+ wmb();
+
+ /* Trigger the PF FW */
+ writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ /* Wait for PF to complete */
+ while ((tout >= 0) && (!*done)) {
+ msleep(interval);
+ tout -= 1;
+
+ /* progress indicator - HV can take its own sweet time in
+ * answering VFs...
+ */
+ DP_CONT(BNX2X_MSG_IOV, ".");
+ }
+
+ if (!*done) {
+ BNX2X_ERR("PF response has timed out\n");
+ return -EAGAIN;
+ }
+ DP(BNX2X_MSG_SP, "Got a response from PF\n");
+ return 0;
+}
+
+static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+{
+ u32 me_reg;
+ int tout = 10, interval = 100; /* Wait for 1 sec */
+
+ do {
+ /* pxp traps vf read of doorbells and returns me reg value */
+ me_reg = readl(bp->doorbells);
+ if (GOOD_ME_REG(me_reg))
+ break;
+
+ msleep(interval);
+
+ BNX2X_ERR("Invalid ME register value: 0x%08x. Is the pf driver up?\n",
+ me_reg);
+ } while (tout-- > 0);
+
+ if (!GOOD_ME_REG(me_reg)) {
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
+
+ *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+
+ return 0;
+}
+
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
+{
+ int rc = 0, attempts = 0;
+ struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
+ struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
+ struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
+ u32 vf_id;
+ bool resources_acquired = false;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ req->vfdev_info.vf_id = vf_id;
+ req->vfdev_info.vf_os = 0;
+ req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = bp->igu_sb_cnt;
+ req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
+ req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+ req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+
+ /* Request physical port identifier */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+ CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
+ /* Bulletin support for bulletin board with length > legacy length */
+ req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+ /* vlan filtering is supported */
+ req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req,
+ req->first_tlv.tl.length + sizeof(struct channel_tlv),
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ while (!resources_acquired) {
+ DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = bnx2x_send_msg2pf(bp,
+ &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+
+ /* PF timeout */
+ if (rc)
+ goto out;
+
+ /* copy acquire response from buffer to bp */
+ memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
+
+ attempts++;
+
+ /* test whether the PF accepted our request. If not, humble
+ * the request and try again.
+ */
+ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
+ DP(BNX2X_MSG_SP, "resources acquired\n");
+ resources_acquired = true;
+ } else if (bp->acquire_resp.hdr.status ==
+ PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP(BNX2X_MSG_SP,
+ "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs =
+ min(req->resc_request.num_txqs,
+ bp->acquire_resp.resc.num_txqs);
+ req->resc_request.num_rxqs =
+ min(req->resc_request.num_rxqs,
+ bp->acquire_resp.resc.num_rxqs);
+ req->resc_request.num_sbs =
+ min(req->resc_request.num_sbs,
+ bp->acquire_resp.resc.num_sbs);
+ req->resc_request.num_mac_filters =
+ min(req->resc_request.num_mac_filters,
+ bp->acquire_resp.resc.num_mac_filters);
+ req->resc_request.num_vlan_filters =
+ min(req->resc_request.num_vlan_filters,
+ bp->acquire_resp.resc.num_vlan_filters);
+ req->resc_request.num_mc_filters =
+ min(req->resc_request.num_mc_filters,
+ bp->acquire_resp.resc.num_mc_filters);
+
+ /* Clear response buffer */
+ memset(&bp->vf2pf_mbox->resp, 0,
+ sizeof(union pfvf_tlvs));
+ } else {
+ /* Determine why the PF failed the acquire process */
+ fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_FP_HSI_SUPPORT);
+ if (fp_hsi_resp && !fp_hsi_resp->is_supported)
+ BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
+ else
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
+ rc = -EAGAIN;
+ goto out;
+ }
+ }
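+
+ /* Illustrative example (not driver logic): if the VF asked for 8 rxqs
+ * but the PF answered PFVF_STATUS_NO_RESOURCE with resc.num_rxqs = 4,
+ * the loop above clamps the request to min(8, 4) = 4 and resends, for
+ * at most VF_ACQUIRE_THRESH attempts before giving up.
+ */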
+
+ /* Retrieve physical port id (if possible) */
+ phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+ bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_PHYS_PORT_ID);
+ if (phys_port_resp) {
+ memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+ bp->flags |= HAS_PHYS_PORT_ID;
+ }
+
+ /* Old hypervisors might not even support the FP_HSI_SUPPORT TLV.
+ * If that's the case, we need to make certain the fastpath HSI
+ * version we require was supported by such a hypervisor [i.e., v0-v2].
+ */
+ fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_FP_HSI_SUPPORT);
+ if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
+ BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");
+
+ /* Since acquire succeeded on the PF side, we need to send a
+ * release message in order to allow future probes.
+ */
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ bnx2x_vfpf_release(bp);
+
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* get HW info */
+ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
+ bp->link_params.chip_id = bp->common.chip_id;
+ bp->db_size = bp->acquire_resp.pfdev_info.db_size;
+ bp->common.int_block = INT_BLOCK_IGU;
+ bp->common.chip_port_mode = CHIP_2_PORT_MODE;
+ bp->igu_dsb_id = -1;
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->common.flash_size = 0;
+ bp->flags |=
+ NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
+ bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
+ bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
+ strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ sizeof(bp->fw_ver));
+
+ if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
+ eth_hw_addr_set(bp->dev,
+ bp->acquire_resp.resc.current_mac_addr);
+
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
+}
+
+int bnx2x_vfpf_release(struct bnx2x *bp)
+{
+ struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u32 rc, vf_id;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send release request */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ /* PF timeout */
+ goto out;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF released us */
+ DP(BNX2X_MSG_SP, "vf released\n");
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
+ resp->hdr.status);
+ rc = -EAGAIN;
+ goto out;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* Tell PF about SB addresses */
+int bnx2x_vfpf_init(struct bnx2x *bp)
+{
+ struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+
+ /* status blocks */
+ for_each_eth_queue(bp, i)
+ req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
+ status_blk_mapping);
+
+ /* statistics - the request only supports a single queue for now */
+ req->stats_addr = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ req->stats_stride = sizeof(struct per_queue_stats);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ goto out;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
+ resp->hdr.status);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* CLOSE VF - opposite to INIT_VF */
+void bnx2x_vfpf_close_vf(struct bnx2x *bp)
+{
+ struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int i, rc;
+ u32 vf_id;
+
+ /* If we haven't got a valid VF id, there is no sense in
+ * continuing to send messages
+ */
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ goto free_irq;
+
+ /* Close the queues */
+ for_each_queue(bp, i)
+ bnx2x_vfpf_teardown_queue(bp, i);
+
+ /* remove mac */
+ bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
+
+ else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
+ resp->hdr.status);
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+free_irq:
+ if (!bp->nic_stopped) {
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
+}
+
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &vf->vf_macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &vf->vf_vlans_pool);
+ /* vlan-mac */
+ bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &vf->vf_macs_pool,
+ &vf->vf_vlans_pool);
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* rss */
+ bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+ func_id, func_id,
+ bnx2x_vf_sp(bp, vf, rss_rdata),
+ bnx2x_vf_sp_map(bp, vf, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ q->is_leading = true;
+ q->sp_initialized = true;
+}
+
+/* ask the pf to open a queue for the vf */
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading)
+{
+ struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u8 fp_idx = fp->index;
+ u16 tpa_agg_size = 0, flags = 0;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
+
+ /* select tpa mode to request */
+ if (fp->mode != TPA_MODE_DISABLED) {
+ flags |= VFPF_QUEUE_FLG_TPA;
+ flags |= VFPF_QUEUE_FLG_TPA_IPV6;
+ if (fp->mode == TPA_MODE_GRO)
+ flags |= VFPF_QUEUE_FLG_TPA_GRO;
+ tpa_agg_size = TPA_AGG_SIZE;
+ }
+
+ if (is_leading)
+ flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
+ /* calculate queue flags */
+ flags |= VFPF_QUEUE_FLG_STATS;
+ flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
+ flags |= VFPF_QUEUE_FLG_VLAN;
+
+ /* Common */
+ req->vf_qid = fp_idx;
+ req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
+
+ /* Rx */
+ req->rxq.rcq_addr = fp->rx_comp_mapping;
+ req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ req->rxq.rxq_addr = fp->rx_desc_mapping;
+ req->rxq.sge_addr = fp->rx_sge_mapping;
+ req->rxq.vf_sb = fp_idx;
+ req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
+ req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
+ req->rxq.mtu = bp->dev->mtu;
+ req->rxq.buf_sz = fp->rx_buf_size;
+ req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
+ req->rxq.tpa_agg_sz = tpa_agg_size;
+ req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+ req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ req->rxq.flags = flags;
+ req->rxq.drop_flags = 0;
+ req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ req->rxq.stat_id = -1; /* No stats at the moment */
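+
+ /* Illustrative arithmetic (assumes rx_ticks is in microseconds, 4K
+ * SGE pages with SGE_PAGE_SHIFT == 12, and PAGES_PER_SGE == 1): with
+ * rx_ticks = 50 the hc_rate above is 1000000 / 50 = 20000 updates/sec,
+ * and for a 9000-byte MTU, max_sge_pkt = SGE_PAGE_ALIGN(9000) >> 12 = 3.
+ */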
+
+ /* Tx */
+ req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
+ req->txq.vf_sb = fp_idx;
+ req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+ req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
+ req->txq.flags = flags;
+ req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
+ fp_idx);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
+ fp_idx, resp->hdr.status);
+ rc = -EINVAL;
+ }
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+{
+ struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
+ sizeof(*req));
+
+ req->vf_qid = qidx;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc) {
+ BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
+ rc);
+ goto out;
+ }
+
+ /* PF failed the transaction */
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
+ resp->hdr.status);
+ rc = -EINVAL;
+ }
+
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* request pf to add a mac for the vf */
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = vf_qid;
+ req->n_mac_vlan_filters = 1;
+
+ req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
+ if (set)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+ /* sample bulletin board for new mac */
+ bnx2x_sample_bulletin(bp);
+
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, addr, ETH_ALEN);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ /* failure may mean PF was configured with a new mac for us */
+ while (resp->hdr.status == PFVF_STATUS_FAILURE) {
+ DP(BNX2X_MSG_IOV,
+ "vfpf SET MAC failed. Check bulletin board for new posts\n");
+
+ /* copy mac from bulletin to device */
+ eth_hw_addr_set(bp->dev, bulletin.mac);
+
+ /* check if bulletin board was updated */
+ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+ } else {
+ /* no new info in bulletin */
+ break;
+ }
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* request the pf to configure the rss table for the vf queues */
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params)
+{
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+ sizeof(*req));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+ req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+ req->rss_key_size = T_ETH_RSS_KEY;
+ req->rss_result_mask = params->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+ req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+ req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+ if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+ req->rss_flags |= VFPF_RSS_SET_SRCH;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+ req->rss_flags |= VFPF_RSS_IPV4;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+ req->rss_flags |= VFPF_RSS_IPV4_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+ req->rss_flags |= VFPF_RSS_IPV4_UDP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+ req->rss_flags |= VFPF_RSS_IPV6;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+ req->rss_flags |= VFPF_RSS_IPV6_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+ req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+ DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ /* Since older drivers don't support this feature (and the VF
+ * has no way of knowing other than by this request failing),
+ * don't propagate an error in this case.
+ */
+ DP(BNX2X_MSG_IOV,
+ "Failed to send rss message to PF over VF-PF channel [%d]\n",
+ resp->hdr.status);
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+int bnx2x_vfpf_set_mcast(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc = 0, i = 0;
+ struct netdev_hw_addr *ha;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return -EINVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ /* Get Rx mode requested */
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+ /* We support at most PFVF_MAX_MULTICAST_PER_VF mcast addresses */
+ if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports no more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ netdev_for_each_mc_addr(ha, dev) {
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+ memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
+ i++;
+ }
+
+ req->n_multicast = i;
+ req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
+ resp->hdr.status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc = 0;
+
+ if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+ DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+ return 0;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = vf_qid;
+ req->n_mac_vlan_filters = 1;
+
+ req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+ if (add)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+ /* sample bulletin board for hypervisor vlan */
+ bnx2x_sample_bulletin(bp);
+
+ if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+ BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ req->filters[0].vlan_tag = vid;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+ vid);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
+{
+ int mode = bp->rx_mode;
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
+
+ /* Ignore everything except MODE_NONE */
+ if (mode == BNX2X_RX_MODE_NONE) {
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ } else {
+ /* Current PF driver will not look at the specific flags,
+ * but they are required when working with older drivers on hv.
+ */
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ if (mode == BNX2X_RX_MODE_PROMISC)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+ }
+
+ if (bp->accept_any_vlan)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
+ req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
+ rc = -EINVAL;
+ }
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
+/* General service functions */
+static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
+}
+
+static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, 1);
+}
+
+/* enable vf_pf mailbox (aka vf-pf-channel) */
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
+{
+ bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
+
+ /* enable the mailbox in the FW */
+ storm_memset_vf_mbx_ack(bp, abs_vfid);
+ storm_memset_vf_mbx_valid(bp, abs_vfid);
+
+ /* enable the VF access to the mailbox */
+ bnx2x_vf_enable_access(bp, abs_vfid);
+}
+
+/* this works only on !E1x (E1x chips do not support VFs) */
+static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
+ dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
+ u32 vf_addr_lo, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_ERR("Chip revision does not support VFs\n");
+ return DMAE_NOT_RDY;
+ }
+
+ if (!bp->dmae_ready) {
+ BNX2X_ERR("DMAE is not ready, can not copy\n");
+ return DMAE_NOT_RDY;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
+
+ if (from_vf) {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
+ (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
+ (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = vf_addr_lo;
+ dmae.src_addr_hi = vf_addr_hi;
+ dmae.dst_addr_lo = U64_LO(pf_addr);
+ dmae.dst_addr_hi = U64_HI(pf_addr);
+ } else {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
+ (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
+ (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = U64_LO(pf_addr);
+ dmae.src_addr_hi = U64_HI(pf_addr);
+ dmae.dst_addr_lo = vf_addr_lo;
+ dmae.dst_addr_hi = vf_addr_hi;
+ }
+ dmae.len = len32;
+
+ /* issue the command and wait for completion */
+ return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
+}
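+
+/* Example (editorial sketch, not part of the driver): the helper above is
+ * direction-agnostic - from_vf only selects which side is the DMAE source.
+ * A request is pulled from VF memory and a response pushed back, just as
+ * the mailbox code below does; the buffer parameters here are hypothetical.
+ */
+static int __maybe_unused example_pull_req_push_resp(struct bnx2x *bp,
+ u8 abs_vfid, dma_addr_t req_pa, dma_addr_t resp_pa, u32 vf_hi, u32 vf_lo)
+{
+ int rc;
+
+ /* VF -> PF: fetch the VF's request into PF-owned DMA memory */
+ rc = bnx2x_copy32_vf_dmae(bp, true, req_pa, abs_vfid, vf_hi, vf_lo,
+ sizeof(union vfpf_tlvs) / 4);
+ if (rc)
+ return rc;
+
+ /* PF -> VF: push the prepared response back to the VF's buffer */
+ return bnx2x_copy32_vf_dmae(bp, false, resp_pa, abs_vfid, vf_hi, vf_lo,
+ sizeof(union pfvf_tlvs) / 4);
+}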
+
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ u16 length, type;
+
+ /* prepare response */
+ type = mbx->first_tlv.tl.type;
+ length = type == CHANNEL_TLV_ACQUIRE ?
+ sizeof(struct pfvf_acquire_resp_tlv) :
+ sizeof(struct pfvf_general_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int vf_rc)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+ dma_addr_t pf_addr;
+ u64 vf_addr;
+ int rc;
+
+ bnx2x_dp_tlv_list(bp, resp);
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
+
+ /* send response */
+ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
+ mbx->first_tlv.resp_msg_offset;
+ pf_addr = mbx->msg_mapping +
+ offsetof(struct bnx2x_vf_mbx_msg, resp);
+
+ /* Copy the response body first. The leading u64 (the header) is
+ * written afterwards, since the VF polls the header to detect
+ * completion.
+ */
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
+
+ /* ack the FW */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+
+ /* copy the response header including status-done field,
+ * must be last dmae, must be after FW is acked
+ */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ sizeof(u64)/4);
+
+ /* unlock channel mutex */
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ if (rc) {
+ BNX2X_ERR("Failed to copy response status to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ return;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf);
+}
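+
+/* The function above deliberately publishes the response in two DMAE
+ * passes: body first, header last, with the FW ack in between. The VF
+ * polls the leading u64 for status, so the body must be visible before
+ * the header is. Editorial sketch of just that ordering skeleton:
+ */
+static int __maybe_unused example_publish_resp(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, dma_addr_t pf_addr, u64 vf_addr)
+{
+ int rc;
+
+ /* 1) body: everything after the leading u64 */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr + sizeof(u64),
+ vf->abs_vfid, U64_HI(vf_addr + sizeof(u64)),
+ U64_LO(vf_addr + sizeof(u64)),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4);
+ if (rc)
+ return rc;
+
+ /* 2) ack the FW so it can accept the next VF message */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+
+ /* 3) header last - this is what the VF polls on */
+ return bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr), U64_LO(vf_addr),
+ sizeof(u64) / 4);
+}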
+
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int rc)
+{
+ bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_port_phys_id_resp_tlv *port_id;
+
+ if (!(bp->flags & HAS_PHYS_PORT_ID))
+ return;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+ sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+ port_id = (struct vfpf_port_phys_id_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+ /* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+ *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
+static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_fp_hsi_resp_tlv *fp_hsi;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
+ sizeof(struct vfpf_fp_hsi_resp_tlv));
+
+ fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
+
+ /* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+ *offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
+}
+
+static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx, int vfop_status)
+{
+ int i;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
+ struct pf_vf_resc *resc = &resp->resc;
+ u8 status = bnx2x_pfvf_status_codes(vfop_status);
+ u16 length;
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* fill in pfdev info */
+ resp->pfdev_info.chip_num = bp->common.chip_id;
+ resp->pfdev_info.db_size = bp->db_size;
+ resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
+ resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
+ PFVF_CAP_TPA |
+ PFVF_CAP_TPA_UPDATE |
+ PFVF_CAP_VLAN_FILTER);
+ bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
+ sizeof(resp->pfdev_info.fw_ver));
+
+ if (status == PFVF_STATUS_NO_RESOURCE ||
+ status == PFVF_STATUS_SUCCESS) {
+ /* set resources numbers, if status equals NO_RESOURCE these
+ * are max possible numbers
+ */
+ resc->num_rxqs = vf_rxq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_txqs = vf_txq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_sbs = vf_sb_count(vf);
+ resc->num_mac_filters = vf_mac_rules_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+ resc->num_mc_filters = 0;
+
+ if (status == PFVF_STATUS_SUCCESS) {
+ /* fill in the allocated resources */
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
+
+ for_each_vfq(vf, i)
+ resc->hw_qid[i] =
+ vfq_qzone_id(vf, vfq_get(vf, i));
+
+ for_each_vf_sb(vf, i) {
+ resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
+ resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
+ }
+
+ /* if a mac has been set for this vf, supply it */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ memcpy(resc->current_mac_addr, bulletin->mac,
+ ETH_ALEN);
+ }
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
+ vf->abs_vfid,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.pf_cap,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters,
+ resc->num_mc_filters,
+ resp->pfdev_info.fw_ver);
+
+ DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
+ for (i = 0; i < vf_rxq_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
+ DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
+ for (i = 0; i < vf_sb_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
+ resc->hw_sbs[i].hw_sb_id,
+ resc->hw_sbs[i].sb_qid);
+ DP_CONT(BNX2X_MSG_IOV, "]\n");
+
+ /* prepare response */
+ length = sizeof(struct pfvf_acquire_resp_tlv);
+ bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+ /* Handle possible VF requests for physical port identifiers.
+ * 'length' should continue to indicate the offset of the first empty
+ * place in the buffer (i.e., where next TLV should be inserted)
+ */
+ if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+ CHANNEL_TLV_PHYS_PORT_ID))
+ bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+ /* `New' VFs will want to know whether the fastpath HSI is
+ * supported; if it is not, they can log that their driver version
+ * must be updated.
+ */
+ bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
+
+ bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* send the response */
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
+}
+
+static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
+ struct vfpf_acquire_tlv *acquire)
+{
+ /* Windows driver does one of three things:
+ * 1. Old driver doesn't have bulletin board address set.
+ * 2. 'Middle' driver sends mc_num == 32.
+ * 3. New driver sets the OS field.
+ */
+ if (!acquire->bulletin_addr ||
+ acquire->resc_request.num_mc_filters == 32 ||
+ ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
+ VF_OS_WINDOWS))
+ return true;
+
+ return false;
+}
+
+static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ /* Linux drivers which correctly set the doorbell size also
+ * send a physical port request
+ */
+ if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+ CHANNEL_TLV_PHYS_PORT_ID))
+ return 0;
+
+ /* Issue does not exist in Windows VMs */
+ if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+ struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
+
+ /* log vfdef info */
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
+ vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
+ acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
+ acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
+ acquire->resc_request.num_vlan_filters,
+ acquire->resc_request.num_mc_filters);
+
+ /* Prevent VFs with old drivers from loading: they calculate CIDs
+ * incorrectly and would require a VF-flr [VM reboot] in order to
+ * recover once upgraded.
+ */
+ rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
+ if (rc) {
+ DP(BNX2X_MSG_IOV,
+ "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
+ vf->abs_vfid);
+ goto out;
+ }
+
+ /* Verify the VF fastpath HSI can be supported by the loaded FW.
+ * Linux vfs should be oblivious to changes between v0 and v2.
+ */
+ if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+ vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
+ else
+ vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
+ ETH_FP_HSI_VER_2);
+ if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
+ DP(BNX2X_MSG_IOV,
+ "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
+ vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
+ ETH_FP_HSI_VERSION);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* acquire the resources */
+ rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
+
+ /* store address of vf's bulletin board */
+ vf->bulletin_map = acquire->bulletin_addr;
+ if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
+ DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
+ vf->abs_vfid);
+ vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
+ } else {
+ vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
+ }
+
+ if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+ DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+ vf->abs_vfid);
+ vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+ } else {
+ vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+ }
+
+out:
+ /* response */
+ bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
+}
+
+static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_init_tlv *init = &mbx->msg->req.init;
+ int rc;
+
+ /* record ghost addresses from vf message */
+ vf->fw_stat_map = init->stats_addr;
+ vf->stats_stride = init->stats_stride;
+ rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+
+ /* set VF multiqueue statistics collection mode */
+ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+ vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
+ /* Update VF's view of link state */
+ if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
+ bnx2x_iov_link_update_vf(bp, vf->index);
+
+ /* response */
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+/* convert MBX queue-flags to standard SP queue-flags */
+static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
+ unsigned long *sp_q_flags)
+{
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
+ __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
+ __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
+ __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
+ __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
+ __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
+ __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
+
+ /* outer vlan removal is set according to PF's multi function mode */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
+}
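+
+/* A table-driven equivalent of the translation above (editorial sketch;
+ * the open-coded form is what the driver actually runs). Each
+ * VFPF_QUEUE_FLG_* mask maps to a BNX2X_Q_FLG_* bit number for __set_bit().
+ */
+static void __maybe_unused example_set_q_flags(u32 mbx_q_flags,
+ unsigned long *sp_q_flags)
+{
+ static const struct { u32 mbx; int sp; } map[] = {
+ { VFPF_QUEUE_FLG_TPA, BNX2X_Q_FLG_TPA },
+ { VFPF_QUEUE_FLG_TPA_IPV6, BNX2X_Q_FLG_TPA_IPV6 },
+ { VFPF_QUEUE_FLG_TPA_GRO, BNX2X_Q_FLG_TPA_GRO },
+ { VFPF_QUEUE_FLG_STATS, BNX2X_Q_FLG_STATS },
+ { VFPF_QUEUE_FLG_VLAN, BNX2X_Q_FLG_VLAN },
+ { VFPF_QUEUE_FLG_COS, BNX2X_Q_FLG_COS },
+ { VFPF_QUEUE_FLG_HC, BNX2X_Q_FLG_HC },
+ { VFPF_QUEUE_FLG_DHC, BNX2X_Q_FLG_DHC },
+ { VFPF_QUEUE_FLG_LEADING_RSS, BNX2X_Q_FLG_LEADING_RSS },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (mbx_q_flags & map[i].mbx)
+ __set_bit(map[i].sp, sp_q_flags);
+}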
+
+static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
+ struct bnx2x_vf_queue_construct_params qctor;
+ int rc = 0;
+
+ /* verify vf_qid */
+ if (setup_q->vf_qid >= vf_rxq_count(vf)) {
+ BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
+ setup_q->vf_qid, vf_rxq_count(vf));
+ rc = -EINVAL;
+ goto response;
+ }
+
+ /* Tx queues must be set up alongside Rx queues, so if neither the
+ * Rx nor the Tx queue is marked as valid there's nothing to do.
+ */
+ if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
+ unsigned long q_type = 0;
+
+ struct bnx2x_queue_init_params *init_p;
+ struct bnx2x_queue_setup_params *setup_p;
+
+ if (bnx2x_vfq_is_leading(q))
+ bnx2x_leading_vfq_init(bp, vf, q);
+
+ /* re-init the VF operation context */
+ memset(&qctor, 0,
+ sizeof(struct bnx2x_vf_queue_construct_params));
+ setup_p = &qctor.prep_qsetup;
+ init_p = &qctor.qstate.params.init;
+
+ /* activate immediately */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
+
+ if (setup_q->param_valid & VFPF_TXQ_VALID) {
+ struct bnx2x_txq_setup_params *txq_params =
+ &setup_p->txq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* save sb resource index */
+ q->sb_idx = setup_q->txq.vf_sb;
+
+ /* tx init */
+ init_p->tx.hc_rate = setup_q->txq.hc_rate;
+ init_p->tx.sb_cq_index = setup_q->txq.sb_index;
+
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
+ &init_p->tx.flags);
+
+ /* tx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
+ &setup_p->flags);
+
+ /* tx setup - general, nothing */
+
+ /* tx setup - tx */
+ txq_params->dscr_map = setup_q->txq.txq_addr;
+ txq_params->sb_cq_index = setup_q->txq.sb_index;
+ txq_params->traffic_type = setup_q->txq.traffic_type;
+
+ bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+
+ if (setup_q->param_valid & VFPF_RXQ_VALID) {
+ struct bnx2x_rxq_setup_params *rxq_params =
+ &setup_p->rxq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Note: there is no support for different SBs
+ * for TX and RX
+ */
+ q->sb_idx = setup_q->rxq.vf_sb;
+
+ /* rx init */
+ init_p->rx.hc_rate = setup_q->rxq.hc_rate;
+ init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
+ &init_p->rx.flags);
+
+ /* rx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
+ &setup_p->flags);
+
+ /* rx setup - general */
+ setup_p->gen_params.mtu = setup_q->rxq.mtu;
+
+ /* rx setup - rx */
+ rxq_params->drop_flags = setup_q->rxq.drop_flags;
+ rxq_params->dscr_map = setup_q->rxq.rxq_addr;
+ rxq_params->sge_map = setup_q->rxq.sge_addr;
+ rxq_params->rcq_map = setup_q->rxq.rcq_addr;
+ rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
+ rxq_params->buf_sz = setup_q->rxq.buf_sz;
+ rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
+ rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
+ rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
+ rxq_params->cache_line_log =
+ setup_q->rxq.cache_line_log;
+ rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+
+ /* rx setup - multicast engine */
+ if (bnx2x_vfq_is_leading(q)) {
+ u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ rxq_params->mcast_engine_id = mcast_id;
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
+ bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+ /* complete the preparations */
+ bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
+
+ rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
+ if (rc)
+ goto response;
+ }
+response:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *tlv,
+ struct bnx2x_vf_mac_vlan_filters **pfl,
+ u32 type_flag)
+{
+ int i, j;
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+
+ fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
+ GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
+ struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
+
+ if ((msg_filter->flags & type_flag) != type_flag)
+ continue;
+ memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+ if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
+ fl->filters[j].mac = msg_filter->mac;
+ fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
+ }
+ if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ fl->filters[j].vid = msg_filter->vlan_tag;
+ fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
+ }
+ fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
+ fl->count++;
+ j++;
+ }
+ if (!fl->count)
+ kfree(fl);
+ else
+ *pfl = fl;
+
+ return 0;
+}
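+
+/* struct_size() above (from <linux/overflow.h>) computes
+ * sizeof(*fl) + n * sizeof(fl->filters[0]) and saturates on overflow so
+ * the allocation fails cleanly instead of wrapping. A minimal sketch of
+ * the same flexible-array pattern, using a hypothetical struct:
+ */
+struct example_filter_list {
+ int count;
+ struct vfpf_q_mac_vlan_filter filters[]; /* flexible array member */
+};
+
+static __maybe_unused struct example_filter_list *
+example_alloc_filter_list(int n)
+{
+ struct example_filter_list *fl;
+
+ fl = kzalloc(struct_size(fl, filters, n), GFP_KERNEL);
+ return fl; /* NULL on allocation failure or size overflow */
+}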
+
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+ u32 flags)
+{
+ int i, cnt = 0;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ if ((filters->filters[i].flags & flags) == flags)
+ cnt++;
+
+ return cnt;
+}
+
+static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
+ struct vfpf_q_mac_vlan_filter *filter)
+{
+ DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
+ if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
+ DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
+ if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
+ DP_CONT(msglvl, ", MAC=%pM", filter->mac);
+ DP_CONT(msglvl, "\n");
+}
+
+static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ int i;
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
+ &filters->filters[i]);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
+ DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
+ for (i = 0; i < filters->n_multicast; i++)
+ DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
+}
+
+#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
+#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
+
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc = 0;
+
+ struct vfpf_set_q_filters_tlv *msg =
+ &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
+
+ /* check for any mac/vlan changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+
+ /* build vlan-mac list */
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_MAC_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+
+ /* set vlan-mac list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+
+ /* build mac list */
+ fl = NULL;
+
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set mac list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+
+ /* build vlan list */
+ fl = NULL;
+
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+
+ }
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
+
+ /* Ignore VF requested mode; instead set a regular mode */
+ if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+ }
+
+ /* any_vlan is not configured if HV is forcing VLAN
+ * any_vlan is configured if
+ * 1. VF does not support vlan filtering
+ * OR
+ * 2. VF supports vlan filtering and explicitly requested it
+ */
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+ (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+ msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+ /* set rx-mode */
+ rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+ if (rc)
+ goto op_err;
+ }
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+ msg->n_multicast, false);
+ if (rc)
+ goto op_err;
+ }
+op_err:
+ if (rc)
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, rc);
+ return rc;
+}
+
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
+ /* if a mac was already set for this VF via the set vf mac ndo, we only
+ * accept mac configurations of that mac. Why accept them at all?
+ * Because the PF may have been unable to configure the mac at the
+ * time, since the queue was not yet set up.
+ */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ struct vfpf_q_mac_vlan_filter *filter = NULL;
+ int i;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (!(filters->filters[i].flags &
+ VFPF_Q_FILTER_DEST_MAC_VALID))
+ continue;
+
+ /* once a mac was set by ndo can only accept
+ * a single mac...
+ */
+ if (filter) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+ vf->abs_vfid,
+ filters->n_mac_vlan_filters);
+ rc = -EPERM;
+ goto response;
+ }
+
+ filter = &filters->filters[i];
+ }
+
+ /* ...and only the mac set by the ndo */
+ if (filter &&
+ !ether_addr_equal(filter->mac, bulletin->mac)) {
+ BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
+ vf->abs_vfid);
+
+ rc = -EPERM;
+ goto response;
+ }
+ }
+
+response:
+ return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
+ /* if vlan was set by hypervisor we don't allow guest to config vlan */
+ if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ /* search for vlan filters */
+
+ if (bnx2x_vf_filters_contain(filters,
+ VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ rc = -EPERM;
+ goto response;
+ }
+ }
+
+ /* verify vf_qid */
+ if (filters->vf_qid > vf_rxq_count(vf)) {
+ rc = -EPERM;
+ goto response;
+ }
+
+response:
+ return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ int rc;
+
+ rc = bnx2x_filters_validate_mac(bp, vf, filters);
+ if (rc)
+ goto response;
+
+ rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+ if (rc)
+ goto response;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
+ vf->abs_vfid,
+ filters->vf_qid);
+
+ /* print q_filter message */
+ bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
+
+ rc = bnx2x_vf_mbx_qfilters(bp, vf);
+response:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int qid = mbx->msg->req.q_op.vf_qid;
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
+ vf->abs_vfid, qid);
+
+ rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
+
+ rc = bnx2x_vf_close(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
+
+ rc = bnx2x_vf_free(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_config_rss_params rss;
+ struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+ int rc = 0;
+
+ if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+ rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+ BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+ vf->index);
+ rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
+ /* set vfop params according to rss tlv */
+ memcpy(rss.ind_table, rss_tlv->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+ rss.rss_obj = &vf->rss_conf_obj;
+ rss.rss_result_mask = rss_tlv->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ rss.rss_flags = 0;
+ rss.ramrod_flags = 0;
+
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+ __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+ __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+ __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+ __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+ __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
+
+ if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+ (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+ BNX2X_ERR("about to hit a FW assert. aborting...\n");
+ rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ rc = bnx2x_vf_rss_update(bp, vf, &rss);
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ int rc = 0;
+
+ if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+ U_ETH_MAX_SGES_FOR_PACKET) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_sges_for_packet,
+ U_ETH_MAX_SGES_FOR_PACKET);
+ }
+
+ if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_tpa_queues,
+ MAX_AGG_QS(bp));
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_queue_update_tpa_params vf_op_params;
+ struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+ int rc = 0;
+
+ memset(&vf_op_params, 0, sizeof(vf_op_params));
+
+ if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+ goto mbx_resp;
+
+ vf_op_params.complete_on_both_clients =
+ tpa_tlv->tpa_client_info.complete_on_both_clients;
+ vf_op_params.dont_verify_thr =
+ tpa_tlv->tpa_client_info.dont_verify_thr;
+ vf_op_params.max_agg_sz =
+ tpa_tlv->tpa_client_info.max_agg_size;
+ vf_op_params.max_sges_pkt =
+ tpa_tlv->tpa_client_info.max_sges_for_packet;
+ vf_op_params.max_tpa_queues =
+ tpa_tlv->tpa_client_info.max_tpa_queues;
+ vf_op_params.sge_buff_sz =
+ tpa_tlv->tpa_client_info.sge_buff_size;
+ vf_op_params.sge_pause_thr_high =
+ tpa_tlv->tpa_client_info.sge_pause_thr_high;
+ vf_op_params.sge_pause_thr_low =
+ tpa_tlv->tpa_client_info.sge_pause_thr_low;
+ vf_op_params.tpa_mode =
+ tpa_tlv->tpa_client_info.tpa_mode;
+ vf_op_params.update_ipv4 =
+ tpa_tlv->tpa_client_info.update_ipv4;
+ vf_op_params.update_ipv6 =
+ tpa_tlv->tpa_client_info.update_ipv6;
+
+ rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
+
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+/* dispatch request */
+static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int i;
+
+ if (vf->state == VF_LOST) {
+ /* Just ack the FW and return if the VF was lost due to a
+ * parity error; the VF is expected to time out while waiting
+ * for the PF response.
+ */
+ DP(BNX2X_MSG_IOV,
+ "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ return;
+ }
+
+ /* check if tlv type is known */
+ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ bnx2x_vf_mbx_acquire(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_INIT:
+ bnx2x_vf_mbx_init_vf(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_SETUP_Q:
+ bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_SET_Q_FILTERS:
+ bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_TEARDOWN_Q:
+ bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_CLOSE:
+ bnx2x_vf_mbx_close_vf(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_RELEASE:
+ bnx2x_vf_mbx_release_vf(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_UPDATE_RSS:
+ bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+ return;
+ case CHANNEL_TLV_UPDATE_TPA:
+ bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+ return;
+ }
+
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver, supporting features
+ * we don't yet know about. Too bad we can't support them. Or this
+ * may be because someone wrote a buggy VF driver that is sending
+ * garbage over the channel.
+ */
+ BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
+ vf->state);
+ for (i = 0; i < 20; i++)
+ DP_CONT(BNX2X_MSG_IOV, "%x ",
+ mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+ }
+
+ /* can we respond to VF (do we have an address for it?) */
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
+ } else {
+ /* can't send a response since this VF is unknown to us
+ * just ack the FW to release the mailbox and unlock
+ * the channel.
+ */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ /* Firmware ack should be written before unlocking channel */
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+ }
+}
+
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event)
+{
+ u8 vf_idx;
+
+ DP(BNX2X_MSG_IOV,
+ "vf pf event received: vfid %d, address_hi %x, address lo %x",
+ vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
+ /* Sanity checks; consider removing later */
+
+ /* check if the vf_id is valid */
+ if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
+ BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
+ vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
+ return;
+ }
+
+ vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
+
+ /* Update VFDB with current message and schedule its handling */
+ mutex_lock(&BP_VFDB(bp)->event_mutex);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
+ le32_to_cpu(vfpf_event->msg_addr_hi);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
+ le32_to_cpu(vfpf_event->msg_addr_lo);
+ BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+ mutex_unlock(&BP_VFDB(bp)->event_mutex);
+
+ bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
+
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+ struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+ u64 events;
+ u8 vf_idx;
+ int rc;
+
+ if (!vfdb)
+ return;
+
+ mutex_lock(&vfdb->event_mutex);
+ events = vfdb->event_occur;
+ vfdb->event_occur = 0;
+ mutex_unlock(&vfdb->event_mutex);
+
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* Handle VFs which have pending events */
+ if (!(events & (1ULL << vf_idx)))
+ continue;
+
+ DP(BNX2X_MSG_IOV,
+ "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+ vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+ mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+ vf->abs_vfid, mbx->vf_addr_hi,
+ mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n",
+ vf->abs_vfid);
+ bnx2x_vf_release(bp, vf);
+ return;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* Clean the response buffer to avoid falsely detecting
+ * stale TLV chains from a previous response.
+ */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ }
+}
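+
+/* The loop above relies on snapshot-and-clear of the event bitmap: bits
+ * are claimed under the mutex, then processed without holding it, so a VF
+ * raising a new event meanwhile is simply handled by the next scheduled
+ * pass. Editorial sketch of just the claim step:
+ */
+static u64 __maybe_unused example_claim_events(struct bnx2x_vfdb *vfdb)
+{
+ u64 events;
+
+ mutex_lock(&vfdb->event_mutex);
+ events = vfdb->event_occur; /* take ownership of all pending bits */
+ vfdb->event_occur = 0; /* new events accumulate for the next pass */
+ mutex_unlock(&vfdb->event_mutex);
+
+ return events;
+}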
+
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+ bool support_long)
+{
+ /* Older VFs contain a bug where they can't check CRC for bulletin
+ * boards of length greater than legacy size.
+ */
+ bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
+ BULLETIN_CONTENT_LEGACY_SIZE;
+ bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
+}
+
+/* propagate local bulletin board to vf */
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
+ dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
+ vf * BULLETIN_CONTENT_SIZE;
+ dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
+ int rc;
+
+ /* can only update vf after init took place */
+ if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
+ bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
+ return 0;
+
+ /* increment bulletin board version and compute crc */
+ bulletin->version++;
+ bnx2x_vf_bulletin_finalize(bulletin,
+ (bnx2x_vf(bp, vf, cfg_flags) &
+ VF_CFG_EXT_BULLETIN) ? true : false);
+
+ /* propagate bulletin board via dmae to vm memory */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
+ bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
+ U64_LO(vf_addr), bulletin->length / 4);
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 0000000000..64f2b52c58
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,473 @@
+/* bnx2x_vfpf.h: QLogic Everest network driver.
+ *
+ * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
+ *
+ * Unless you and QLogic execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the “GPL”),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is
+ * not derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
+ */
+#ifndef VF_PF_IF_H
+#define VF_PF_IF_H
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* Common definitions for all HVs */
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+};
+
+struct hw_sb_info {
+ u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+};
+
+/* HW VF-PF channel definitions
+ * A.K.A VF-PF mailbox
+ */
+#define TLV_BUFFER_SIZE 1024
+#define PF_VF_BULLETIN_SIZE 512
+
+#define VFPF_QUEUE_FLG_TPA 0x0001
+#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002
+#define VFPF_QUEUE_FLG_TPA_GRO 0x0004
+#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008
+#define VFPF_QUEUE_FLG_STATS 0x0010
+#define VFPF_QUEUE_FLG_OV 0x0020
+#define VFPF_QUEUE_FLG_VLAN 0x0040
+#define VFPF_QUEUE_FLG_COS 0x0080
+#define VFPF_QUEUE_FLG_HC 0x0100
+#define VFPF_QUEUE_FLG_DHC 0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
+
+#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
+#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
+#define VFPF_QUEUE_DROP_TTL0 (1 << 2)
+#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3)
+
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020
+
+#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_CONTENT_LEGACY_SIZE (32)
+#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing in the towel */
+#define BULLETIN_CRC_SEED 0
+
+enum {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 resp_msg_offset;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_general_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+ /* the following fields are for debug purposes */
+ u8 vf_id; /* ME register value */
+ u8 vf_os; /* e.g. Linux, W2K8 */
+#define VF_OS_SUBVERSION_MASK (0x1f)
+#define VF_OS_MASK (0xe0)
+#define VF_OS_SHIFT (5)
+#define VF_OS_UNDEFINED (0 << VF_OS_SHIFT)
+#define VF_OS_WINDOWS (1 << VF_OS_SHIFT)
+
+ u8 fp_hsi_ver;
+ u8 caps;
+#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER (1 << 1)
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ aligned_u64 bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vfpf_q_op_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 vf_qid;
+ u8 padding[3];
+};
+
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u32 rss_flags;
+#define VFPF_RSS_MODE_DISABLED (1 << 0)
+#define VFPF_RSS_MODE_REGULAR (1 << 1)
+#define VFPF_RSS_SET_SRCH (1 << 2)
+#define VFPF_RSS_IPV4 (1 << 3)
+#define VFPF_RSS_IPV4_TCP (1 << 4)
+#define VFPF_RSS_IPV4_UDP (1 << 5)
+#define VFPF_RSS_IPV6 (1 << 6)
+#define VFPF_RSS_IPV6_TCP (1 << 7)
+#define VFPF_RSS_IPV6_UDP (1 << 8)
+ u8 rss_result_mask;
+ u8 ind_table_size;
+ u8 rss_key_size;
+ u8 padding;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 pf_cap;
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
+#define PFVF_CAP_VLAN_FILTER 0x00000010
+
+ char fw_ver[32];
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 padding;
+ } pfdev_info;
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 permanent_mac_addr[ETH_ALEN];
+ u8 current_mac_addr[ETH_ALEN];
+ u8 padding[2];
+ } resc;
+};
+
+struct vfpf_port_phys_id_resp_tlv {
+ struct channel_tlv tl;
+ u8 id[ETH_ALEN];
+ u8 padding[2];
+};
+
+struct vfpf_fp_hsi_resp_tlv {
+ struct channel_tlv tl;
+ u8 is_supported;
+ u8 padding[3];
+};
+
+#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set, the VF's queue
+ * stats will be coalesced on
+ * the leading RSS queue
+ */
+
+/* Init VF */
+struct vfpf_init_tlv {
+ struct vfpf_first_tlv first_tlv;
+ aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
+ aligned_u64 spq_addr;
+ aligned_u64 stats_addr;
+ u16 stats_stride;
+ u32 flags;
+ u32 padding[2];
+};
+
+/* Setup Queue */
+struct vfpf_setup_q_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_rxq_params {
+ /* physical addresses */
+ aligned_u64 rcq_addr;
+ aligned_u64 rcq_np_addr;
+ aligned_u64 rxq_addr;
+ aligned_u64 sge_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ /* rx buffer info */
+ u16 mtu;
+ u16 buf_sz;
+ u16 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+
+ /* valid iff VFPF_QUEUE_FLG_TPA */
+ u16 sge_buf_sz;
+ u16 tpa_agg_sz;
+ u8 max_sge_pkt;
+
+ u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs
+ * all the flags are turned off
+ */
+
+ u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */
+ u8 padding;
+ } rxq;
+
+ struct vf_pf_txq_params {
+ /* physical addresses */
+ aligned_u64 txq_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+ u8 traffic_type; /* see in setup_context() */
+ u8 padding;
+ } txq;
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 param_valid;
+#define VFPF_RXQ_VALID 0x01
+#define VFPF_TXQ_VALID 0x02
+ u8 padding[2];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET 0x100 /* set/clear */
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+};
+
+/* configure queue filters */
+struct vfpf_set_q_filters_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u32 flags;
+#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01
+#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02
+#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 n_mac_vlan_filters;
+ u8 n_multicast;
+ u8 padding;
+
+#define PFVF_MAX_MAC_FILTERS 16
+#define PFVF_MAX_VLAN_FILTERS 16
+#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\
+ PFVF_MAX_VLAN_FILTERS)
+ struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];
+
+#define PFVF_MAX_MULTICAST_PER_VF 32
+ u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];
+
+ u32 rx_mask; /* see mask constants at the top of the file */
+};
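+
+/* Editorial sketch (not part of the interface): how the VF side typically
+ * populates this TLV for a single "add vlan" filter - compare
+ * bnx2x_vfpf_update_vlan() in bnx2x_vfpf.c. Request prep, list
+ * termination and sending are omitted here.
+ */
+static inline void example_fill_vlan_filter(struct vfpf_set_q_filters_tlv *req,
+ u8 vf_qid, u16 vid)
+{
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = vf_qid;
+ req->n_mac_vlan_filters = 1;
+ req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID |
+ VFPF_Q_FILTER_SET;
+ req->filters[0].vlan_tag = vid;
+}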
+
+struct vfpf_tpa_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_tpa_client_info {
+ aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u16 sge_buff_size;
+ u16 max_agg_size;
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+ } tpa_client_info;
+};
+
+/* close VF (disable VF) */
+struct vfpf_close_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id; /* for debug */
+ u8 padding[2];
+};
+
+/* release the VF's acquired resources */
+struct vfpf_release_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id;
+ u8 padding[2];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_init_tlv init;
+ struct vfpf_close_tlv close;
+ struct vfpf_q_op_tlv q_op;
+ struct vfpf_setup_q_tlv setup_q;
+ struct vfpf_set_q_filters_tlv set_q_filters;
+ struct vfpf_release_tlv release;
+ struct vfpf_rss_tlv update_rss;
+ struct vfpf_tpa_tlv update_tpa;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* This is a structure which is allocated in the VF and which the PF may
+ * update when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF to prevent
+ * loss of data upon multiple updates (and the need for read-modify-write).
+ */
+struct pf_vf_bulletin_size {
+ u8 size[PF_VF_BULLETIN_SIZE];
+};
+
+struct pf_vf_bulletin_content {
+ u32 crc; /* crc of structure to ensure is not in
+ * mid-update
+ */
+ u16 version;
+ u16 length;
+
+ aligned_u64 valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
+ * is available for it
+ */
+#define VLAN_VALID 1 /* when set, the vf should not access
+ * the vfpf channel
+ */
+#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not
+ * to attempt to send messages on the
+ * channel after this bit is set
+ */
+#define LINK_VALID 3 /* alert the VF that a new link status
+ * update is available for it
+ */
+ u8 mac[ETH_ALEN];
+ u8 mac_padding[2];
+
+ u16 vlan;
+ u8 vlan_padding[6];
+
+ u16 link_speed; /* Effective line speed */
+ u8 link_speed_padding[6];
+ u32 link_flags; /* VFPF_LINK_REPORT_XXX flags */
+#define VFPF_LINK_REPORT_LINK_DOWN (1 << 0)
+#define VFPF_LINK_REPORT_FULL_DUPLEX (1 << 1)
+#define VFPF_LINK_REPORT_RX_FC_ON (1 << 2)
+#define VFPF_LINK_REPORT_TX_FC_ON (1 << 3)
+ u8 link_flags_padding[4];
+};
+
+union pf_vf_bulletin {
+ struct pf_vf_bulletin_content content;
+ struct pf_vf_bulletin_size size;
+};
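+
+/* A VF must treat the board as a snapshot to be validated: copy it, then
+ * check the crc field over the copy, retrying up to BULLETIN_ATTEMPTS
+ * times in case the PF updated it mid-read. Editorial sketch; the crc
+ * routine is passed in, since its declaration lives elsewhere:
+ */
+static inline bool
+example_sample_bulletin(struct pf_vf_bulletin_content *dst,
+ const union pf_vf_bulletin *board,
+ u32 (*crc)(struct pf_vf_bulletin_content *))
+{
+ int i;
+
+ for (i = 0; i < BULLETIN_ATTEMPTS; i++) {
+ /* snapshot first, then validate the snapshot */
+ memcpy(dst, &board->content, BULLETIN_CONTENT_SIZE);
+ if (dst->crc == crc(dst))
+ return true; /* consistent copy obtained */
+ }
+
+ return false; /* PF kept updating mid-read, or board is corrupt */
+}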
+
+#define MAX_TLVS_IN_LIST 50
+
+enum channel_tlvs {
+ CHANNEL_TLV_NONE,
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_INIT,
+ CHANNEL_TLV_SETUP_Q,
+ CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_ACTIVATE_Q,
+ CHANNEL_TLV_DEACTIVATE_Q,
+ CHANNEL_TLV_TEARDOWN_Q,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
+ CHANNEL_TLV_PF_RELEASE_VF,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_FLR,
+ CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_PF_SET_VLAN,
+ CHANNEL_TLV_UPDATE_RSS,
+ CHANNEL_TLV_PHYS_PORT_ID,
+ CHANNEL_TLV_UPDATE_TPA,
+ CHANNEL_TLV_FP_HSI_SUPPORT,
+ CHANNEL_TLV_MAX
+};
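+
+/* Every TLV's tl.length is the full TLV size (header included), so a list
+ * can be walked by advancing length bytes until CHANNEL_TLV_LIST_END.
+ * Editorial sketch of such a walk; the driver's real traversal is
+ * bnx2x_search_tlv_list() in bnx2x_vfpf.c:
+ */
+static inline struct channel_tlv *example_find_tlv(void *buf, u16 want)
+{
+ struct channel_tlv *tlv = buf;
+
+ while (tlv->type != CHANNEL_TLV_LIST_END) {
+ if (tlv->type == want)
+ return tlv;
+ if (!tlv->length) /* malformed list; avoid spinning forever */
+ return NULL;
+ tlv = (struct channel_tlv *)((u8 *)tlv + tlv->length);
+ }
+
+ return NULL;
+}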
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* VF_PF_IF_H */