Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb3')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/Makefile              |    9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/adapter.h             |  334
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/ael1002.c             |  936
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/aq100x.c              |  354
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h              |  773
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h      |  189
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h          |  111
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h         |  177
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c          | 3472
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c       | 1402
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h       |  209
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h    |  177
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.c                 |  465
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.h                 |  148
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/mc5.c                 |  422
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/regs.h                | 2564
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c                 | 3383
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge_defs.h            |  256
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h              | 1495
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c               | 3748
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3cdev.h              |   70
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/version.h             |   42
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/vsc8211.c             |  416
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/xgmac.c               |  657
24 files changed, 21809 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/Makefile b/drivers/net/ethernet/chelsio/cxgb3/Makefile
new file mode 100644
index 0000000000..f65f0d93be
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Chelsio T3 driver
+#
+
+obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
+
+cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
+ xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
new file mode 100644
index 0000000000..6d682b7c7a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file should not be included directly. Include common.h instead. */
+
+#ifndef __T3_ADAPTER_H__
+#define __T3_ADAPTER_H__
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/cache.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include "t3cdev.h"
+#include <asm/io.h>
+
+struct adapter;
+struct sge_qset;
+struct port_info;
+
+enum mac_idx_types {
+ LAN_MAC_IDX = 0,
+ SAN_MAC_IDX,
+
+ MAX_MAC_IDX
+};
+
+struct iscsi_config {
+ __u8 mac_addr[ETH_ALEN];
+ __u32 flags;
+ int (*send)(struct port_info *pi, struct sk_buff **skb);
+ int (*recv)(struct port_info *pi, struct sk_buff *skb);
+};
+
+struct port_info {
+ struct adapter *adapter;
+ struct sge_qset *qs;
+ u8 port_id;
+ u8 nqsets;
+ u8 first_qset;
+ struct cphy phy;
+ struct cmac mac;
+ struct link_config link_config;
+ int activity;
+ __be32 iscsi_ipv4addr;
+ struct iscsi_config iscsic;
+
+ int link_fault; /* link fault was detected */
+};
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ QUEUES_BOUND = (1 << 3),
+ TP_PARITY_INIT = (1 << 4),
+ NAPI_INIT = (1 << 5),
+};
+
+struct fl_pg_chunk {
+ struct page *page;
+ void *va;
+ unsigned int offset;
+ unsigned long *p_cnt;
+ dma_addr_t mapping;
+};
+
+struct rx_desc;
+struct rx_sw_desc;
+
+struct sge_fl { /* SGE per free-buffer list state */
+ unsigned int buf_size; /* size of each Rx buffer */
+ unsigned int credits; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int size; /* capacity of free list */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int gen; /* free list generation */
+ struct fl_pg_chunk pg_chunk;/* page chunk cache */
+ unsigned int use_pages; /* whether FL uses pages or sk_buffs */
+ unsigned int order; /* order of page allocations */
+ unsigned int alloc_size; /* size of allocated buffer */
+ struct rx_desc *desc; /* address of HW Rx descriptor ring */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+ dma_addr_t phys_addr; /* physical address of HW ring start */
+ unsigned int cntxt_id; /* SGE context id for the free list */
+ unsigned long empty; /* # of times queue ran out of buffers */
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
+};
+
+/*
+ * Bundle size for grouping offload RX packets for delivery to the stack.
+ * Don't make this too big as we do prefetch on each packet in a bundle.
+ */
+# define RX_BUNDLE_SIZE 8
+
+struct rsp_desc;
+
+struct sge_rspq { /* state for an SGE response queue */
+ unsigned int credits; /* # of pending response credits */
+ unsigned int size; /* capacity of response queue */
+ unsigned int cidx; /* consumer index */
+ unsigned int gen; /* current generation bit */
+ unsigned int polling; /* is the queue serviced through NAPI? */
+ unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
+ unsigned int next_holdoff; /* holdoff time for next interrupt */
+ unsigned int rx_recycle_buf; /* whether recycling occurred
+ within current sop-eop */
+ struct rsp_desc *desc; /* address of HW response ring */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ unsigned int cntxt_id; /* SGE context id for the response q */
+ spinlock_t lock; /* guards response processing */
+ struct sk_buff_head rx_queue; /* offload packet receive queue */
+ struct sk_buff *pg_skb; /* used to build frag list in napi handler */
+
+ unsigned long offload_pkts;
+ unsigned long offload_bundles;
+ unsigned long eth_pkts; /* # of ethernet packets */
+ unsigned long pure_rsps; /* # of pure (non-data) responses */
+ unsigned long imm_data; /* responses with immediate data */
+ unsigned long rx_drops; /* # of packets dropped due to no mem */
+ unsigned long async_notif; /* # of asynchronous notification events */
+ unsigned long empty; /* # of times queue ran out of credits */
+ unsigned long nomem; /* # of responses deferred due to no mem */
+ unsigned long unhandled_irqs; /* # of spurious intrs */
+ unsigned long starved;
+ unsigned long restarted;
+};
+
+struct tx_desc;
+struct tx_sw_desc;
+
+struct sge_txq { /* state for an SGE Tx queue */
+ unsigned long flags; /* HW DMA fetch status */
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int processed; /* total # of descs HW has processed */
+ unsigned int cleaned; /* total # of descs SW has reclaimed */
+ unsigned int stop_thres; /* SW TX queue suspend threshold */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int gen; /* current value of generation bit */
+ unsigned int unacked; /* Tx descriptors used since last COMPL */
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ spinlock_t lock; /* guards enqueueing of new packets */
+ unsigned int token; /* WR token */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ struct sk_buff_head sendq; /* List of backpressured offload packets */
+ struct work_struct qresume_task; /* restarts the queue */
+ unsigned int cntxt_id; /* SGE context id for the Tx q */
+ unsigned long stops; /* # of times q has been stopped */
+ unsigned long restarts; /* # of queue restarts */
+};
+
+enum { /* per port SGE statistics */
+ SGE_PSTAT_TSO, /* # of TSO requests */
+ SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
+ SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
+ SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
+ SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
+
+ SGE_PSTAT_MAX /* must be last */
+};
+
+struct napi_gro_fraginfo;
+
+struct sge_qset { /* an SGE queue set */
+ struct adapter *adap;
+ struct napi_struct napi;
+ struct sge_rspq rspq;
+ struct sge_fl fl[SGE_RXQ_PER_SET];
+ struct sge_txq txq[SGE_TXQ_PER_SET];
+ int nomem;
+ void *lro_va;
+ struct net_device *netdev;
+ struct netdev_queue *tx_q; /* associated netdev TX queue */
+ unsigned long txq_stopped; /* which Tx queues are stopped */
+ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+ struct timer_list rx_reclaim_timer; /* reclaims RX buffers */
+ unsigned long port_stats[SGE_PSTAT_MAX];
+} ____cacheline_aligned;
+
+struct sge {
+ struct sge_qset qs[SGE_QSETS];
+ spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
+};
+
+struct adapter {
+ struct t3cdev tdev;
+ struct list_head adapter_list;
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ unsigned long registered_device_map;
+ unsigned long open_device_map;
+ unsigned long flags;
+
+ const char *name;
+ int msg_enable;
+ unsigned int mmio_len;
+
+ struct adapter_params params;
+ unsigned int slow_intr_mask;
+ unsigned long irq_stats[IRQ_NUM_STATS];
+
+ int msix_nvectors;
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } msix_info[SGE_QSETS + 1];
+
+ /* T3 modules */
+ struct sge sge;
+ struct mc7 pmrx;
+ struct mc7 pmtx;
+ struct mc7 cm;
+ struct mc5 mc5;
+
+ struct net_device *port[MAX_NPORTS];
+ unsigned int check_task_cnt;
+ struct delayed_work adap_check_task;
+ struct work_struct ext_intr_handler_task;
+ struct work_struct fatal_error_handler_task;
+ struct work_struct link_fault_handler_task;
+
+ struct work_struct db_full_task;
+ struct work_struct db_empty_task;
+ struct work_struct db_drop_task;
+
+ struct dentry *debugfs_root;
+
+ struct mutex mdio_lock;
+ spinlock_t stats_lock;
+ spinlock_t work_lock;
+
+ struct sk_buff *nofail_skb;
+};
+
+static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ u32 val = readl(adapter->regs + reg_addr);
+
+ CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
+ return val;
+}
+
+static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
+ writel(val, adapter->regs + reg_addr);
+}
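+
+/*
+ * Typical usage is a read-modify-write built from these two helpers,
+ * e.g. (a sketch; t3_set_reg_field() in t3_hw.c wraps this pattern):
+ *
+ *	u32 v = t3_read_reg(adapter, reg_addr);
+ *	t3_write_reg(adapter, reg_addr, (v & ~mask) | val);
+ */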
+
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+ return netdev_priv(adap->port[idx]);
+}
+
+static inline int phy2portid(struct cphy *phy)
+{
+ struct adapter *adap = phy->adapter;
+ struct port_info *port0 = adap2pinfo(adap, 0);
+
+ return &port0->phy == phy ? 0 : 1;
+}
+
+#define OFFLOAD_DEVMAP_BIT 15
+
+#define tdev2adap(d) container_of(d, struct adapter, tdev)
+
+static inline int offload_running(struct adapter *adapter)
+{
+ return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
+
+void t3_os_ext_intr_handler(struct adapter *adapter);
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
+ int speed, int duplex, int fc);
+void t3_os_phymod_changed(struct adapter *adap, int port_id);
+void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
+
+void t3_sge_start(struct adapter *adap);
+void t3_sge_stop_dma(struct adapter *adap);
+void t3_sge_stop(struct adapter *adap);
+void t3_start_sge_timers(struct adapter *adap);
+void t3_stop_sge_timers(struct adapter *adap);
+void t3_free_sge_resources(struct adapter *adap);
+void t3_sge_err_intr_handler(struct adapter *adapter);
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling);
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ int irq_vec_idx, const struct qset_params *p,
+ int ntxq, struct net_device *dev,
+ struct netdev_queue *netdevq);
+extern struct workqueue_struct *cxgb3_wq;
+
+int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
+
+#endif /* __T3_ADAPTER_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
new file mode 100644
index 0000000000..9d591f0ddf
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
@@ -0,0 +1,936 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+ AEL100X_TX_CONFIG1 = 0xc002,
+ AEL1002_PWR_DOWN_HI = 0xc011,
+ AEL1002_PWR_DOWN_LO = 0xc012,
+ AEL1002_XFI_EQL = 0xc015,
+ AEL1002_LB_EN = 0xc017,
+ AEL_OPT_SETTINGS = 0xc017,
+ AEL_I2C_CTRL = 0xc30a,
+ AEL_I2C_DATA = 0xc30b,
+ AEL_I2C_STAT = 0xc30c,
+ AEL2005_GPIO_CTRL = 0xc214,
+ AEL2005_GPIO_STAT = 0xc215,
+
+ AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */
+ AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */
+ AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */
+ AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */
+
+ AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */
+ AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */
+ AEL2020_GPIO_0 = 3, /* IN: unassigned */
+ AEL2020_GPIO_1 = 2, /* OUT: unassigned */
+ AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */
+};
+
+enum { edc_none, edc_sr, edc_twinax };
+
+/* PHY module I2C device address */
+enum {
+ MODULE_DEV_ADDR = 0xa0,
+ SFF_DEV_ADDR = 0xa2,
+};
+
+/* PHY transceiver type */
+enum {
+ phy_transtype_unknown = 0,
+ phy_transtype_sfp = 3,
+ phy_transtype_xfp = 6,
+};
+
+#define AEL2005_MODDET_IRQ 4
+
+struct reg_val {
+ unsigned short mmd_addr;
+ unsigned short reg_addr;
+ unsigned short clear_bits;
+ unsigned short set_bits;
+};
+
+static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
+{
+ int err;
+
+ for (err = 0; rv->mmd_addr && !err; rv++) {
+ if (rv->clear_bits == 0xffff)
+ err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
+ rv->set_bits);
+ else
+ err = t3_mdio_change_bits(phy, rv->mmd_addr,
+ rv->reg_addr, rv->clear_bits,
+ rv->set_bits);
+ }
+ return err;
+}
+
+static void ael100x_txon(struct cphy *phy)
+{
+ int tx_on_gpio =
+ phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
+
+ msleep(100);
+ t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
+ msleep(30);
+}
+
+/*
+ * Read an 8-bit word from a device attached to the PHY's i2c bus.
+ */
+static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
+{
+ int i, err;
+ unsigned int stat, data;
+
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
+ (dev_addr << 8) | (1 << 8) | word_addr);
+ if (err)
+ return err;
+
+ for (i = 0; i < 200; i++) {
+ msleep(1);
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
+ if (err)
+ return err;
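+		/* low two status bits equal to 1 mean the i2c read completed */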
+ if ((stat & 3) == 1) {
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
+ &data);
+ if (err)
+ return err;
+ return data >> 8;
+ }
+ }
+ CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
+ phy->mdio.prtad, dev_addr, word_addr);
+ return -ETIMEDOUT;
+}
+
+static int ael1002_power_down(struct cphy *phy, int enable)
+{
+ int err;
+
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, enable);
+ return err;
+}
+
+static int ael1002_reset(struct cphy *phy, int wait)
+{
+ int err;
+
+ if ((err = ael1002_power_down(phy, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) ||
+ (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN,
+ 0, 1 << 5)))
+ return err;
+ return 0;
+}
+
+static int ael1002_intr_noop(struct cphy *phy)
+{
+ return 0;
+}
+
+/*
+ * Get link status for a 10GBASE-R device.
+ */
+static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int stat0, stat1, stat2;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+ MDIO_PMA_RXDET, &stat0);
+
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PCS,
+ MDIO_PCS_10GBRT_STAT1, &stat1);
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT, &stat2);
+ if (err)
+ return err;
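+		/* the link is up only if PMA signal detect, PCS 10GBASE-R
+		 * receive link, and PHY XS lane alignment (bit 12) all agree */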
+ *link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static const struct cphy_ops ael1002_ops = {
+ .reset = ael1002_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+ "10GBASE-R");
+ ael100x_txon(phy);
+ return 0;
+}
+
+static int ael1006_reset(struct cphy *phy, int wait)
+{
+ return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
+}
+
+static const struct cphy_ops ael1006_ops = {
+ .reset = ael1006_reset,
+ .intr_enable = t3_phy_lasi_intr_enable,
+ .intr_disable = t3_phy_lasi_intr_disable,
+ .intr_clear = t3_phy_lasi_intr_clear,
+ .intr_handler = t3_phy_lasi_intr_handler,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+ "10GBASE-SR");
+ ael100x_txon(phy);
+ return 0;
+}
+
+/*
+ * Decode our module type.
+ */
+static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+
+ if (delay_ms)
+ msleep(delay_ms);
+
+ /* see SFF-8472 for below */
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
+ if (v < 0)
+ return v;
+
+ if (v == 0x10)
+ return phy_modtype_sr;
+ if (v == 0x20)
+ return phy_modtype_lr;
+ if (v == 0x40)
+ return phy_modtype_lrm;
+
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
+ if (v < 0)
+ return v;
+ if (v != 4)
+ goto unknown;
+
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
+ if (v < 0)
+ return v;
+
+ if (v & 0x80) {
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
+ if (v < 0)
+ return v;
+ return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
+ }
+unknown:
+ return phy_modtype_unknown;
+}
+
+/*
+ * Code to support the Aeluros/NetLogic 2005 10Gb PHY.
+ */
+static int ael2005_setup_sr_edc(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
+ { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
+ { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
+ { 0, 0, 0, 0 }
+ };
+
+ int i, err;
+
+ err = set_phy_regs(phy, regs);
+ if (err)
+ return err;
+
+ msleep(50);
+
+ if (phy->priv != edc_sr)
+ err = t3_get_edc_fw(phy, EDC_OPT_AEL2005,
+ EDC_OPT_AEL2005_SIZE);
+ if (err)
+ return err;
+
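+	/* t3_get_edc_fw() caches the EDC firmware as (register, value)
+	 * pairs in phy->phy_cache; replay them into the PMA/PMD MMD */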
+ for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+ phy->phy_cache[i],
+ phy->phy_cache[i + 1]);
+ if (!err)
+ phy->priv = edc_sr;
+ return err;
+}
+
+static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
+{
+ static const struct reg_val regs[] = {
+ { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
+ { 0, 0, 0, 0 }
+ };
+ static const struct reg_val preemphasis[] = {
+ { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
+ { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
+ { 0, 0, 0, 0 }
+ };
+ int i, err;
+
+ err = set_phy_regs(phy, regs);
+ if (!err && modtype == phy_modtype_twinax_long)
+ err = set_phy_regs(phy, preemphasis);
+ if (err)
+ return err;
+
+ msleep(50);
+
+ if (phy->priv != edc_twinax)
+ err = t3_get_edc_fw(phy, EDC_TWX_AEL2005,
+ EDC_TWX_AEL2005_SIZE);
+ if (err)
+ return err;
+
+ for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+ phy->phy_cache[i],
+ phy->phy_cache[i + 1]);
+ if (!err)
+ phy->priv = edc_twinax;
+ return err;
+}
+
+static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+ unsigned int stat;
+
+ v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat);
+ if (v)
+ return v;
+
+ if (stat & (1 << 8)) /* module absent */
+ return phy_modtype_none;
+
+ return ael2xxx_get_module_type(phy, delay_ms);
+}
+
+static int ael2005_intr_enable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200);
+ return err ? err : t3_phy_lasi_intr_enable(phy);
+}
+
+static int ael2005_intr_disable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100);
+ return err ? err : t3_phy_lasi_intr_disable(phy);
+}
+
+static int ael2005_intr_clear(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00);
+ return err ? err : t3_phy_lasi_intr_clear(phy);
+}
+
+static int ael2005_reset(struct cphy *phy, int wait)
+{
+ static const struct reg_val regs0[] = {
+ { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
+ { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
+ { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
+ { 0, 0, 0, 0 }
+ };
+ static const struct reg_val regs1[] = {
+ { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
+ { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
+ { 0, 0, 0, 0 }
+ };
+
+ int err;
+ unsigned int lasi_ctrl;
+
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (err)
+ return err;
+
+ err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0);
+ if (err)
+ return err;
+
+ msleep(125);
+ phy->priv = edc_none;
+ err = set_phy_regs(phy, regs0);
+ if (err)
+ return err;
+
+ msleep(50);
+
+ err = ael2005_get_module_type(phy, 0);
+ if (err < 0)
+ return err;
+ phy->modtype = err;
+
+ if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
+ err = ael2005_setup_twinax_edc(phy, err);
+ else
+ err = ael2005_setup_sr_edc(phy);
+ if (err)
+ return err;
+
+ err = set_phy_regs(phy, regs1);
+ if (err)
+ return err;
+
+ /* reset wipes out interrupts, reenable them if they were on */
+ if (lasi_ctrl & 1)
+ err = ael2005_intr_enable(phy);
+ return err;
+}
+
+static int ael2005_intr_handler(struct cphy *phy)
+{
+ unsigned int stat;
+ int ret, edc_needed, cause = 0;
+
+ ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat);
+ if (ret)
+ return ret;
+
+ if (stat & AEL2005_MODDET_IRQ) {
+ ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL,
+ 0xd00);
+ if (ret)
+ return ret;
+
+ /* modules have max 300 ms init time after hot plug */
+ ret = ael2005_get_module_type(phy, 300);
+ if (ret < 0)
+ return ret;
+
+ phy->modtype = ret;
+ if (ret == phy_modtype_none)
+ edc_needed = phy->priv; /* on unplug retain EDC */
+ else if (ret == phy_modtype_twinax ||
+ ret == phy_modtype_twinax_long)
+ edc_needed = edc_twinax;
+ else
+ edc_needed = edc_sr;
+
+ if (edc_needed != phy->priv) {
+ ret = ael2005_reset(phy, 0);
+ return ret ? ret : cphy_cause_module_change;
+ }
+ cause = cphy_cause_module_change;
+ }
+
+ ret = t3_phy_lasi_intr_handler(phy);
+ if (ret < 0)
+ return ret;
+
+ ret |= cause;
+ return ret ? ret : cphy_cause_link_change;
+}
+
+static const struct cphy_ops ael2005_ops = {
+ .reset = ael2005_reset,
+ .intr_enable = ael2005_intr_enable,
+ .intr_disable = ael2005_intr_disable,
+ .intr_clear = ael2005_intr_clear,
+ .intr_handler = ael2005_intr_handler,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_IRQ, "10GBASE-R");
+ msleep(125);
+ return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0,
+ 1 << 5);
+}
+
+/*
+ * Set up EDC and other parameters for operation with an optical module.
+ */
+static int ael2020_setup_sr_edc(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ /* set CDR offset to 10 */
+ { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
+
+ /* adjust 10G RX bias current */
+ { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
+ { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
+ { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err;
+
+ err = set_phy_regs(phy, regs);
+ msleep(50);
+ if (err)
+ return err;
+
+ phy->priv = edc_sr;
+ return 0;
+}
+
+/*
+ * Set up EDC and other parameters for operation with a TWINAX module.
+ */
+static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
+{
+ /* set uC to 40MHz */
+ static const struct reg_val uCclock40MHz[] = {
+ { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
+ { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
+ { 0, 0, 0, 0 }
+ };
+
+ /* activate uC clock */
+ static const struct reg_val uCclockActivate[] = {
+ { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
+ { 0, 0, 0, 0 }
+ };
+
+ /* set PC to start of SRAM and activate uC */
+ static const struct reg_val uCactivate[] = {
+ { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
+ { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
+ { 0, 0, 0, 0 }
+ };
+ int i, err;
+
+ /* set uC clock and activate it */
+ err = set_phy_regs(phy, uCclock40MHz);
+ msleep(500);
+ if (err)
+ return err;
+ err = set_phy_regs(phy, uCclockActivate);
+ msleep(500);
+ if (err)
+ return err;
+
+ if (phy->priv != edc_twinax)
+ err = t3_get_edc_fw(phy, EDC_TWX_AEL2020,
+ EDC_TWX_AEL2020_SIZE);
+ if (err)
+ return err;
+
+ for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2)
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+ phy->phy_cache[i],
+ phy->phy_cache[i + 1]);
+ /* activate uC */
+ err = set_phy_regs(phy, uCactivate);
+ if (!err)
+ phy->priv = edc_twinax;
+ return err;
+}
+
+/*
+ * Return the module type.
+ */
+static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+ unsigned int stat;
+
+ v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
+ if (v)
+ return v;
+
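+	/* each GPIO pin owns a 4-bit field in the GPIO registers; the low
+	 * bit of MODDET's field reads 1 while no module is plugged in */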
+ if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
+ /* module absent */
+ return phy_modtype_none;
+ }
+
+ return ael2xxx_get_module_type(phy, delay_ms);
+}
+
+/*
+ * Enable PHY interrupts. We enable "Module Detection" interrupts (on any
+ * state transition) and then generic Link Alarm Status Interrupt (LASI).
+ */
+static int ael2020_intr_enable(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ /* output Module's Loss Of Signal (LOS) to LED */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
+ 0xffff, 0x4 },
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
+
+ /* enable module detect status change interrupts */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err, link_ok = 0;
+
+ /* set up "link status" LED and enable module change interrupts */
+ err = set_phy_regs(phy, regs);
+ if (err)
+ return err;
+
+ err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL);
+ if (err)
+ return err;
+ if (link_ok)
+ t3_link_changed(phy->adapter,
+ phy2portid(phy));
+
+ err = t3_phy_lasi_intr_enable(phy);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Disable PHY interrupts. The mirror of the above ...
+ */
+static int ael2020_intr_disable(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ /* reset "link status" LED to "off" */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
+
+ /* disable module detect status change interrupts */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err;
+
+ /* turn off "link status" LED and disable module change interrupts */
+ err = set_phy_regs(phy, regs);
+ if (err)
+ return err;
+
+ return t3_phy_lasi_intr_disable(phy);
+}
+
+/*
+ * Clear PHY interrupt state.
+ */
+static int ael2020_intr_clear(struct cphy *phy)
+{
+ /*
+ * The GPIO Interrupt register on the AEL2020 is a "Latching High"
+ * (LH) register which is cleared to the current state when it's read.
+ * Thus, we simply read the register and discard the result.
+ */
+ unsigned int stat;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+ return err ? err : t3_phy_lasi_intr_clear(phy);
+}
+
+static const struct reg_val ael2020_reset_regs[] = {
+ /* Erratum #2: CDRLOL asserted, causing PMA link down status */
+ { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
+
+ /* force XAUI to send LF when RX_LOS is asserted */
+ { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
+
+ /* allow writes to transceiver module EEPROM on i2c bus */
+ { MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 },
+ { MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 },
+ { MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 },
+
+ /* end */
+ { 0, 0, 0, 0 }
+};
+/*
+ * Reset the PHY and put it into a canonical operating state.
+ */
+static int ael2020_reset(struct cphy *phy, int wait)
+{
+ int err;
+ unsigned int lasi_ctrl;
+
+ /* grab current interrupt state */
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (err)
+ return err;
+
+ err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
+ if (err)
+ return err;
+ msleep(100);
+
+ /* basic initialization for all module types */
+ phy->priv = edc_none;
+ err = set_phy_regs(phy, ael2020_reset_regs);
+ if (err)
+ return err;
+
+ /* determine module type and perform appropriate initialization */
+ err = ael2020_get_module_type(phy, 0);
+ if (err < 0)
+ return err;
+ phy->modtype = (u8)err;
+ if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
+ err = ael2020_setup_twinax_edc(phy, err);
+ else
+ err = ael2020_setup_sr_edc(phy);
+ if (err)
+ return err;
+
+ /* reset wipes out interrupts, reenable them if they were on */
+ if (lasi_ctrl & 1)
+ err = ael2005_intr_enable(phy);
+ return err;
+}
+
+/*
+ * Handle a PHY interrupt.
+ */
+static int ael2020_intr_handler(struct cphy *phy)
+{
+ unsigned int stat;
+ int ret, edc_needed, cause = 0;
+
+ ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+ if (ret)
+ return ret;
+
+ if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
+ /* modules have max 300 ms init time after hot plug */
+ ret = ael2020_get_module_type(phy, 300);
+ if (ret < 0)
+ return ret;
+
+ phy->modtype = (u8)ret;
+ if (ret == phy_modtype_none)
+ edc_needed = phy->priv; /* on unplug retain EDC */
+ else if (ret == phy_modtype_twinax ||
+ ret == phy_modtype_twinax_long)
+ edc_needed = edc_twinax;
+ else
+ edc_needed = edc_sr;
+
+ if (edc_needed != phy->priv) {
+ ret = ael2020_reset(phy, 0);
+ return ret ? ret : cphy_cause_module_change;
+ }
+ cause = cphy_cause_module_change;
+ }
+
+ ret = t3_phy_lasi_intr_handler(phy);
+ if (ret < 0)
+ return ret;
+
+ ret |= cause;
+ return ret ? ret : cphy_cause_link_change;
+}
+
+static const struct cphy_ops ael2020_ops = {
+ .reset = ael2020_reset,
+ .intr_enable = ael2020_intr_enable,
+ .intr_disable = ael2020_intr_disable,
+ .intr_clear = ael2020_intr_clear,
+ .intr_handler = ael2020_intr_handler,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_IRQ, "10GBASE-R");
+ msleep(125);
+
+ return set_phy_regs(phy, ael2020_reset_regs);
+}
+
+/*
+ * Get link status for a 10GBASE-X device.
+ */
+static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int stat0, stat1, stat2;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+ MDIO_PMA_RXDET, &stat0);
+
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PCS,
+ MDIO_PCS_10GBX_STAT1, &stat1);
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT, &stat2);
+ if (err)
+ return err;
+ *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1;
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static const struct cphy_ops qt2045_ops = {
+ .reset = ael1006_reset,
+ .intr_enable = t3_phy_lasi_intr_enable,
+ .intr_disable = t3_phy_lasi_intr_disable,
+ .intr_clear = t3_phy_lasi_intr_clear,
+ .intr_handler = t3_phy_lasi_intr_handler,
+ .get_link_status = get_link_status_x,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ unsigned int stat;
+
+ cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+ "10GBASE-CX4");
+
+ /*
+ * Some cards where the PHY is supposed to be at address 0 actually
+ * have it at 1.
+ */
+ if (!phy_addr &&
+ !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) &&
+ stat == 0xffff)
+ phy->mdio.prtad = 1;
+ return 0;
+}
+
+static int xaui_direct_reset(struct cphy *phy, int wait)
+{
+ return 0;
+}
+
+static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int status;
+ int prtad = phy->mdio.prtad;
+
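+		/* OR the four per-lane SERDES status registers together so
+		 * loss of signal on any lane marks the link down */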
+ status = t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT0, prtad)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT1, prtad)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT2, prtad)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT3, prtad));
+ *link_ok = !(status & F_LOWSIG0);
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static int xaui_direct_power_down(struct cphy *phy, int enable)
+{
+ return 0;
+}
+
+static const struct cphy_ops xaui_direct_ops = {
+ .reset = xaui_direct_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
+ .get_link_status = xaui_direct_get_link_status,
+ .power_down = xaui_direct_power_down,
+};
+
+int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+ "10GBASE-CX4");
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
new file mode 100644
index 0000000000..6af5d200e4
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "common.h"
+#include "regs.h"
+
+enum {
+ /* MDIO_DEV_PMA_PMD registers */
+ AQ_LINK_STAT = 0xe800,
+ AQ_IMASK_PMA = 0xf000,
+
+ /* MDIO_DEV_XGXS registers */
+ AQ_XAUI_RX_CFG = 0xc400,
+ AQ_XAUI_TX_CFG = 0xe400,
+
+ /* MDIO_DEV_ANEG registers */
+ AQ_1G_CTRL = 0xc400,
+ AQ_ANEG_STAT = 0xc800,
+
+ /* MDIO_DEV_VEND1 registers */
+ AQ_FW_VERSION = 0x0020,
+ AQ_IFLAG_GLOBAL = 0xfc00,
+ AQ_IMASK_GLOBAL = 0xff00,
+};
+
+enum {
+ IMASK_PMA = 1 << 2,
+ IMASK_GLOBAL = 1 << 15,
+ ADV_1G_FULL = 1 << 15,
+ ADV_1G_HALF = 1 << 14,
+ ADV_10G_FULL = 1 << 12,
+ AQ_RESET = (1 << 14) | (1 << 15),
+ AQ_LOWPOWER = 1 << 12,
+};
+
+static int aq100x_reset(struct cphy *phy, int wait)
+{
+ /*
+	 * Ignore the caller-specified wait time; always wait for the reset to
+ * complete. Can take up to 3s.
+ */
+ int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
+
+ if (err)
+ CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
+ phy->mdio.prtad, err);
+
+ return err;
+}
+
+static int aq100x_intr_enable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
+ return err;
+}
+
+static int aq100x_intr_disable(struct cphy *phy)
+{
+ return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
+}
+
+static int aq100x_intr_clear(struct cphy *phy)
+{
+ unsigned int v;
+
+ t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
+ t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+ return 0;
+}
+
+static int aq100x_intr_handler(struct cphy *phy)
+{
+ int err;
+ unsigned int cause, v;
+
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
+ if (err)
+ return err;
+
+ /* Read (and reset) the latching version of the status */
+ t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+ return cphy_cause_link_change;
+}
+
+static int aq100x_power_down(struct cphy *phy, int off)
+{
+ return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, off);
+}
+
+static int aq100x_autoneg_enable(struct cphy *phy)
+{
+ int err;
+
+ err = aq100x_power_down(phy, 0);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_AN, MDIO_CTRL1,
+ BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+ return err;
+}
+
+static int aq100x_autoneg_restart(struct cphy *phy)
+{
+ int err;
+
+ err = aq100x_power_down(phy, 0);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_AN, MDIO_CTRL1,
+ BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+ return err;
+}
+
+static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+ unsigned int adv;
+ int err;
+
+ /* 10G advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_10000baseT_Full)
+ adv |= ADV_10G_FULL;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ ADV_10G_FULL, adv);
+ if (err)
+ return err;
+
+ /* 1G advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_1000baseT_Full)
+ adv |= ADV_1G_FULL;
+ if (advertise_map & ADVERTISED_1000baseT_Half)
+ adv |= ADV_1G_HALF;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
+ ADV_1G_FULL | ADV_1G_HALF, adv);
+ if (err)
+ return err;
+
+ /* 100M, pause advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise_map & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ if (advertise_map & ADVERTISED_Pause)
+ adv |= ADVERTISE_PAUSE_CAP;
+ if (advertise_map & ADVERTISED_Asym_Pause)
+ adv |= ADVERTISE_PAUSE_ASYM;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
+ 0xfe0, adv);
+
+ return err;
+}
+
+static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
+{
+ return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ BMCR_LOOPBACK, enable);
+}
+
+static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ /* no can do */
+ return -1;
+}
+
+static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ int err;
+ unsigned int v;
+
+ if (link_ok) {
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
+ if (err)
+ return err;
+
+ *link_ok = v & 1;
+ if (!*link_ok)
+ return 0;
+ }
+
+ err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
+ if (err)
+ return err;
+
+ if (speed) {
+ switch (v & 0x6) {
+ case 0x6:
+ *speed = SPEED_10000;
+ break;
+ case 0x4:
+ *speed = SPEED_1000;
+ break;
+ case 0x2:
+ *speed = SPEED_100;
+ break;
+ case 0x0:
+ *speed = SPEED_10;
+ break;
+ }
+ }
+
+ if (duplex)
+ *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
+
+ return 0;
+}
+
+static const struct cphy_ops aq100x_ops = {
+ .reset = aq100x_reset,
+ .intr_enable = aq100x_intr_enable,
+ .intr_disable = aq100x_intr_disable,
+ .intr_clear = aq100x_intr_clear,
+ .intr_handler = aq100x_intr_handler,
+ .autoneg_enable = aq100x_autoneg_enable,
+ .autoneg_restart = aq100x_autoneg_restart,
+ .advertise = aq100x_advertise,
+ .set_loopback = aq100x_set_loopback,
+ .set_speed_duplex = aq100x_set_speed_duplex,
+ .get_link_status = aq100x_get_link_status,
+ .power_down = aq100x_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ unsigned int v, v2, gpio, wait;
+ int err;
+
+ cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
+ SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI,
+ "1000/10GBASE-T");
+
+ /*
+	 * The PHY has been out of reset ever since the system powered up, so
+	 * we perform a hard reset here.
+ */
+ gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
+ msleep(1);
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
+
+ /*
+ * Give it enough time to load the firmware and get ready for mdio.
+ */
+ msleep(1000);
+ wait = 500; /* in 10ms increments */
+ do {
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+ if (err || v == 0xffff) {
+
+ /* Allow prep_adapter to succeed when ffff is read */
+
+ CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
+ phy_addr, err, v);
+ goto done;
+ }
+
+ v &= AQ_RESET;
+ if (v)
+ msleep(10);
+ } while (v && --wait);
+ if (v) {
+ CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
+ phy_addr, v);
+
+ goto done; /* let prep_adapter succeed */
+ }
+
+	/* Datasheet says 3s max, but longer has been observed */
+ wait = (500 - wait) * 10 + 1000;
+ if (wait > 3000)
+ CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
+
+ /* Firmware version check. */
+ t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
+ if (v != 101)
+ CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
+ phy_addr, v);
+
+ /*
+ * The PHY should start in really-low-power mode. Prepare it for normal
+ * operations.
+ */
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+ if (err)
+ return err;
+ if (v & AQ_LOWPOWER) {
+ err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
+ AQ_LOWPOWER, 0);
+ if (err)
+ return err;
+ msleep(10);
+ } else
+ CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
+ phy_addr);
+
+ /*
+ * Verify XAUI settings, but let prep succeed no matter what.
+ */
+ v = v2 = 0;
+ t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
+ t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
+ if (v != 0x1b || v2 != 0x1b)
+ CH_WARN(adapter,
+ "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
+ phy_addr, v, v2);
+
+done:
+ return err;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
new file mode 100644
index 0000000000..ecd025dda8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -0,0 +1,773 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include "version.h"
+
+#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+
+/*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+#define CH_MSG(adapter, level, category, fmt, ...) do { \
+ if ((adapter)->msg_enable & NETIF_MSG_##category) \
+ dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
+ ## __VA_ARGS__); \
+} while (0)
+
+#ifdef DEBUG
+# define CH_DBG(adapter, category, fmt, ...) \
+ CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+#else
+# define CH_DBG(adapter, category, fmt, ...)
+#endif
+
+/* Additional NETIF_MSG_* categories */
+#define NETIF_MSG_MMIO 0x8000000
+
+enum {
+ MAX_NPORTS = 2, /* max # of ports */
+ MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
+ EEPROMSIZE = 8192, /* Serial EEPROM size */
+ SERNUM_LEN = 16, /* Serial # length */
+ RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ PROTO_SRAM_LINES = 128, /* size of TP sram */
+};
+
+#define MAX_RX_COALESCING_LEN 12288U
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+enum {
+ SUPPORTED_IRQ = 1 << 24
+};
+
+enum { /* adapter interrupt-maintained statistics */
+ STAT_ULP_CH0_PBL_OOB,
+ STAT_ULP_CH1_PBL_OOB,
+ STAT_PCI_CORR_ECC,
+
+ IRQ_NUM_STATS /* keep last */
+};
+
+#define TP_VERSION_MAJOR 1
+#define TP_VERSION_MINOR 1
+#define TP_VERSION_MICRO 0
+
+#define S_TP_VERSION_MAJOR 16
+#define M_TP_VERSION_MAJOR 0xFF
+#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
+#define G_TP_VERSION_MAJOR(x) \
+ (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
+
+#define S_TP_VERSION_MINOR 8
+#define M_TP_VERSION_MINOR 0xFF
+#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
+#define G_TP_VERSION_MINOR(x) \
+ (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
+
+#define S_TP_VERSION_MICRO 0
+#define M_TP_VERSION_MICRO 0xFF
+#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
+#define G_TP_VERSION_MICRO(x) \
+ (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
+
+enum {
+ SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
+ SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
+ SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
+};
+
+enum sge_context_type { /* SGE egress context types */
+ SGE_CNTXT_RDMA = 0,
+ SGE_CNTXT_ETH = 2,
+ SGE_CNTXT_OFLD = 4,
+ SGE_CNTXT_CTRL = 5
+};
+
+enum {
+ AN_PKT_SIZE = 32, /* async notification packet size */
+ IMMED_PKT_SIZE = 48 /* packet size for immediate data */
+};
+
+struct sg_ent { /* SGE scatter/gather entry */
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+#ifndef SGE_NUM_GENBITS
+/* Must be 1 or 2 */
+# define SGE_NUM_GENBITS 2
+#endif
+
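+/* A flit is a 64-bit datum.  WR_FLITS bounds the size of a work request,
+ * derived from the Tx descriptor size and the number of generation bits. */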
+#define TX_DESC_FLITS 16U
+#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
+
+struct cphy;
+struct adapter;
+
+struct mdio_ops {
+ int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr);
+ int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val);
+ unsigned mode_support;
+};
+
+struct adapter_info {
+ unsigned char nports0; /* # of ports on channel 0 */
+ unsigned char nports1; /* # of ports on channel 1 */
+ unsigned char phy_base_addr; /* MDIO PHY base address */
+ unsigned int gpio_out; /* GPIO output settings */
+ unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
+ unsigned long caps; /* adapter capabilities */
+ const struct mdio_ops *mdio_ops; /* MDIO operations */
+ const char *desc; /* product description */
+};
+
+struct mc5_stats {
+ unsigned long parity_err;
+ unsigned long active_rgn_full;
+ unsigned long nfa_srch_err;
+ unsigned long unknown_cmd;
+ unsigned long reqq_parity_err;
+ unsigned long dispq_parity_err;
+ unsigned long del_act_empty;
+};
+
+struct mc7_stats {
+ unsigned long corr_err;
+ unsigned long uncorr_err;
+ unsigned long parity_err;
+ unsigned long addr_err;
+};
+
+struct mac_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_octets_bad; /* total # of octets in error frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_mcast_frames; /* good multicast frames */
+ u64 tx_bcast_frames; /* good broadcast frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_deferred; /* frames with deferred transmissions */
+ u64 tx_late_collisions; /* # of late collisions */
+ u64 tx_total_collisions; /* # of total collisions */
+	u64 tx_excess_collisions; /* frame errors from excessive collisions */
+ u64 tx_underrun; /* # of Tx FIFO underruns */
+ u64 tx_len_errs; /* # of Tx length errors */
+ u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
+ u64 tx_excess_deferral; /* # of frames with excessive deferral */
+ u64 tx_fcs_errs; /* # of frames with bad FCS */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_octets_bad; /* total # of octets in error frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_mcast_frames; /* good multicast frames */
+ u64 rx_bcast_frames; /* good broadcast frames */
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_fcs_errs; /* # of received frames with bad FCS */
+ u64 rx_align_errs; /* alignment errors */
+ u64 rx_symbol_errs; /* symbol errors */
+ u64 rx_data_errs; /* data errors */
+ u64 rx_sequence_errs; /* sequence errors */
+ u64 rx_runt; /* # of runt frames */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_short; /* # of short frames */
+ u64 rx_too_long; /* # of oversized frames */
+ u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
+
+ unsigned long tx_fifo_parity_err;
+ unsigned long rx_fifo_parity_err;
+ unsigned long tx_fifo_urun;
+ unsigned long rx_fifo_ovfl;
+ unsigned long serdes_signal_loss;
+ unsigned long xaui_pcs_ctc_err;
+ unsigned long xaui_pcs_align_change;
+
+ unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
+ unsigned long num_resets; /* # times reset due to stuck TX */
+
+ unsigned long link_faults; /* # detected link faults */
+};
+
+struct tp_mib_stats {
+ u32 ipInReceive_hi;
+ u32 ipInReceive_lo;
+ u32 ipInHdrErrors_hi;
+ u32 ipInHdrErrors_lo;
+ u32 ipInAddrErrors_hi;
+ u32 ipInAddrErrors_lo;
+ u32 ipInUnknownProtos_hi;
+ u32 ipInUnknownProtos_lo;
+ u32 ipInDiscards_hi;
+ u32 ipInDiscards_lo;
+ u32 ipInDelivers_hi;
+ u32 ipInDelivers_lo;
+ u32 ipOutRequests_hi;
+ u32 ipOutRequests_lo;
+ u32 ipOutDiscards_hi;
+ u32 ipOutDiscards_lo;
+ u32 ipOutNoRoutes_hi;
+ u32 ipOutNoRoutes_lo;
+ u32 ipReasmTimeout;
+ u32 ipReasmReqds;
+ u32 ipReasmOKs;
+ u32 ipReasmFails;
+
+ u32 reserved[8];
+
+ u32 tcpActiveOpens;
+ u32 tcpPassiveOpens;
+ u32 tcpAttemptFails;
+ u32 tcpEstabResets;
+ u32 tcpOutRsts;
+ u32 tcpCurrEstab;
+ u32 tcpInSegs_hi;
+ u32 tcpInSegs_lo;
+ u32 tcpOutSegs_hi;
+ u32 tcpOutSegs_lo;
+ u32 tcpRetransSeg_hi;
+ u32 tcpRetransSeg_lo;
+ u32 tcpInErrs_hi;
+ u32 tcpInErrs_lo;
+ u32 tcpRtoMin;
+ u32 tcpRtoMax;
+};
+
+struct tp_params {
+ unsigned int nchan; /* # of channels */
+ unsigned int pmrx_size; /* total PMRX capacity */
+ unsigned int pmtx_size; /* total PMTX capacity */
+ unsigned int cm_size; /* total CM capacity */
+ unsigned int chan_rx_size; /* per channel Rx size */
+ unsigned int chan_tx_size; /* per channel Tx size */
+ unsigned int rx_pg_size; /* Rx page size */
+ unsigned int tx_pg_size; /* Tx page size */
+ unsigned int rx_num_pgs; /* # of Rx pages */
+ unsigned int tx_num_pgs; /* # of Tx pages */
+ unsigned int ntimer_qs; /* # of timer queues */
+};
+
+struct qset_params { /* SGE queue set parameters */
+ unsigned int polling; /* polling/interrupt service for rspq */
+ unsigned int coalesce_usecs; /* irq coalescing timer */
+ unsigned int rspq_size; /* # of entries in response queue */
+ unsigned int fl_size; /* # of entries in regular free list */
+ unsigned int jumbo_size; /* # of entries in jumbo free list */
+ unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
+ unsigned int cong_thres; /* FL congestion threshold */
+ unsigned int vector; /* Interrupt (line or vector) number */
+};
+
+struct sge_params {
+ unsigned int max_pkt_size; /* max offload pkt size */
+ struct qset_params qset[SGE_QSETS];
+};
+
+struct mc5_params {
+ unsigned int mode; /* selects MC5 width */
+ unsigned int nservers; /* size of server region */
+ unsigned int nfilters; /* size of filter region */
+ unsigned int nroutes; /* size of routing region */
+};
+
+/* Default MC5 region sizes */
+enum {
+ DEFAULT_NSERVERS = 512,
+ DEFAULT_NFILTERS = 128
+};
+
+/* MC5 modes; these must be non-zero */
+enum {
+ MC5_MODE_144_BIT = 1,
+ MC5_MODE_72_BIT = 2
+};
+
+/* MC5 min active region size */
+enum { MC5_MIN_TIDS = 16 };
+
+struct vpd_params {
+ unsigned int cclk;
+ unsigned int mclk;
+ unsigned int uclk;
+ unsigned int mdc;
+ unsigned int mem_timing;
+ u8 sn[SERNUM_LEN + 1];
+ u8 eth_base[6];
+ u8 port_type[MAX_NPORTS];
+ unsigned short xauicfg[2];
+};
+
+struct pci_params {
+ unsigned int vpd_cap_addr;
+ unsigned short speed;
+ unsigned char width;
+ unsigned char variant;
+};
+
+enum {
+ PCI_VARIANT_PCI,
+ PCI_VARIANT_PCIX_MODE1_PARITY,
+ PCI_VARIANT_PCIX_MODE1_ECC,
+ PCI_VARIANT_PCIX_266_MODE2,
+ PCI_VARIANT_PCIE
+};
+
+struct adapter_params {
+ struct sge_params sge;
+ struct mc5_params mc5;
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+
+ const struct adapter_info *info;
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned int nports; /* # of ethernet ports */
+ unsigned int chan_map; /* bitmap of in-use Tx channels */
+ unsigned int stats_update_period; /* MAC stats accumulation period */
+ unsigned int linkpoll_period; /* link poll period in 0.1s */
+ unsigned int rev; /* chip revision */
+ unsigned int offload;
+};
+
+enum { /* chip revisions */
+ T3_REV_A = 0,
+ T3_REV_B = 2,
+ T3_REV_B2 = 3,
+ T3_REV_C = 4,
+};
+
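+/*
+ * Packet trace filter: each header field is given as a value/mask
+ * pair, where the mask selects which bits of the field must match.
+ */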
+struct trace_params {
+ u32 sip;
+ u32 sip_mask;
+ u32 dip;
+ u32 dip_mask;
+ u16 sport;
+ u16 sport_mask;
+ u16 dport;
+ u16 dport_mask;
+ u32 vlan:12;
+ u32 vlan_mask:12;
+ u32 intf:4;
+ u32 intf_mask:4;
+ u8 proto;
+ u8 proto_mask;
+};
+
+struct link_config {
+ unsigned int supported; /* link capabilities */
+ unsigned int advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_duplex; /* duplex user has requested */
+ unsigned char duplex; /* actual link duplex */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned int link_ok; /* link up? */
+};
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+struct mc5 {
+ struct adapter *adapter;
+ unsigned int tcam_size;
+ unsigned char part_type;
+ unsigned char parity_enabled;
+ unsigned char mode;
+ struct mc5_stats stats;
+};
+
+static inline unsigned int t3_mc5_size(const struct mc5 *p)
+{
+ return p->tcam_size;
+}
+
+struct mc7 {
+ struct adapter *adapter; /* backpointer to adapter */
+ unsigned int size; /* memory size in bytes */
+ unsigned int width; /* MC7 interface width */
+ unsigned int offset; /* register address offset for MC7 instance */
+ const char *name; /* name of MC7 instance */
+ struct mc7_stats stats; /* MC7 statistics */
+};
+
+static inline unsigned int t3_mc7_size(const struct mc7 *p)
+{
+ return p->size;
+}
+
+struct cmac {
+ struct adapter *adapter;
+ unsigned int offset;
+ unsigned int nucast; /* # of address filters for unicast MACs */
+ unsigned int tx_tcnt;
+ unsigned int tx_xcnt;
+ u64 tx_mcnt;
+ unsigned int rx_xcnt;
+ unsigned int rx_ocnt;
+ u64 rx_mcnt;
+ unsigned int toggle_cnt;
+ unsigned int txen;
+ u64 rx_pause;
+ struct mac_stats stats;
+};
+
+enum {
+ MAC_DIRECTION_RX = 1,
+ MAC_DIRECTION_TX = 2,
+ MAC_RXFIFO_SIZE = 32768
+};
+
+/* PHY loopback direction */
+enum {
+ PHY_LOOPBACK_TX = 1,
+ PHY_LOOPBACK_RX = 2
+};
+
+/* PHY interrupt types */
+enum {
+ cphy_cause_link_change = 1,
+ cphy_cause_fifo_error = 2,
+ cphy_cause_module_change = 4,
+};
+
+/* PHY module types */
+enum {
+ phy_modtype_none,
+ phy_modtype_sr,
+ phy_modtype_lr,
+ phy_modtype_lrm,
+ phy_modtype_twinax,
+ phy_modtype_twinax_long,
+ phy_modtype_unknown
+};
+
+/* PHY operations */
+struct cphy_ops {
+ int (*reset)(struct cphy *phy, int wait);
+
+ int (*intr_enable)(struct cphy *phy);
+ int (*intr_disable)(struct cphy *phy);
+ int (*intr_clear)(struct cphy *phy);
+ int (*intr_handler)(struct cphy *phy);
+
+ int (*autoneg_enable)(struct cphy *phy);
+ int (*autoneg_restart)(struct cphy *phy);
+
+ int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+ int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
+ int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+ int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc);
+ int (*power_down)(struct cphy *phy, int enable);
+
+ u32 mmds;
+};
+enum {
+ EDC_OPT_AEL2005 = 0,
+ EDC_OPT_AEL2005_SIZE = 1084,
+ EDC_TWX_AEL2005 = 1,
+ EDC_TWX_AEL2005_SIZE = 1464,
+ EDC_TWX_AEL2020 = 2,
+ EDC_TWX_AEL2020_SIZE = 1628,
+ EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */
+};
+
+/* A PHY instance */
+struct cphy {
+ u8 modtype; /* PHY module type */
+ short priv; /* scratch pad */
+ unsigned int caps; /* PHY capabilities */
+ struct adapter *adapter; /* associated adapter */
+ const char *desc; /* PHY description */
+ unsigned long fifo_errors; /* FIFO over/under-flows */
+ const struct cphy_ops *ops; /* PHY operations */
+ struct mdio_if_info mdio;
+ u16 phy_cache[EDC_MAX_SIZE]; /* EDC cache */
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg,
+ unsigned int *valp)
+{
+ int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg);
+ *valp = (rc >= 0) ? rc : -1;
+ return (rc >= 0) ? 0 : rc;
+}
+
+static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
+ unsigned int val)
+{
+ return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd,
+ reg, val);
+}
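+
+/*
+ * Illustrative use of the wrappers above (example only; MY_REG is a
+ * placeholder register address):
+ *
+ *	unsigned int val;
+ *
+ *	if (!t3_mdio_read(phy, MDIO_MMD_PMAPMD, MY_REG, &val))
+ *		t3_mdio_write(phy, MDIO_MMD_PMAPMD, MY_REG, val | 1);
+ */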
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct cphy_ops *phy_ops,
+ const struct mdio_ops *mdio_ops,
+ unsigned int caps, const char *desc)
+{
+ phy->caps = caps;
+ phy->adapter = adapter;
+ phy->desc = desc;
+ phy->ops = phy_ops;
+ if (mdio_ops) {
+ phy->mdio.prtad = phy_addr;
+ phy->mdio.mmds = phy_ops->mmds;
+ phy->mdio.mode_support = mdio_ops->mode_support;
+ phy->mdio.mdio_read = mdio_ops->read;
+ phy->mdio.mdio_write = mdio_ops->write;
+ }
+}
+
+/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
+#define MAC_STATS_ACCUM_SECS 180
+
+#define XGM_REG(reg_addr, idx) \
+ ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
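+
+/*
+ * For example (illustrative only), XGM_REG(A_XGM_TX_CTRL, 1) yields the
+ * TX control register of the second XGMAC: the index scales the fixed
+ * stride between consecutive XGMAC register blocks.
+ */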
+
+struct addr_val_pair {
+ unsigned int reg_addr;
+ unsigned int val;
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
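+
+/*
+ * Typical use (example only): visit every port's net_device.
+ *
+ *	int i;
+ *
+ *	for_each_port(adapter, i)
+ *		netif_carrier_off(adapter->port[i]);
+ */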
+
+#define adapter_info(adap) ((adap)->params.info)
+
+static inline int uses_xaui(const struct adapter *adap)
+{
+ return adapter_info(adap)->caps & SUPPORTED_AUI;
+}
+
+static inline int is_10G(const struct adapter *adap)
+{
+ return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
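+/*
+ * Worked example (assuming vpd.cclk is stored in kHz, as the division
+ * by 1000 below suggests): a 200 MHz core clock gives cclk = 200000 and
+ * hence 200 core ticks per microsecond.
+ */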
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int is_pcie(const struct adapter *adap)
+{
+ return adap->params.pci.variant == PCI_VARIANT_PCIE;
+}
+
+void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+ u32 val);
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+ int n, unsigned int offset);
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp);
+static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
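+
+/*
+ * Illustrative call (example only; A_SOME_REG and F_BUSY are placeholder
+ * names): poll up to 10 times, 5 us apart, for F_BUSY to read as 0:
+ *
+ *	if (t3_wait_op_done(adapter, A_SOME_REG, F_BUSY, 0, 10, 5))
+ *		CH_ERR(adapter, "operation timed out\n");
+ */
+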
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+ unsigned int set);
+int t3_phy_reset(struct cphy *phy, int mmd, int wait);
+int t3_phy_advertise(struct cphy *phy, unsigned int advert);
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
+int t3_phy_lasi_intr_enable(struct cphy *phy);
+int t3_phy_lasi_intr_disable(struct cphy *phy);
+int t3_phy_lasi_intr_clear(struct cphy *phy);
+int t3_phy_lasi_intr_handler(struct cphy *phy);
+
+void t3_intr_enable(struct adapter *adapter);
+void t3_intr_disable(struct adapter *adapter);
+void t3_intr_clear(struct adapter *adapter);
+void t3_xgm_intr_enable(struct adapter *adapter, int idx);
+void t3_xgm_intr_disable(struct adapter *adapter, int idx);
+void t3_port_intr_enable(struct adapter *adapter, int idx);
+void t3_port_intr_disable(struct adapter *adapter, int idx);
+int t3_slow_intr_handler(struct adapter *adapter);
+int t3_phy_intr_handler(struct adapter *adapter);
+
+void t3_link_changed(struct adapter *adapter, int port_id);
+void t3_link_fault(struct adapter *adapter, int port_id);
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
+int t3_seeprom_wp(struct adapter *adapter, int enable);
+int t3_get_tp_version(struct adapter *adapter, u32 *vers);
+int t3_check_tpsram_version(struct adapter *adapter);
+int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
+ unsigned int size);
+int t3_set_proto_sram(struct adapter *adap, const u8 *data);
+int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
+int t3_get_fw_version(struct adapter *adapter, u32 *vers);
+int t3_check_fw_version(struct adapter *adapter);
+int t3_init_hw(struct adapter *adapter, u32 fw_params);
+int t3_reset_adapter(struct adapter *adapter);
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ int reset);
+int t3_replay_prep_adapter(struct adapter *adapter);
+void t3_led_ready(struct adapter *adapter);
+void t3_fatal_err(struct adapter *adapter);
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+ const u8 * cpus, const u16 *rspq);
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp);
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+ u64 *buf);
+
+int t3_mac_reset(struct cmac *mac);
+void t3b_pcs_reset(struct cmac *mac);
+void t3_mac_disable_exact_filters(struct cmac *mac);
+void t3_mac_enable_exact_filters(struct cmac *mac);
+int t3_mac_enable(struct cmac *mac, int which);
+int t3_mac_disable(struct cmac *mac, int which);
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
+int t3_mac_set_num_ucast(struct cmac *mac, int n);
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
+int t3b2_mac_watchdog_task(struct cmac *mac);
+
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+ unsigned int nroutes);
+void t3_mc5_intr_handler(struct mc5 *mc5);
+
+void t3_tp_set_offload_mode(struct adapter *adap, int enable);
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+ unsigned short alpha[NCCTRL_WIN],
+ unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
+void t3_config_trace_filter(struct adapter *adapter,
+ const struct trace_params *tp, int filter_index,
+ int invert, int enable);
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
+
+void t3_sge_prep(struct adapter *adap, struct sge_params *p);
+void t3_sge_init(struct adapter *adap, struct sge_params *p);
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+ enum sge_context_type type, int respq, u64 base_addr,
+ unsigned int size, unsigned int token, int gen,
+ unsigned int cidx);
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+ int gts_enable, u64 base_addr, unsigned int size,
+ unsigned int esize, unsigned int cong_thres, int gen,
+ unsigned int cidx);
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+ int irq_vec_idx, u64 base_addr, unsigned int size,
+ unsigned int fl_thres, int gen, unsigned int cidx);
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+ unsigned int size, int rspq, int ovfl_mode,
+ unsigned int credits, unsigned int credit_thres);
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+ unsigned int credits);
+
+int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+
+extern struct workqueue_struct *cxgb3_wq;
+#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 0000000000..369fe711fd
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
+#define _CXGB3_OFFLOAD_CTL_DEFS_H
+
+enum {
+ GET_MAX_OUTSTANDING_WR = 0,
+ GET_TX_MAX_CHUNK = 1,
+ GET_TID_RANGE = 2,
+ GET_STID_RANGE = 3,
+ GET_RTBL_RANGE = 4,
+ GET_L2T_CAPACITY = 5,
+ GET_MTUS = 6,
+ GET_WR_LEN = 7,
+ GET_IFF_FROM_MAC = 8,
+ GET_DDP_PARAMS = 9,
+ GET_PORTS = 10,
+
+ ULP_ISCSI_GET_PARAMS = 11,
+ ULP_ISCSI_SET_PARAMS = 12,
+
+ RDMA_GET_PARAMS = 13,
+ RDMA_CQ_OP = 14,
+ RDMA_CQ_SETUP = 15,
+ RDMA_CQ_DISABLE = 16,
+ RDMA_CTRL_QP_SETUP = 17,
+ RDMA_GET_MEM = 18,
+ RDMA_GET_MIB = 19,
+
+ GET_RX_PAGE_INFO = 50,
+ GET_ISCSI_IPV4ADDR = 51,
+
+ GET_EMBEDDED_INFO = 70,
+};
+
+/*
+ * Structure used to describe a TID range. Valid TIDs are [base, base+num).
+ */
+struct tid_range {
+ unsigned int base; /* first TID */
+ unsigned int num; /* number of TIDs in range */
+};
+
+/*
+ * Structure used to request the size and contents of the MTU table.
+ */
+struct mtutab {
+ unsigned int size; /* # of entries in the MTU table */
+ const unsigned short *mtus; /* the MTU table values */
+};
+
+struct net_device;
+
+/*
+ * Structure used to request the adapter net_device owning a given MAC address.
+ */
+struct iff_mac {
+ struct net_device *dev; /* the net_device */
+ const unsigned char *mac_addr; /* MAC address to lookup */
+ u16 vlan_tag;
+};
+
+/* Structure used to request a port's iSCSI IPv4 address */
+struct iscsi_ipv4addr {
+ struct net_device *dev; /* the net_device */
+ __be32 ipv4addr; /* the returned iSCSI IPv4 address */
+};
+
+struct pci_dev;
+
+/*
+ * Structure used to request the TCP DDP parameters.
+ */
+struct ddp_params {
+ unsigned int llimit; /* TDDP region start address */
+ unsigned int ulimit; /* TDDP region end address */
+ unsigned int tag_mask; /* TDDP tag mask */
+ struct pci_dev *pdev;
+};
+
+struct adap_ports {
+ unsigned int nports; /* number of ports on this adapter */
+ struct net_device *lldevs[2];
+};
+
+/*
+ * Structure used to return information to the iSCSI layer.
+ */
+struct ulp_iscsi_info {
+ unsigned int offset;
+ unsigned int llimit;
+ unsigned int ulimit;
+ unsigned int tagmask;
+ u8 pgsz_factor[4];
+ unsigned int max_rxsz;
+ unsigned int max_txsz;
+ struct pci_dev *pdev;
+};
+
+/*
+ * Structure used to return information to the RDMA layer.
+ */
+struct rdma_info {
+ unsigned int tpt_base; /* TPT base address */
+ unsigned int tpt_top; /* TPT last entry address */
+ unsigned int pbl_base; /* PBL base address */
+ unsigned int pbl_top; /* PBL last entry address */
+ unsigned int rqt_base; /* RQT base address */
+ unsigned int rqt_top; /* RQT last entry address */
+ unsigned int udbell_len; /* user doorbell region length */
+ unsigned long udbell_physbase; /* user doorbell physical start addr */
+ void __iomem *kdb_addr; /* kernel doorbell register address */
+ struct pci_dev *pdev; /* associated PCI device */
+};
+
+/*
+ * Structure used to request an operation on an RDMA completion queue.
+ */
+struct rdma_cq_op {
+ unsigned int id;
+ unsigned int op;
+ unsigned int credits;
+};
+
+/*
+ * Structure used to setup RDMA completion queues.
+ */
+struct rdma_cq_setup {
+ unsigned int id;
+ unsigned long long base_addr;
+ unsigned int size;
+ unsigned int credits;
+ unsigned int credit_thres;
+ unsigned int ovfl_mode;
+};
+
+/*
+ * Structure used to setup the RDMA control egress context.
+ */
+struct rdma_ctrlqp_setup {
+ unsigned long long base_addr;
+ unsigned int size;
+};
+
+/*
+ * Offload TX/RX page information.
+ */
+struct ofld_page_info {
+ unsigned int page_size; /* Page size, should be a power of 2 */
+ unsigned int num; /* Number of pages */
+};
+
+/*
+ * Structure used to get firmware and protocol engine versions.
+ */
+struct ch_embedded_info {
+ u32 fw_vers;
+ u32 tp_vers;
+};
+#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
new file mode 100644
index 0000000000..f04e81f337
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_DEFS_H
+#define _CHELSIO_DEFS_H
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include "t3cdev.h"
+
+#include "cxgb3_offload.h"
+
+#define VALIDATE_TID 1
+
+/*
+ * Map an ATID or STID to its entry in the corresponding TID table.
+ */
+static inline union active_open_entry *atid2entry(const struct tid_info *t,
+ unsigned int atid)
+{
+ return &t->atid_tab[atid - t->atid_base];
+}
+
+static inline union listen_entry *stid2entry(const struct tid_info *t,
+ unsigned int stid)
+{
+ return &t->stid_tab[stid - t->stid_base];
+}
+
+/*
+ * Find the connection corresponding to a TID.
+ */
+static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
+ unsigned int tid)
+{
+ struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+ &(t->tid_tab[tid]) : NULL;
+
+ return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
+}
+
+/*
+ * Find the connection corresponding to a server TID.
+ */
+static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
+ unsigned int tid)
+{
+ union listen_entry *e;
+
+ if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
+ return NULL;
+
+ e = stid2entry(t, tid);
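+ /*
+ * Free entries are linked through the TID tables themselves, so a
+ * ->next pointer that falls inside the tables means this STID is on
+ * the free list rather than in use.
+ */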
+ if ((void *)e->next >= (void *)t->tid_tab &&
+ (void *)e->next < (void *)&t->atid_tab[t->natids])
+ return NULL;
+
+ return &e->t3c_tid;
+}
+
+/*
+ * Find the connection corresponding to an active-open TID.
+ */
+static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
+ unsigned int tid)
+{
+ union active_open_entry *e;
+
+ if (tid < t->atid_base || tid >= t->atid_base + t->natids)
+ return NULL;
+
+ e = atid2entry(t, tid);
+ if ((void *)e->next >= (void *)t->tid_tab &&
+ (void *)e->next < (void *)&t->atid_tab[t->natids])
+ return NULL;
+
+ return &e->t3c_tid;
+}
+
+int attach_t3cdev(struct t3cdev *dev);
+void detach_t3cdev(struct t3cdev *dev);
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 0000000000..401827b82a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHIOCTL_H__
+#define __CHIOCTL_H__
+
+/*
+ * Ioctl commands specific to this driver.
+ */
+enum {
+ CHELSIO_GETMTUTAB = 1029,
+ CHELSIO_SETMTUTAB = 1030,
+ CHELSIO_SET_PM = 1032,
+ CHELSIO_GET_PM = 1033,
+ CHELSIO_GET_MEM = 1038,
+ CHELSIO_LOAD_FW = 1041,
+ CHELSIO_SET_TRACE_FILTER = 1044,
+ CHELSIO_SET_QSET_PARAMS = 1045,
+ CHELSIO_GET_QSET_PARAMS = 1046,
+ CHELSIO_SET_QSET_NUM = 1047,
+ CHELSIO_GET_QSET_NUM = 1048,
+};
+
+struct ch_reg {
+ uint32_t cmd;
+ uint32_t addr;
+ uint32_t val;
+};
+
+struct ch_cntxt {
+ uint32_t cmd;
+ uint32_t cntxt_type;
+ uint32_t cntxt_id;
+ uint32_t data[4];
+};
+
+/* context types */
+enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
+
+struct ch_desc {
+ uint32_t cmd;
+ uint32_t queue_num;
+ uint32_t idx;
+ uint32_t size;
+ uint8_t data[128];
+};
+
+struct ch_mem_range {
+ uint32_t cmd;
+ uint32_t mem_id;
+ uint32_t addr;
+ uint32_t len;
+ uint32_t version;
+ uint8_t buf[];
+};
+
+struct ch_qset_params {
+ uint32_t cmd;
+ uint32_t qset_idx;
+ int32_t txq_size[3];
+ int32_t rspq_size;
+ int32_t fl_size[2];
+ int32_t intr_lat;
+ int32_t polling;
+ int32_t lro;
+ int32_t cong_thres;
+ int32_t vector;
+ int32_t qnum;
+};
+
+struct ch_pktsched_params {
+ uint32_t cmd;
+ uint8_t sched;
+ uint8_t idx;
+ uint8_t min;
+ uint8_t max;
+ uint8_t binding;
+};
+
+#ifndef TCB_SIZE
+# define TCB_SIZE 128
+#endif
+
+/* TCB size in 32-bit words */
+#define TCB_WORDS (TCB_SIZE / 4)
+
+enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
+
+struct ch_mtus {
+ uint32_t cmd;
+ uint32_t nmtus;
+ uint16_t mtus[NMTUS];
+};
+
+struct ch_pm {
+ uint32_t cmd;
+ uint32_t tx_pg_sz;
+ uint32_t tx_num_pg;
+ uint32_t rx_pg_sz;
+ uint32_t rx_num_pg;
+ uint32_t pm_total;
+};
+
+struct ch_tcam {
+ uint32_t cmd;
+ uint32_t tcam_size;
+ uint32_t nservers;
+ uint32_t nroutes;
+ uint32_t nfilters;
+};
+
+struct ch_tcb {
+ uint32_t cmd;
+ uint32_t tcb_index;
+ uint32_t tcb_data[TCB_WORDS];
+};
+
+struct ch_tcam_word {
+ uint32_t cmd;
+ uint32_t addr;
+ uint32_t buf[3];
+};
+
+struct ch_trace {
+ uint32_t cmd;
+ uint32_t sip;
+ uint32_t sip_mask;
+ uint32_t dip;
+ uint32_t dip_mask;
+ uint16_t sport;
+ uint16_t sport_mask;
+ uint16_t dport;
+ uint16_t dport_mask;
+ uint32_t vlan:12;
+ uint32_t vlan_mask:12;
+ uint32_t intf:4;
+ uint32_t intf_mask:4;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint8_t invert_match:1;
+ uint8_t config_tx:1;
+ uint8_t config_rx:1;
+ uint8_t trace_tx:1;
+ uint8_t trace_rx:1;
+};
+
+#define SIOCCHIOCTL SIOCDEVPRIVATE
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
new file mode 100644
index 0000000000..d117022d15
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -0,0 +1,3472 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/sockios.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
+#include <linux/log2.h>
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/nospec.h>
+
+#include "common.h"
+#include "cxgb3_ioctl.h"
+#include "regs.h"
+#include "cxgb3_offload.h"
+#include "version.h"
+
+#include "cxgb3_ctl_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_CTRL_TXQ_ENTRIES = 1024,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+ MAX_RX_JUMBO_BUFFERS = 16384,
+ MIN_TXQ_ENTRIES = 4,
+ MIN_CTRL_TXQ_ENTRIES = 4,
+ MIN_RSPQ_ENTRIES = 32,
+ MIN_FL_ENTRIES = 32
+};
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+#define CH_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+
+static const struct pci_device_id cxgb3_pci_tbl[] = {
+ CH_DEVICE(0x20, 0), /* PE9000 */
+ CH_DEVICE(0x21, 1), /* T302E */
+ CH_DEVICE(0x22, 2), /* T310E */
+ CH_DEVICE(0x23, 3), /* T320X */
+ CH_DEVICE(0x24, 1), /* T302X */
+ CH_DEVICE(0x25, 3), /* T320E */
+ CH_DEVICE(0x26, 2), /* T310X */
+ CH_DEVICE(0x30, 2), /* T3B10 */
+ CH_DEVICE(0x31, 3), /* T3B20 */
+ CH_DEVICE(0x32, 1), /* T3B02 */
+ CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
+ CH_DEVICE(0x36, 3), /* S320E-CR */
+ CH_DEVICE(0x37, 7), /* N320E-G2 */
+ {0,}
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and pin interrupts
+ * msi = 0: force pin interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
+
+/*
+ * The driver enables offload as a default.
+ * To disable it, use ofld_disable = 1.
+ */
+
+static int ofld_disable = 0;
+
+module_param(ofld_disable, int, 0644);
+MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
+
+/*
+ * We have work elements that we need to cancel when an interface is taken
+ * down. Normally the work elements would be executed by keventd but that
+ * can deadlock because of linkwatch. If our close method takes the rtnl
+ * lock and linkwatch is ahead of our work elements in keventd, linkwatch
+ * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
+ * for our work to complete. Get our own work queue to solve this.
+ */
+struct workqueue_struct *cxgb3_wq;
+
+/**
+ * link_report - show link status and link speed/duplex
+ * @dev: the port whose settings are to be reported
+ *
+ * Shows the link status, speed, and duplex of a port.
+ */
+static void link_report(struct net_device *dev)
+{
+ if (!netif_carrier_ok(dev))
+ netdev_info(dev, "link down\n");
+ else {
+ const char *s = "10Mbps";
+ const struct port_info *p = netdev_priv(dev);
+
+ switch (p->link_config.speed) {
+ case SPEED_10000:
+ s = "10Gbps";
+ break;
+ case SPEED_1000:
+ s = "1000Mbps";
+ break;
+ case SPEED_100:
+ s = "100Mbps";
+ break;
+ }
+
+ netdev_info(dev, "link up, %s, %s-duplex\n",
+ s, p->link_config.duplex == DUPLEX_FULL
+ ? "full" : "half");
+ }
+}
+
+static void enable_tx_fifo_drain(struct adapter *adapter,
+ struct port_info *pi)
+{
+ t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
+ F_ENDROPPKT);
+ t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
+ t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
+ t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
+}
+
+static void disable_tx_fifo_drain(struct adapter *adapter,
+ struct port_info *pi)
+{
+ t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
+ F_ENDROPPKT, 0);
+}
+
+void t3_os_link_fault(struct adapter *adap, int port_id, int state)
+{
+ struct net_device *dev = adap->port[port_id];
+ struct port_info *pi = netdev_priv(dev);
+
+ if (state == netif_carrier_ok(dev))
+ return;
+
+ if (state) {
+ struct cmac *mac = &pi->mac;
+
+ netif_carrier_on(dev);
+
+ disable_tx_fifo_drain(adap, pi);
+
+ /* Clear local faults */
+ t3_xgm_intr_disable(adap, pi->port_id);
+ t3_read_reg(adap, A_XGM_INT_STATUS +
+ pi->mac.offset);
+ t3_write_reg(adap,
+ A_XGM_INT_CAUSE + pi->mac.offset,
+ F_XGM_INT);
+
+ t3_set_reg_field(adap,
+ A_XGM_INT_ENABLE +
+ pi->mac.offset,
+ F_XGM_INT, F_XGM_INT);
+ t3_xgm_intr_enable(adap, pi->port_id);
+
+ t3_mac_enable(mac, MAC_DIRECTION_TX);
+ } else {
+ netif_carrier_off(dev);
+
+ /* Flush TX FIFO */
+ enable_tx_fifo_drain(adap, pi);
+ }
+ link_report(dev);
+}
+
+/**
+ * t3_os_link_changed - handle link status changes
+ * @adapter: the adapter associated with the link change
+ * @port_id: the port index whose link status has changed
+ * @link_stat: the new status of the link
+ * @speed: the new speed setting
+ * @duplex: the new duplex setting
+ * @pause: the new flow-control setting
+ *
+ * This is the OS-dependent handler for link status changes. The
+ * OS-neutral handler takes care of most of the processing for these
+ * events, then calls this handler for any OS-specific processing.
+ */
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
+ int speed, int duplex, int pause)
+{
+ struct net_device *dev = adapter->port[port_id];
+ struct port_info *pi = netdev_priv(dev);
+ struct cmac *mac = &pi->mac;
+
+ /* Skip changes from disabled ports. */
+ if (!netif_running(dev))
+ return;
+
+ if (link_stat != netif_carrier_ok(dev)) {
+ if (link_stat) {
+ disable_tx_fifo_drain(adapter, pi);
+
+ t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+ /* Clear local faults */
+ t3_xgm_intr_disable(adapter, pi->port_id);
+ t3_read_reg(adapter, A_XGM_INT_STATUS +
+ pi->mac.offset);
+ t3_write_reg(adapter,
+ A_XGM_INT_CAUSE + pi->mac.offset,
+ F_XGM_INT);
+
+ t3_set_reg_field(adapter,
+ A_XGM_INT_ENABLE + pi->mac.offset,
+ F_XGM_INT, F_XGM_INT);
+ t3_xgm_intr_enable(adapter, pi->port_id);
+
+ netif_carrier_on(dev);
+ } else {
+ netif_carrier_off(dev);
+
+ t3_xgm_intr_disable(adapter, pi->port_id);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+ t3_set_reg_field(adapter,
+ A_XGM_INT_ENABLE + pi->mac.offset,
+ F_XGM_INT, 0);
+
+ if (is_10G(adapter))
+ pi->phy.ops->power_down(&pi->phy, 1);
+
+ t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+ t3_mac_disable(mac, MAC_DIRECTION_RX);
+ t3_link_start(&pi->phy, mac, &pi->link_config);
+
+ /* Flush TX FIFO */
+ enable_tx_fifo_drain(adapter, pi);
+ }
+
+ link_report(dev);
+ }
+}
+
+/**
+ * t3_os_phymod_changed - handle PHY module changes
+ * @adap: the adapter associated with the link change
+ * @port_id: the port index whose link status has changed
+ *
+ * This is the OS-dependent handler for PHY module changes. It is
+ * invoked when a PHY module is removed or inserted and performs any
+ * OS-specific processing.
+ */
+void t3_os_phymod_changed(struct adapter *adap, int port_id)
+{
+ static const char *mod_str[] = {
+ NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
+ };
+
+ const struct net_device *dev = adap->port[port_id];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->phy.modtype == phy_modtype_none)
+ netdev_info(dev, "PHY module unplugged\n");
+ else
+ netdev_info(dev, "%s PHY module inserted\n",
+ mod_str[pi->phy.modtype]);
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ t3_mac_set_rx_mode(&pi->mac, dev);
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the device to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static void link_start(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct cmac *mac = &pi->mac;
+
+ t3_mac_reset(mac);
+ t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
+ t3_mac_set_mtu(mac, dev->mtu);
+ t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+ t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
+ t3_mac_set_rx_mode(mac, dev);
+ t3_link_start(&pi->phy, mac, &pi->link_config);
+ t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static inline void cxgb_disable_msi(struct adapter *adapter)
+{
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
+ }
+}
+
+/*
+ * Interrupt handler for asynchronous events used with MSI-X.
+ */
+static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
+{
+ t3_slow_intr_handler(cookie);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts: vector 0 carries the adapter name, and
+ * each queue-set vector is named "<interface>-<qset index>".
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+ int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
+
+ snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
+ adap->msix_info[0].desc[n] = 0;
+
+ for_each_port(adap, j) {
+ struct net_device *d = adap->port[j];
+ const struct port_info *pi = netdev_priv(d);
+
+ for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+ snprintf(adap->msix_info[msi_idx].desc, n,
+ "%s-%d", d->name, pi->first_qset + i);
+ adap->msix_info[msi_idx].desc[n] = 0;
+ }
+ }
+}
+
+static int request_msix_data_irqs(struct adapter *adap)
+{
+ int i, j, err, qidx = 0;
+
+ for_each_port(adap, i) {
+ int nqsets = adap2pinfo(adap, i)->nqsets;
+
+ for (j = 0; j < nqsets; ++j) {
+ err = request_irq(adap->msix_info[qidx + 1].vec,
+ t3_intr_handler(adap,
+ adap->sge.qs[qidx].
+ rspq.polling), 0,
+ adap->msix_info[qidx + 1].desc,
+ &adap->sge.qs[qidx]);
+ if (err) {
+ while (--qidx >= 0)
+ free_irq(adap->msix_info[qidx + 1].vec,
+ &adap->sge.qs[qidx]);
+ return err;
+ }
+ qidx++;
+ }
+ }
+ return 0;
+}
+
+static void free_irq_resources(struct adapter *adapter)
+{
+ if (adapter->flags & USING_MSIX) {
+ int i, n = 0;
+
+ free_irq(adapter->msix_info[0].vec, adapter);
+ for_each_port(adapter, i)
+ n += adap2pinfo(adapter, i)->nqsets;
+
+ for (i = 0; i < n; ++i)
+ free_irq(adapter->msix_info[i + 1].vec,
+ &adapter->sge.qs[i]);
+ } else
+ free_irq(adapter->pdev->irq, adapter);
+}
+
+static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
+ unsigned long n)
+{
+ int attempts = 10;
+
+ while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
+ if (!--attempts)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+ return 0;
+}
+
+static int init_tp_parity(struct adapter *adap)
+{
+ int i;
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *greq;
+ unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
+
+ t3_tp_set_offload_mode(adap, 1);
+
+ for (i = 0; i < 16; i++) {
+ struct cpl_smt_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ req = __skb_put_zero(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
+ req->mtu_idx = NMTUS - 1;
+ req->iff = i;
+ t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
+ }
+
+ for (i = 0; i < 2048; i++) {
+ struct cpl_l2t_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ req = __skb_put_zero(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
+ req->params = htonl(V_L2T_W_IDX(i));
+ t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
+ }
+
+ for (i = 0; i < 2048; i++) {
+ struct cpl_rte_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ req = __skb_put_zero(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
+ req->l2t_idx = htonl(V_L2T_W_IDX(i));
+ t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
+ }
+
+ skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ greq = __skb_put_zero(skb, sizeof(*greq));
+ greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
+ greq->mask = cpu_to_be64(1);
+ t3_mgmt_tx(adap, skb);
+
+ i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ if (skb == adap->nofail_skb) {
+ i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ }
+
+ t3_tp_set_offload_mode(adap, 0);
+ return i;
+
+alloc_skb_fail:
+ t3_tp_set_offload_mode(adap, 0);
+ return -ENOMEM;
+}
+
+/**
+ * setup_rss - configure RSS
+ * @adap: the adapter
+ *
+ * Sets up RSS to distribute packets to multiple receive queues. We
+ * configure the RSS CPU lookup table to distribute to the number of HW
+ * receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each port.
+ * We always configure the RSS mapping for two ports since the mapping
+ * table has plenty of entries.
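+ * For example, with two queue sets on each of two ports the first half
+ * of the response queue table alternates between qsets 0 and 1 and the
+ * second half between qsets 2 and 3.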
+ */
+static void setup_rss(struct adapter *adap)
+{
+ int i;
+ unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
+ unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
+ u8 cpus[SGE_QSETS + 1];
+ u16 rspq_map[RSS_TABLE_SIZE + 1];
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ cpus[i] = i;
+ cpus[SGE_QSETS] = 0xff; /* terminator */
+
+ for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
+ rspq_map[i] = i % nq0;
+ rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
+ }
+ rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
+
+ t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
+ F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
+ V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
+}
+
+static void ring_dbs(struct adapter *adap)
+{
+ int i, j;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ for (j = 0; j < SGE_TXQ_PER_SET; j++)
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+ }
+}
+
+static void init_napi(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
+ }
+
+ /*
+ * netif_napi_add() can be called only once per napi_struct because it
+ * adds each new napi_struct to a list. Record that NAPI has been
+ * initialized (NAPI_INIT) so it is not called a second time, e.g.,
+ * during EEH recovery.
+ */
+ adap->flags |= NAPI_INIT;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled. This includes the
+ * handlers of the netdevices representing the interfaces as well as the
+ * dummy ones for the extra queues.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; i++)
+ if (adap->sge.qs[i].adap)
+ napi_disable(&adap->sge.qs[i].napi);
+}
+
+static void enable_all_napi(struct adapter *adap)
+{
+ int i;
+ for (i = 0; i < SGE_QSETS; i++)
+ if (adap->sge.qs[i].adap)
+ napi_enable(&adap->sge.qs[i].napi);
+}
+
+/**
+ * setup_sge_qsets - configure SGE Tx/Rx/response queues
+ * @adap: the adapter
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int setup_sge_qsets(struct adapter *adap)
+{
+ int i, j, err, irq_idx = 0, qset_idx = 0;
+ unsigned int ntxq = SGE_TXQ_PER_SET;
+
+ if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
+ irq_idx = -1;
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ struct port_info *pi = netdev_priv(dev);
+
+ pi->qs = &adap->sge.qs[pi->first_qset];
+ for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+ err = t3_sge_alloc_qset(adap, qset_idx, 1,
+ (adap->flags & USING_MSIX) ? qset_idx + 1 :
+ irq_idx,
+ &adap->params.sge.qset[qset_idx], ntxq, dev,
+ netdev_get_tx_queue(dev, j));
+ if (err) {
+ t3_free_sge_resources(adap);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t attr_show(struct device *d, char *buf,
+ ssize_t(*format) (struct net_device *, char *))
+{
+ ssize_t len;
+
+ /* Synchronize with ioctls that may shut down the device */
+ rtnl_lock();
+ len = (*format) (to_net_dev(d), buf);
+ rtnl_unlock();
+ return len;
+}
+
+static ssize_t attr_store(struct device *d,
+ const char *buf, size_t len,
+ ssize_t(*set) (struct net_device *, unsigned int),
+ unsigned int min_val, unsigned int max_val)
+{
+ ssize_t ret;
+ unsigned int val;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val < min_val || val > max_val)
+ return -EINVAL;
+
+ rtnl_lock();
+ ret = (*set) (to_net_dev(d), val);
+ if (!ret)
+ ret = len;
+ rtnl_unlock();
+ return ret;
+}
+
+#define CXGB3_SHOW(name, val_expr) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
+{ \
+ struct port_info *pi = netdev_priv(dev); \
+ struct adapter *adap = pi->adapter; \
+ return sprintf(buf, "%u\n", val_expr); \
+} \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return attr_show(d, buf, format_##name); \
+}
+
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
+
+ if (adap->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (val && adap->params.rev == 0)
+ return -EINVAL;
+ if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+ min_tids)
+ return -EINVAL;
+ adap->params.mc5.nfilters = val;
+ return 0;
+}
+
+static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return attr_store(d, buf, len, set_nfilters, 0, ~0);
+}
+
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (adap->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
+ MC5_MIN_TIDS)
+ return -EINVAL;
+ adap->params.mc5.nservers = val;
+ return 0;
+}
+
+static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return attr_store(d, buf, len, set_nservers, 0, ~0);
+}
+
+#define CXGB3_ATTR_R(name, val_expr) \
+CXGB3_SHOW(name, val_expr) \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+#define CXGB3_ATTR_RW(name, val_expr, store_method) \
+CXGB3_SHOW(name, val_expr) \
+static DEVICE_ATTR(name, 0644, show_##name, store_method)
+
+CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
+CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
+CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
+
+static struct attribute *cxgb3_attrs[] = {
+ &dev_attr_cam_size.attr,
+ &dev_attr_nfilters.attr,
+ &dev_attr_nservers.attr,
+ NULL
+};
+
+static const struct attribute_group cxgb3_attr_group = {
+ .attrs = cxgb3_attrs,
+};
+
+static ssize_t tm_attr_show(struct device *d,
+ char *buf, int sched)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ unsigned int v, addr, bpt, cpt;
+ ssize_t len;
+
+ addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+ rtnl_lock();
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v >>= 16;
+ bpt = (v >> 8) & 0xff;
+ cpt = v & 0xff;
+ if (!cpt)
+ len = sprintf(buf, "disabled\n");
+ else {
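+ /*
+ * The scheduler emits bpt bytes every cpt core clocks. With the
+ * core clock in kHz, v = cclk * 1000 / cpt is the number of events
+ * per second, so the rate is v * bpt bytes/s, i.e.
+ * v * bpt * 8 / 1000 = v * bpt / 125 Kbps.
+ */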
+ v = (adap->params.vpd.cclk * 1000) / cpt;
+ len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
+ }
+ rtnl_unlock();
+ return len;
+}
+
+static ssize_t tm_attr_store(struct device *d,
+ const char *buf, size_t len, int sched)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ unsigned int val;
+ ssize_t ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val > 10000000)
+ return -EINVAL;
+
+ rtnl_lock();
+ ret = t3_config_sched(adap, val, sched);
+ if (!ret)
+ ret = len;
+ rtnl_unlock();
+ return ret;
+}
+
+#define TM_ATTR(name, sched) \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return tm_attr_show(d, buf, sched); \
+} \
+static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return tm_attr_store(d, buf, len, sched); \
+} \
+static DEVICE_ATTR(name, 0644, show_##name, store_##name)
+
+TM_ATTR(sched0, 0);
+TM_ATTR(sched1, 1);
+TM_ATTR(sched2, 2);
+TM_ATTR(sched3, 3);
+TM_ATTR(sched4, 4);
+TM_ATTR(sched5, 5);
+TM_ATTR(sched6, 6);
+TM_ATTR(sched7, 7);
+
+static struct attribute *offload_attrs[] = {
+ &dev_attr_sched0.attr,
+ &dev_attr_sched1.attr,
+ &dev_attr_sched2.attr,
+ &dev_attr_sched3.attr,
+ &dev_attr_sched4.attr,
+ &dev_attr_sched5.attr,
+ &dev_attr_sched6.attr,
+ &dev_attr_sched7.attr,
+ NULL
+};
+
+static const struct attribute_group offload_attr_group = {
+ .attrs = offload_attrs,
+};
+
+/*
+ * Sends an sk_buff to an offload queue driver
+ * after dealing with any active network taps.
+ */
+static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = t3_offload_tx(tdev, skb);
+ local_bh_enable();
+ return ret;
+}
+
+static int write_smt_entry(struct adapter *adapter, int idx)
+{
+ struct cpl_smt_write_req *req;
+ struct port_info *pi = netdev_priv(adapter->port[idx]);
+ struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+
+ if (!skb)
+ return -ENOMEM;
+
+ req = __skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
+ req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
+ req->iff = idx;
+ memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
+ memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
+ skb->priority = 1;
+ offload_tx(&adapter->tdev, skb);
+ return 0;
+}
+
+static int init_smt(struct adapter *adapter)
+{
+ int i;
+
+ for_each_port(adapter, i)
+ write_smt_entry(adapter, i);
+ return 0;
+}
+
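+/*
+ * Pack the per-port MTUs into a single register write: port 0 in the
+ * low 16 bits and port 1, if present, in the high 16 bits.
+ */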
+static void init_port_mtus(struct adapter *adapter)
+{
+ unsigned int mtus = adapter->port[0]->mtu;
+
+ if (adapter->port[1])
+ mtus |= adapter->port[1]->mtu << 16;
+ t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
+}
+
+static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
+ int hi, int port)
+{
+ struct sk_buff *skb;
+ struct mngt_pktsched_wr *req;
+ int ret;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ return -ENOMEM;
+
+ req = skb_put(skb, sizeof(*req));
+ req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
+ req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
+ req->sched = sched;
+ req->idx = qidx;
+ req->min = lo;
+ req->max = hi;
+ req->binding = port;
+ ret = t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+ GFP_KERNEL);
+ if (!adap->nofail_skb)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+static int bind_qsets(struct adapter *adap)
+{
+ int i, j, err = 0;
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+
+ for (j = 0; j < pi->nqsets; ++j) {
+ int ret = send_pktsched_cmd(adap, 1,
+ pi->first_qset + j, -1,
+ -1, i);
+ if (ret)
+ err = ret;
+ }
+ }
+
+ return err;
+}
+
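+/*
+ * Firmware and TP SRAM file names are assembled from the FW_VERSION_*
+ * and TP_VERSION_* macros, e.g. FW_FNAME expands to something of the
+ * form "cxgb3/t3fw-<major>.<minor>.<micro>.bin".
+ */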
+#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
+ __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
+#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
+#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
+ __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
+#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
+#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
+#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
+#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
+MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
+MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
+MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
+
+static inline const char *get_edc_fw_name(int edc_idx)
+{
+ const char *fw_name = NULL;
+
+ switch (edc_idx) {
+ case EDC_OPT_AEL2005:
+ fw_name = AEL2005_OPT_EDC_NAME;
+ break;
+ case EDC_TWX_AEL2005:
+ fw_name = AEL2005_TWX_EDC_NAME;
+ break;
+ case EDC_TWX_AEL2020:
+ fw_name = AEL2020_TWX_EDC_NAME;
+ break;
+ }
+ return fw_name;
+}
+
+int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
+{
+ struct adapter *adapter = phy->adapter;
+ const struct firmware *fw;
+ const char *fw_name;
+ u32 csum;
+ const __be32 *p;
+ u16 *cache = phy->phy_cache;
+ int i, ret = -EINVAL;
+
+ fw_name = get_edc_fw_name(edc_idx);
+ if (fw_name)
+ ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
+ if (ret < 0) {
+ dev_err(&adapter->pdev->dev,
+ "could not upgrade firmware: unable to load %s\n",
+ fw_name);
+ return ret;
+ }
+
+ /* check the size, taking the trailing checksum word into account */
+ if (fw->size > size + 4) {
+ CH_ERR(adapter, "firmware image too large %u, expected %d\n",
+ (unsigned int)fw->size, size + 4);
+ ret = -EINVAL;
+ }
+
+ /* compute checksum */
+ p = (const __be32 *)fw->data;
+ for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+ csum);
+ ret = -EINVAL;
+ }
+
+ for (i = 0; i < size / 4 ; i++) {
+ *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
+ *cache++ = be32_to_cpu(p[i]) & 0xffff;
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int upgrade_fw(struct adapter *adap)
+{
+ int ret;
+ const struct firmware *fw;
+ struct device *dev = &adap->pdev->dev;
+
+ ret = request_firmware(&fw, FW_FNAME, dev);
+ if (ret < 0) {
+ dev_err(dev, "could not upgrade firmware: unable to load %s\n",
+ FW_FNAME);
+ return ret;
+ }
+ ret = t3_load_fw(adap, fw->data, fw->size);
+ release_firmware(fw);
+
+ if (ret == 0)
+ dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+ else
+ dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+
+ return ret;
+}
+
+static inline char t3rev2char(struct adapter *adapter)
+{
+ char rev = 0;
+
+ switch (adapter->params.rev) {
+ case T3_REV_B:
+ case T3_REV_B2:
+ rev = 'b';
+ break;
+ case T3_REV_C:
+ rev = 'c';
+ break;
+ }
+ return rev;
+}
+
+static int update_tpsram(struct adapter *adap)
+{
+ const struct firmware *tpsram;
+ char buf[64];
+ struct device *dev = &adap->pdev->dev;
+ int ret;
+ char rev;
+
+ rev = t3rev2char(adap);
+ if (!rev)
+ return 0;
+
+ snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
+
+ ret = request_firmware(&tpsram, buf, dev);
+ if (ret < 0) {
+ dev_err(dev, "could not load TP SRAM: unable to load %s\n",
+ buf);
+ return ret;
+ }
+
+ ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
+ if (ret)
+ goto release_tpsram;
+
+ ret = t3_set_proto_sram(adap, tpsram->data);
+ if (ret == 0)
+ dev_info(dev,
+ "successful update of protocol engine "
+ "to %d.%d.%d\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+ else
+ dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+
+release_tpsram:
+ release_firmware(tpsram);
+
+ return ret;
+}
+
+/**
+ * t3_synchronize_rx - wait for current Rx processing on a port to complete
+ * @adap: the adapter
+ * @p: the port
+ *
+ * Ensures that current Rx processing on any of the queues associated with
+ * the given port completes before returning. We do this by acquiring and
+ * releasing the locks of the response queues associated with the port.
+ */
+static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
+{
+ int i;
+
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+ struct sge_rspq *q = &adap->sge.qs[i].rspq;
+
+ spin_lock_irq(&q->lock);
+ spin_unlock_irq(&q->lock);
+ }
+}
+
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (adapter->params.rev > 0) {
+ t3_set_vlan_accel(adapter, 1 << pi->port_id,
+ features & NETIF_F_HW_VLAN_CTAG_RX);
+ } else {
+ /* single control for all ports */
+ unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
+
+ for_each_port(adapter, i)
+ have_vlans |=
+ adapter->port[i]->features &
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ t3_set_vlan_accel(adapter, 1, have_vlans);
+ }
+ t3_synchronize_rx(adapter, pi);
+}
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ *
+ * Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+ int i, err;
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ err = t3_check_fw_version(adap);
+ if (err == -EINVAL) {
+ err = upgrade_fw(adap);
+ CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR,
+ FW_VERSION_MICRO, err ? "failed" : "succeeded");
+ }
+
+ err = t3_check_tpsram_version(adap);
+ if (err == -EINVAL) {
+ err = update_tpsram(adap);
+ CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR,
+ TP_VERSION_MICRO, err ? "failed" : "succeeded");
+ }
+
+ /*
+ * Clear interrupts now to catch errors if t3_init_hw fails.
+ * We clear them again later as initialization may trigger
+ * conditions that can interrupt.
+ */
+ t3_intr_clear(adap);
+
+ err = t3_init_hw(adap, 0);
+ if (err)
+ goto out;
+
+ t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
+ t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
+ err = setup_sge_qsets(adap);
+ if (err)
+ goto out;
+
+ for_each_port(adap, i)
+ cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
+
+ setup_rss(adap);
+ if (!(adap->flags & NAPI_INIT))
+ init_napi(adap);
+
+ t3_start_sge_timers(adap);
+ adap->flags |= FULL_INIT_DONE;
+ }
+
+ t3_intr_clear(adap);
+
+ if (adap->flags & USING_MSIX) {
+ name_msix_vecs(adap);
+ err = request_irq(adap->msix_info[0].vec,
+ t3_async_intr_handler, 0,
+ adap->msix_info[0].desc, adap);
+ if (err)
+ goto irq_err;
+
+ err = request_msix_data_irqs(adap);
+ if (err) {
+ free_irq(adap->msix_info[0].vec, adap);
+ goto irq_err;
+ }
+ } else {
+ err = request_irq(adap->pdev->irq,
+ t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
+ (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ adap->name, adap);
+ if (err)
+ goto irq_err;
+ }
+
+ enable_all_napi(adap);
+ t3_sge_start(adap);
+ t3_intr_enable(adap);
+
+ if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
+ is_offload(adap) && init_tp_parity(adap) == 0)
+ adap->flags |= TP_PARITY_INIT;
+
+ if (adap->flags & TP_PARITY_INIT) {
+ t3_write_reg(adap, A_TP_INT_CAUSE,
+ F_CMCACHEPERR | F_ARPLUTPERR);
+ t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
+ }
+
+ if (!(adap->flags & QUEUES_BOUND)) {
+ int ret = bind_qsets(adap);
+
+ if (ret < 0) {
+ CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
+ t3_intr_disable(adap);
+ quiesce_rx(adap);
+ free_irq_resources(adap);
+ err = ret;
+ goto out;
+ }
+ adap->flags |= QUEUES_BOUND;
+ }
+
+out:
+ return err;
+irq_err:
+ CH_ERR(adap, "request_irq failed, err %d\n", err);
+ goto out;
+}
+
+/*
+ * Release resources when all the ports and offloading have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter, int on_wq)
+{
+ t3_sge_stop(adapter);
+ spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
+ t3_intr_disable(adapter);
+ spin_unlock_irq(&adapter->work_lock);
+
+ free_irq_resources(adapter);
+ quiesce_rx(adapter);
+ t3_sge_stop(adapter);
+ if (!on_wq)
+ flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
+}
+
+static void schedule_chk_task(struct adapter *adap)
+{
+ unsigned int timeo;
+
+ timeo = adap->params.linkpoll_period ?
+ (HZ * adap->params.linkpoll_period) / 10 :
+ adap->params.stats_update_period * HZ;
+ if (timeo)
+ queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
+}
+
+static int offload_open(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct t3cdev *tdev = dev2t3cdev(dev);
+ int adap_up = adapter->open_device_map & PORT_MASK;
+ int err;
+
+ if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+ return 0;
+
+ if (!adap_up) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ goto out;
+ }
+
+ t3_tp_set_offload_mode(adapter, 1);
+ tdev->lldev = adapter->port[0];
+ err = cxgb3_offload_activate(adapter);
+ if (err)
+ goto out;
+
+ init_port_mtus(adapter);
+ t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
+ adapter->params.b_wnd,
+ adapter->params.rev == 0 ?
+ adapter->port[0]->mtu : 0xffff);
+ init_smt(adapter);
+
+ if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
+ dev_dbg(&dev->dev, "cannot create sysfs group\n");
+
+ /* Call back all registered clients */
+ cxgb3_add_clients(tdev);
+
+out:
+ /* roll back offload mode if anything above failed */
+ if (err) {
+ t3_tp_set_offload_mode(adapter, 0);
+ clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+ cxgb3_set_dummy_ops(tdev);
+ }
+ return err;
+}
+
+static int offload_close(struct t3cdev *tdev)
+{
+ struct adapter *adapter = tdev2adap(tdev);
+ struct t3c_data *td = T3C_DATA(tdev);
+
+ if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+ return 0;
+
+ /* Call back all registered clients */
+ cxgb3_remove_clients(tdev);
+
+ sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
+
+ /* Flush work scheduled while releasing TIDs */
+ flush_work(&td->tid_release_task);
+
+ tdev->lldev = NULL;
+ cxgb3_set_dummy_ops(tdev);
+ t3_tp_set_offload_mode(adapter, 0);
+ clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+
+ if (!adapter->open_device_map)
+ cxgb_down(adapter, 0);
+
+ cxgb3_offload_deactivate(adapter);
+ return 0;
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int other_ports = adapter->open_device_map & PORT_MASK;
+ int err;
+
+ if (!adapter->open_device_map) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ return err;
+ }
+
+ set_bit(pi->port_id, &adapter->open_device_map);
+ if (is_offload(adapter) && !ofld_disable) {
+ err = offload_open(dev);
+ if (err)
+ pr_warn("Could not initialize offload capabilities\n");
+ }
+
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+ err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+ if (err)
+ return err;
+ link_start(dev);
+ t3_port_intr_enable(adapter, pi->port_id);
+ netif_tx_start_all_queues(dev);
+ if (!other_ports)
+ schedule_chk_task(adapter);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
+ return 0;
+}
+
+static int __cxgb_close(struct net_device *dev, int on_wq)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (!adapter->open_device_map)
+ return 0;
+
+ /* Stop link fault interrupts */
+ t3_xgm_intr_disable(adapter, pi->port_id);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+
+ t3_port_intr_disable(adapter, pi->port_id);
+ netif_tx_stop_all_queues(dev);
+ pi->phy.ops->power_down(&pi->phy, 1);
+ netif_carrier_off(dev);
+ t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+
+ spin_lock_irq(&adapter->work_lock); /* sync with update task */
+ clear_bit(pi->port_id, &adapter->open_device_map);
+ spin_unlock_irq(&adapter->work_lock);
+
+ if (!(adapter->open_device_map & PORT_MASK))
+ cancel_delayed_work_sync(&adapter->adap_check_task);
+
+ if (!adapter->open_device_map)
+ cxgb_down(adapter, on_wq);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
+ return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+ return __cxgb_close(dev, 0);
+}
+
+static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct net_device_stats *ns = &dev->stats;
+ const struct mac_stats *pstats;
+
+ spin_lock(&adapter->stats_lock);
+ pstats = t3_mac_update_stats(&pi->mac);
+ spin_unlock(&adapter->stats_lock);
+
+ ns->tx_bytes = pstats->tx_octets;
+ ns->tx_packets = pstats->tx_frames;
+ ns->rx_bytes = pstats->rx_octets;
+ ns->rx_packets = pstats->rx_frames;
+ ns->multicast = pstats->rx_mcast_frames;
+
+ ns->tx_errors = pstats->tx_underrun;
+ ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
+ pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
+ pstats->rx_fifo_ovfl;
+
+ /* detailed rx_errors */
+ ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
+ ns->rx_over_errors = 0;
+ ns->rx_crc_errors = pstats->rx_fcs_errs;
+ ns->rx_frame_errors = pstats->rx_symbol_errs;
+ ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
+ ns->rx_missed_errors = pstats->rx_cong_drops;
+
+ /* detailed tx_errors */
+ ns->tx_aborted_errors = 0;
+ ns->tx_carrier_errors = 0;
+ ns->tx_fifo_errors = pstats->tx_underrun;
+ ns->tx_heartbeat_errors = 0;
+ ns->tx_window_errors = 0;
+ return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ adapter->msg_enable = val;
+}
+
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxOctetsOK ",
+ "TxFramesOK ",
+ "TxMulticastFramesOK",
+ "TxBroadcastFramesOK",
+ "TxPauseFrames ",
+ "TxUnderrun ",
+ "TxExtUnderrun ",
+
+ "TxFrames64 ",
+ "TxFrames65To127 ",
+ "TxFrames128To255 ",
+ "TxFrames256To511 ",
+ "TxFrames512To1023 ",
+ "TxFrames1024To1518 ",
+ "TxFrames1519ToMax ",
+
+ "RxOctetsOK ",
+ "RxFramesOK ",
+ "RxMulticastFramesOK",
+ "RxBroadcastFramesOK",
+ "RxPauseFrames ",
+ "RxFCSErrors ",
+ "RxSymbolErrors ",
+ "RxShortErrors ",
+ "RxJabberErrors ",
+ "RxLengthErrors ",
+ "RxFIFOoverflow ",
+
+ "RxFrames64 ",
+ "RxFrames65To127 ",
+ "RxFrames128To255 ",
+ "RxFrames256To511 ",
+ "RxFrames512To1023 ",
+ "RxFrames1024To1518 ",
+ "RxFrames1519ToMax ",
+
+ "PhyFIFOErrors ",
+ "TSO ",
+ "VLANextractions ",
+ "VLANinsertions ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "LroAggregated ",
+ "LroFlushed ",
+ "LroNoDesc ",
+ "RxDrops ",
+
+ "CheckTXEnToggled ",
+ "CheckResets ",
+
+ "LinkFaults ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define T3_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+ return T3_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ u32 fw_vers = 0;
+ u32 tp_vers = 0;
+
+ spin_lock(&adapter->stats_lock);
+ t3_get_fw_version(adapter, &fw_vers);
+ t3_get_tp_version(adapter, &tp_vers);
+ spin_unlock(&adapter->stats_lock);
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (fw_vers)
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%s %u.%u.%u TP %u.%u.%u",
+ G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
+ G_FW_VERSION_MAJOR(fw_vers),
+ G_FW_VERSION_MINOR(fw_vers),
+ G_FW_VERSION_MICRO(fw_vers),
+ G_TP_VERSION_MAJOR(tp_vers),
+ G_TP_VERSION_MINOR(tp_vers),
+ G_TP_VERSION_MICRO(tp_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static unsigned long collect_sge_port_stats(struct adapter *adapter,
+ struct port_info *p, int idx)
+{
+ int i;
+ unsigned long tot = 0;
+
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
+ tot += adapter->sge.qs[i].port_stats[idx];
+ return tot;
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ const struct mac_stats *s;
+
+ spin_lock(&adapter->stats_lock);
+ s = t3_mac_update_stats(&pi->mac);
+ spin_unlock(&adapter->stats_lock);
+
+ *data++ = s->tx_octets;
+ *data++ = s->tx_frames;
+ *data++ = s->tx_mcast_frames;
+ *data++ = s->tx_bcast_frames;
+ *data++ = s->tx_pause;
+ *data++ = s->tx_underrun;
+ *data++ = s->tx_fifo_urun;
+
+ *data++ = s->tx_frames_64;
+ *data++ = s->tx_frames_65_127;
+ *data++ = s->tx_frames_128_255;
+ *data++ = s->tx_frames_256_511;
+ *data++ = s->tx_frames_512_1023;
+ *data++ = s->tx_frames_1024_1518;
+ *data++ = s->tx_frames_1519_max;
+
+ *data++ = s->rx_octets;
+ *data++ = s->rx_frames;
+ *data++ = s->rx_mcast_frames;
+ *data++ = s->rx_bcast_frames;
+ *data++ = s->rx_pause;
+ *data++ = s->rx_fcs_errs;
+ *data++ = s->rx_symbol_errs;
+ *data++ = s->rx_short;
+ *data++ = s->rx_jabber;
+ *data++ = s->rx_too_long;
+ *data++ = s->rx_fifo_ovfl;
+
+ *data++ = s->rx_frames_64;
+ *data++ = s->rx_frames_65_127;
+ *data++ = s->rx_frames_128_255;
+ *data++ = s->rx_frames_256_511;
+ *data++ = s->rx_frames_512_1023;
+ *data++ = s->rx_frames_1024_1518;
+ *data++ = s->rx_frames_1519_max;
+
+ *data++ = pi->phy.fifo_errors;
+
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
+ *data++ = 0;
+ *data++ = 0;
+ *data++ = 0;
+ *data++ = s->rx_cong_drops;
+
+ *data++ = s->num_toggled;
+ *data++ = s->num_resets;
+
+ *data++ = s->link_faults;
+}
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+ unsigned int start, unsigned int end)
+{
+ u32 *p = buf + start;
+
+ for (; start <= end; start += sizeof(u32))
+ *p++ = t3_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *ap = pi->adapter;
+
+ /*
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ * bit 31: set for PCIe cards
+ */
+ regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
+
+ /*
+ * We skip the MAC statistics registers because they are clear-on-read.
+ * Also reading multi-register stats would need to synchronize with the
+ * periodic mac stats accumulation. Hard to justify the complexity.
+ */
+ memset(buf, 0, T3_REGMAP_SIZE);
+ reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
+ reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
+ reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
+ reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
+ reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
+ reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
+ XGM_REG(A_XGM_SERDES_STAT3, 1));
+ reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
+ XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+ if (p->link_config.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ p->phy.ops->autoneg_restart(&p->phy);
+ return 0;
+}
+
+static int set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
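+ /* the identification LED is driven by GPIO0 */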
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_OFF:
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_INACTIVE:
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+ F_GPIO0_OUT_VAL);
+ }
+
+ return 0;
+}
+
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct port_info *p = netdev_priv(dev);
+ u32 supported;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ p->link_config.supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ p->link_config.advertising);
+
+ if (netif_carrier_ok(dev)) {
+ cmd->base.speed = p->link_config.speed;
+ cmd->base.duplex = p->link_config.duplex;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->base.phy_address = p->phy.mdio.prtad;
+ cmd->base.autoneg = p->link_config.autoneg;
+ return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+ int cap = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_10baseT_Full;
+ else
+ cap = SUPPORTED_10baseT_Half;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_100baseT_Full;
+ else
+ cap = SUPPORTED_100baseT_Half;
+ break;
+ case SPEED_1000:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_1000baseT_Full;
+ else
+ cap = SUPPORTED_1000baseT_Half;
+ break;
+ case SPEED_10000:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_10000baseT_Full;
+ }
+ return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+ ADVERTISED_10000baseT_Full)
+
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_config;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (!(lc->supported & SUPPORTED_Autoneg)) {
+ /*
+ * PHY offers a single speed/duplex. See if that's what's
+ * being requested.
+ */
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
+ if (lc->supported & cap)
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (cmd->base.autoneg == AUTONEG_DISABLE) {
+ u32 speed = cmd->base.speed;
+ int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
+
+ /* forcing the speed to 1Gb/s is not allowed, gigabit requires autoneg */
+ if (!(lc->supported & cap) || (speed == SPEED_1000))
+ return -EINVAL;
+ lc->requested_speed = speed;
+ lc->requested_duplex = cmd->base.duplex;
+ lc->advertising = 0;
+ } else {
+ advertising &= ADVERTISED_MASK;
+ advertising &= lc->supported;
+ if (!advertising)
+ return -EINVAL;
+ lc->requested_speed = SPEED_INVALID;
+ lc->requested_duplex = DUPLEX_INVALID;
+ lc->advertising = advertising | ADVERTISED_Autoneg;
+ }
+ lc->autoneg = cmd->base.autoneg;
+ if (netif_running(dev))
+ t3_link_start(&p->phy, &p->mac, lc);
+ return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_config;
+
+ if (epause->autoneg == AUTONEG_DISABLE)
+ lc->requested_fc = 0;
+ else if (lc->supported & SUPPORTED_Autoneg)
+ lc->requested_fc = PAUSE_AUTONEG;
+ else
+ return -EINVAL;
+
+ if (epause->rx_pause)
+ lc->requested_fc |= PAUSE_RX;
+ if (epause->tx_pause)
+ lc->requested_fc |= PAUSE_TX;
+ if (lc->autoneg == AUTONEG_ENABLE) {
+ if (netif_running(dev))
+ t3_link_start(&p->phy, &p->mac, lc);
+ } else {
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ if (netif_running(dev))
+ t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
+ }
+ return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
+
+ e->rx_max_pending = MAX_RX_BUFFERS;
+ e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+ e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ e->rx_pending = q->fl_size;
+ e->rx_mini_pending = q->rspq_size;
+ e->rx_jumbo_pending = q->jumbo_size;
+ e->tx_pending = q->txq_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct qset_params *q;
+ int i;
+
+ if (e->rx_pending > MAX_RX_BUFFERS ||
+ e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+ e->tx_pending > MAX_TXQ_ENTRIES ||
+ e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ e->rx_pending < MIN_FL_ENTRIES ||
+ e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+ e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ q = &adapter->params.sge.qset[pi->first_qset];
+ for (i = 0; i < pi->nqsets; ++i, ++q) {
+ q->rspq_size = e->rx_mini_pending;
+ q->fl_size = e->rx_pending;
+ q->jumbo_size = e->rx_jumbo_pending;
+ q->txq_size[0] = e->tx_pending;
+ q->txq_size[1] = e->tx_pending;
+ q->txq_size[2] = e->tx_pending;
+ }
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct qset_params *qsp;
+ struct sge_qset *qs;
+ int i;
+
+ if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
+ return -EINVAL;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qsp = &adapter->params.sge.qset[i];
+ qs = &adapter->sge.qs[i];
+ qsp->coalesce_usecs = c->rx_coalesce_usecs;
+ t3_update_qset_coalesce(qs, qsp);
+ }
+
+ return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct qset_params *q = adapter->params.sge.qset;
+
+ c->rx_coalesce_usecs = q->coalesce_usecs;
+ return 0;
+}
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+ u8 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int cnt;
+
+ e->magic = EEPROM_MAGIC;
+ cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
+ if (cnt < 0)
+ return cnt;
+
+ e->len = cnt;
+
+ return 0;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ u32 aligned_offset, aligned_len;
+ u8 *buf;
+ int err;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
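+ /*
+ * VPD accesses are 32-bit aligned; round the range out and, for a
+ * partial write, read-modify-write the surrounding words.
+ */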
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+ buf = kmalloc(aligned_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
+ buf);
+ if (err < 0)
+ goto out;
+ memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+ } else {
+ buf = data;
+ }
+
+ err = t3_seeprom_wp(adapter, 0);
+ if (err)
+ goto out;
+
+ err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
+ if (err >= 0)
+ err = t3_seeprom_wp(adapter, 1);
+out:
+ if (buf != data)
+ kfree(buf);
+ return err < 0 ? err : 0;
+}
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+ .get_eeprom_len = get_eeprom_len,
+ .get_eeprom = get_eeprom,
+ .set_eeprom = set_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .set_phys_id = set_phys_id,
+ .nway_reset = restart_autoneg,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_stats,
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+ .get_wol = get_wol,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
+};
+
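+/* A negative value means "leave this parameter unchanged" and is in range. */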
+static int cxgb_in_range(int val, int lo, int hi)
+{
+ return val < 0 || (val <= hi && val >= lo);
+}
+
+static int cxgb_siocdevprivate(struct net_device *dev,
+ struct ifreq *ifreq,
+ void __user *useraddr,
+ int cmd)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ if (cmd != SIOCCHIOCTL)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case CHELSIO_SET_QSET_PARAMS:{
+ int i;
+ struct qset_params *q;
+ struct ch_qset_params t;
+ int q1 = pi->first_qset;
+ int nqsets = pi->nqsets;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+ return -EINVAL;
+ if (t.qset_idx >= SGE_QSETS)
+ return -EINVAL;
+ if (!cxgb_in_range(t.intr_lat, 0, M_NEWTIMER) ||
+ !cxgb_in_range(t.cong_thres, 0, 255) ||
+ !cxgb_in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !cxgb_in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !cxgb_in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+ MAX_CTRL_TXQ_ENTRIES) ||
+ !cxgb_in_range(t.fl_size[0], MIN_FL_ENTRIES,
+ MAX_RX_BUFFERS) ||
+ !cxgb_in_range(t.fl_size[1], MIN_FL_ENTRIES,
+ MAX_RX_JUMBO_BUFFERS) ||
+ !cxgb_in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
+ MAX_RSPQ_ENTRIES))
+ return -EINVAL;
+
+ if ((adapter->flags & FULL_INIT_DONE) &&
+ (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
+ t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
+ t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
+ t.polling >= 0 || t.cong_thres >= 0))
+ return -EBUSY;
+
+ /* Allow setting of any available qset when offload enabled */
+ if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ q1 = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ nqsets += pi->first_qset + pi->nqsets;
+ }
+ }
+
+ if (t.qset_idx < q1)
+ return -EINVAL;
+ if (t.qset_idx > q1 + nqsets - 1)
+ return -EINVAL;
+
+ q = &adapter->params.sge.qset[t.qset_idx];
+
+ if (t.rspq_size >= 0)
+ q->rspq_size = t.rspq_size;
+ if (t.fl_size[0] >= 0)
+ q->fl_size = t.fl_size[0];
+ if (t.fl_size[1] >= 0)
+ q->jumbo_size = t.fl_size[1];
+ if (t.txq_size[0] >= 0)
+ q->txq_size[0] = t.txq_size[0];
+ if (t.txq_size[1] >= 0)
+ q->txq_size[1] = t.txq_size[1];
+ if (t.txq_size[2] >= 0)
+ q->txq_size[2] = t.txq_size[2];
+ if (t.cong_thres >= 0)
+ q->cong_thres = t.cong_thres;
+ if (t.intr_lat >= 0) {
+ struct sge_qset *qs =
+ &adapter->sge.qs[t.qset_idx];
+
+ q->coalesce_usecs = t.intr_lat;
+ t3_update_qset_coalesce(qs, q);
+ }
+ if (t.polling >= 0) {
+ if (adapter->flags & USING_MSIX)
+ q->polling = t.polling;
+ else {
+ /* No polling with INTx for T3A */
+ if (adapter->params.rev == 0 &&
+ !(adapter->flags & USING_MSI))
+ t.polling = 0;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ q = &adapter->params.sge.qset[i];
+ q->polling = t.polling;
+ }
+ }
+ }
+
+ if (t.lro >= 0) {
+ if (t.lro)
+ dev->wanted_features |= NETIF_F_GRO;
+ else
+ dev->wanted_features &= ~NETIF_F_GRO;
+ netdev_update_features(dev);
+ }
+
+ break;
+ }
+ case CHELSIO_GET_QSET_PARAMS:{
+ struct qset_params *q;
+ struct ch_qset_params t;
+ int q1 = pi->first_qset;
+ int nqsets = pi->nqsets;
+ int i;
+
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+
+ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+ return -EINVAL;
+
+ /* Display qsets for all ports when offload enabled */
+ if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ q1 = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ nqsets = pi->first_qset + pi->nqsets;
+ }
+ }
+
+ if (t.qset_idx >= nqsets)
+ return -EINVAL;
+ t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
+
+ q = &adapter->params.sge.qset[q1 + t.qset_idx];
+ t.rspq_size = q->rspq_size;
+ t.txq_size[0] = q->txq_size[0];
+ t.txq_size[1] = q->txq_size[1];
+ t.txq_size[2] = q->txq_size[2];
+ t.fl_size[0] = q->fl_size;
+ t.fl_size[1] = q->jumbo_size;
+ t.polling = q->polling;
+ t.lro = !!(dev->features & NETIF_F_GRO);
+ t.intr_lat = q->coalesce_usecs;
+ t.cong_thres = q->cong_thres;
+ t.qnum = q1;
+
+ if (adapter->flags & USING_MSIX)
+ t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
+ else
+ t.vector = adapter->pdev->irq;
+
+ if (copy_to_user(useraddr, &t, sizeof(t)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_SET_QSET_NUM:{
+ struct ch_reg edata;
+ unsigned int i, first_qset = 0, other_qsets = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if (edata.cmd != CHELSIO_SET_QSET_NUM)
+ return -EINVAL;
+ if (edata.val < 1 ||
+ (edata.val > 1 && !(adapter->flags & USING_MSIX)))
+ return -EINVAL;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i] && adapter->port[i] != dev)
+ other_qsets += adap2pinfo(adapter, i)->nqsets;
+
+ if (edata.val + other_qsets > SGE_QSETS)
+ return -EINVAL;
+
+ pi->nqsets = edata.val;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]) {
+ pi = adap2pinfo(adapter, i);
+ pi->first_qset = first_qset;
+ first_qset += pi->nqsets;
+ }
+ break;
+ }
+ case CHELSIO_GET_QSET_NUM:{
+ struct ch_reg edata;
+
+ memset(&edata, 0, sizeof(struct ch_reg));
+
+ edata.cmd = CHELSIO_GET_QSET_NUM;
+ edata.val = pi->nqsets;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_LOAD_FW:{
+ u8 *fw_data;
+ struct ch_mem_range t;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if (t.cmd != CHELSIO_LOAD_FW)
+ return -EINVAL;
+ /* Check t.len sanity ? */
+ fw_data = memdup_user(useraddr + sizeof(t), t.len);
+ if (IS_ERR(fw_data))
+ return PTR_ERR(fw_data);
+
+ ret = t3_load_fw(adapter, fw_data, t.len);
+ kfree(fw_data);
+ if (ret)
+ return ret;
+ break;
+ }
+ case CHELSIO_SETMTUTAB:{
+ struct ch_mtus m;
+ int i;
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (offload_running(adapter))
+ return -EBUSY;
+ if (copy_from_user(&m, useraddr, sizeof(m)))
+ return -EFAULT;
+ if (m.cmd != CHELSIO_SETMTUTAB)
+ return -EINVAL;
+ if (m.nmtus != NMTUS)
+ return -EINVAL;
+ if (m.mtus[0] < 81) /* accommodate SACK */
+ return -EINVAL;
+
+ /* MTUs must be in ascending order */
+ for (i = 1; i < NMTUS; ++i)
+ if (m.mtus[i] < m.mtus[i - 1])
+ return -EINVAL;
+
+ memcpy(adapter->params.mtus, m.mtus,
+ sizeof(adapter->params.mtus));
+ break;
+ }
+ case CHELSIO_GET_PM:{
+ struct tp_params *p = &adapter->params.tp;
+ struct ch_pm m = {.cmd = CHELSIO_GET_PM };
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ m.tx_pg_sz = p->tx_pg_size;
+ m.tx_num_pg = p->tx_num_pgs;
+ m.rx_pg_sz = p->rx_pg_size;
+ m.rx_num_pg = p->rx_num_pgs;
+ m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
+ if (copy_to_user(useraddr, &m, sizeof(m)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_SET_PM:{
+ struct ch_pm m;
+ struct tp_params *p = &adapter->params.tp;
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (copy_from_user(&m, useraddr, sizeof(m)))
+ return -EFAULT;
+ if (m.cmd != CHELSIO_SET_PM)
+ return -EINVAL;
+ if (!is_power_of_2(m.rx_pg_sz) ||
+ !is_power_of_2(m.tx_pg_sz))
+ return -EINVAL; /* not power of 2 */
+ if (!(m.rx_pg_sz & 0x14000))
+ return -EINVAL; /* not 16KB or 64KB */
+ if (!(m.tx_pg_sz & 0x1554000))
+ return -EINVAL; /* not a power of 4 from 16KB to 16MB */
+ if (m.tx_num_pg == -1)
+ m.tx_num_pg = p->tx_num_pgs;
+ if (m.rx_num_pg == -1)
+ m.rx_num_pg = p->rx_num_pgs;
+ if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
+ return -EINVAL;
+ if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
+ m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
+ return -EINVAL;
+ p->rx_pg_size = m.rx_pg_sz;
+ p->tx_pg_size = m.tx_pg_sz;
+ p->rx_num_pgs = m.rx_num_pg;
+ p->tx_num_pgs = m.tx_num_pg;
+ break;
+ }
+ case CHELSIO_GET_MEM:{
+ struct ch_mem_range t;
+ struct mc7 *mem;
+ u64 buf[32];
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return -EIO; /* need the memory controllers */
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if (t.cmd != CHELSIO_GET_MEM)
+ return -EINVAL;
+ if ((t.addr & 7) || (t.len & 7))
+ return -EINVAL;
+ if (t.mem_id == MEM_CM)
+ mem = &adapter->cm;
+ else if (t.mem_id == MEM_PMRX)
+ mem = &adapter->pmrx;
+ else if (t.mem_id == MEM_PMTX)
+ mem = &adapter->pmtx;
+ else
+ return -EINVAL;
+
+ /*
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ */
+ t.version = 3 | (adapter->params.rev << 10);
+ if (copy_to_user(useraddr, &t, sizeof(t)))
+ return -EFAULT;
+
+ /*
+ * Read 256 bytes at a time as len can be large and we don't
+ * want to use huge intermediate buffers.
+ */
+ useraddr += sizeof(t); /* advance to start of buffer */
+ while (t.len) {
+ unsigned int chunk =
+ min_t(unsigned int, t.len, sizeof(buf));
+
+ ret = t3_mc7_bd_read(mem, t.addr / 8,
+ chunk / 8, buf);
+ if (ret)
+ return ret;
+ if (copy_to_user(useraddr, buf, chunk))
+ return -EFAULT;
+ useraddr += chunk;
+ t.addr += chunk;
+ t.len -= chunk;
+ }
+ break;
+ }
+ case CHELSIO_SET_TRACE_FILTER:{
+ struct ch_trace t;
+ const struct trace_params *tp;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+ return -EINVAL;
+
+ tp = (const struct trace_params *)&t.sip;
+ if (t.config_tx)
+ t3_config_trace_filter(adapter, tp, 0,
+ t.invert_match,
+ t.trace_tx);
+ if (t.config_rx)
+ t3_config_trace_filter(adapter, tp, 1,
+ t.invert_match,
+ t.trace_rx);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(req);
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ switch (cmd) {
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ /*
+ * Convert phy_id from the older packed PRTAD/DEVAD format
+ * (prtad in bits 12:8, devad in bits 4:0) to a clause-45
+ * MDIO address.
+ */
+ if (is_10G(adapter) &&
+ !mdio_phy_id_is_c45(data->phy_id) &&
+ (data->phy_id & 0x1f00) &&
+ !(data->phy_id & 0xe0e0))
+ data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
+ data->phy_id & 0x1f);
+ fallthrough;
+ case SIOCGMIIPHY:
+ return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ ret = t3_mac_set_mtu(&pi->mac, new_mtu);
+ if (ret)
+ return ret;
+ dev->mtu = new_mtu;
+ init_port_mtus(adapter);
+ if (adapter->params.rev == 0 && offload_running(adapter))
+ t3_load_mtus(adapter, adapter->params.mtus,
+ adapter->params.a_wnd, adapter->params.b_wnd,
+ adapter->port[0]->mtu);
+ return 0;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+ t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
+ if (offload_running(adapter))
+ write_smt_entry(adapter, pi->port_id);
+ return 0;
+}
+
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+ return features;
+}
+
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
+{
+ netdev_features_t changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ cxgb_vlan_mode(dev, features);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int qidx;
+
+ for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
+ struct sge_qset *qs = &adapter->sge.qs[qidx];
+ void *source;
+
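+ /*
+ * Mirror the dev_id each handler was registered with: the qset
+ * for MSI-X data vectors, the adapter for INTx/MSI.
+ */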
+ if (adapter->flags & USING_MSIX)
+ source = qs;
+ else
+ source = adapter;
+
+ t3_intr_handler(adapter, qs->rspq.polling)(0, source);
+ }
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.
+ */
+static void mac_stats_update(struct adapter *adapter)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+
+ if (netif_running(dev)) {
+ spin_lock(&adapter->stats_lock);
+ t3_mac_update_stats(&p->mac);
+ spin_unlock(&adapter->stats_lock);
+ }
+ }
+}
+
+static void check_link_status(struct adapter *adapter)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+ int link_fault;
+
+ spin_lock_irq(&adapter->work_lock);
+ link_fault = p->link_fault;
+ spin_unlock_irq(&adapter->work_lock);
+
+ if (link_fault) {
+ t3_link_fault(adapter, i);
+ continue;
+ }
+
+ if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
+ t3_xgm_intr_disable(adapter, i);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+
+ t3_link_changed(adapter, i);
+ t3_xgm_intr_enable(adapter, i);
+ }
+ }
+}
+
+static void check_t3b2_mac(struct adapter *adapter)
+{
+ int i;
+
+ if (!rtnl_trylock()) /* synchronize with ifdown */
+ return;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+ int status;
+
+ if (!netif_running(dev))
+ continue;
+
+ status = 0;
+ if (netif_carrier_ok(dev)) /* netif_running() checked above */
+ status = t3b2_mac_watchdog_task(&p->mac);
+ if (status == 1)
+ p->mac.stats.num_toggled++;
+ else if (status == 2) {
+ struct cmac *mac = &p->mac;
+
+ t3_mac_set_mtu(mac, dev->mtu);
+ t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+ cxgb_set_rxmode(dev);
+ t3_link_start(&p->phy, mac, &p->link_config);
+ t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+ t3_port_intr_enable(adapter, p->port_id);
+ p->mac.stats.num_resets++;
+ }
+ }
+ rtnl_unlock();
+}
+
+static void t3_adap_check_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ adap_check_task.work);
+ const struct adapter_params *p = &adapter->params;
+ int port;
+ unsigned int v, status, reset;
+
+ adapter->check_task_cnt++;
+
+ check_link_status(adapter);
+
+ /*
+ * Accumulate MAC stats if needed: the task runs every
+ * linkpoll_period/10 seconds, so force a stats update once
+ * stats_update_period seconds have passed, or on every run
+ * when link polling is disabled.
+ */
+ if (!p->linkpoll_period ||
+ (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
+ p->stats_update_period) {
+ mac_stats_update(adapter);
+ adapter->check_task_cnt = 0;
+ }
+
+ if (p->rev == T3_REV_B2)
+ check_t3b2_mac(adapter);
+
+ /*
+ * Scan the XGMAC's to check for various conditions which we want to
+ * monitor in a periodic polling manner rather than via an interrupt
+ * condition. This is used for conditions which would otherwise flood
+ * the system with interrupts and we only really need to know that the
+ * conditions are "happening" ... For each condition we count the
+ * detection of the condition and reset it for the next polling loop.
+ */
+ for_each_port(adapter, port) {
+ struct cmac *mac = &adap2pinfo(adapter, port)->mac;
+ u32 cause;
+
+ cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
+ reset = 0;
+ if (cause & F_RXFIFO_OVERFLOW) {
+ mac->stats.rx_fifo_ovfl++;
+ reset |= F_RXFIFO_OVERFLOW;
+ }
+
+ t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
+ }
+
+ /*
+ * We do the same as above for FL_EMPTY interrupts.
+ */
+ status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+ reset = 0;
+
+ if (status & F_FLEMPTY) {
+ struct sge_qset *qs = &adapter->sge.qs[0];
+ int i = 0;
+
+ reset |= F_FLEMPTY;
+
+ v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
+ 0xffff;
+
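+ /*
+ * One status bit per free list, two free lists per qset: even
+ * bits are fl[0], odd bits fl[1], moving to the next qset after
+ * each odd bit.
+ */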
+ while (v) {
+ qs->fl[i].empty += (v & 1);
+ if (i)
+ qs++;
+ i ^= 1;
+ v >>= 1;
+ }
+ }
+
+ t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
+
+ /* Schedule the next check update if any port is active. */
+ spin_lock_irq(&adapter->work_lock);
+ if (adapter->open_device_map & PORT_MASK)
+ schedule_chk_task(adapter);
+ spin_unlock_irq(&adapter->work_lock);
+}
+
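+/*
+ * Doorbell FIFO notifications: relay full/empty/drop events to the
+ * registered offload clients (e.g. the iWARP driver).
+ */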
+static void db_full_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_full_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_empty_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_drop_task);
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+ /*
+ * Sleep a while before ringing the driver qset dbs.
+ * The delay is between 1000 and 2023 usecs.
+ */
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(delay));
+ ring_dbs(adapter);
+}
+
+/*
+ * Processes external (PHY) interrupts in process context.
+ */
+static void ext_intr_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ ext_intr_handler_task);
+ int i;
+
+ /* Disable link fault interrupts */
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+
+ t3_xgm_intr_disable(adapter, i);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+ }
+
+ /* Handle the PHY interrupt, then re-enable link fault interrupts */
+ t3_phy_intr_handler(adapter);
+
+ for_each_port(adapter, i)
+ t3_xgm_intr_enable(adapter, i);
+
+ /* Now reenable external interrupts */
+ spin_lock_irq(&adapter->work_lock);
+ if (adapter->slow_intr_mask) {
+ adapter->slow_intr_mask |= F_T3DBG;
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
+ t3_write_reg(adapter, A_PL_INT_ENABLE0,
+ adapter->slow_intr_mask);
+ }
+ spin_unlock_irq(&adapter->work_lock);
+}
+
+/*
+ * Interrupt-context handler for external (PHY) interrupts.
+ */
+void t3_os_ext_intr_handler(struct adapter *adapter)
+{
+ /*
+ * Schedule a task to handle external interrupts as they may be slow
+ * and we use a mutex to protect MDIO registers. We disable PHY
+ * interrupts in the meantime and let the task reenable them when
+ * it's done.
+ */
+ spin_lock(&adapter->work_lock);
+ if (adapter->slow_intr_mask) {
+ adapter->slow_intr_mask &= ~F_T3DBG;
+ t3_write_reg(adapter, A_PL_INT_ENABLE0,
+ adapter->slow_intr_mask);
+ queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
+ }
+ spin_unlock(&adapter->work_lock);
+}
+
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
+{
+ struct net_device *netdev = adapter->port[port_id];
+ struct port_info *pi = netdev_priv(netdev);
+
+ spin_lock(&adapter->work_lock);
+ pi->link_fault = 1;
+ spin_unlock(&adapter->work_lock);
+}
+
+static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
+{
+ int i, ret = 0;
+
+ if (is_offload(adapter) &&
+ test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+ offload_close(&adapter->tdev);
+ }
+
+ /* Stop all ports */
+ for_each_port(adapter, i) {
+ struct net_device *netdev = adapter->port[i];
+
+ if (netif_running(netdev))
+ __cxgb_close(netdev, on_wq);
+ }
+
+ /* Stop SGE timers */
+ t3_stop_sge_timers(adapter);
+
+ adapter->flags &= ~FULL_INIT_DONE;
+
+ if (reset)
+ ret = t3_reset_adapter(adapter);
+
+ pci_disable_device(adapter->pdev);
+
+ return ret;
+}
+
+static int t3_reenable_adapter(struct adapter *adapter)
+{
+ if (pci_enable_device(adapter->pdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ goto err;
+ }
+ pci_set_master(adapter->pdev);
+ pci_restore_state(adapter->pdev);
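+ /* re-save state so a later recovery can restore it again */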
+ pci_save_state(adapter->pdev);
+
+ /* Free sge resources */
+ t3_free_sge_resources(adapter);
+
+ if (t3_replay_prep_adapter(adapter))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static void t3_resume_ports(struct adapter *adapter)
+{
+ int i;
+
+ /* Restart the ports */
+ for_each_port(adapter, i) {
+ struct net_device *netdev = adapter->port[i];
+
+ if (netif_running(netdev)) {
+ if (cxgb_open(netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "can't bring device back up after reset\n");
+ continue;
+ }
+ }
+ }
+
+ if (is_offload(adapter) && !ofld_disable)
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
+}
+
+/*
+ * Process a fatal error: bring the ports down, reset the chip, then
+ * bring the ports back up.
+ */
+static void fatal_error_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ fatal_error_handler_task);
+ int err = 0;
+
+ rtnl_lock();
+ err = t3_adapter_error(adapter, 1, 1);
+ if (!err)
+ err = t3_reenable_adapter(adapter);
+ if (!err)
+ t3_resume_ports(adapter);
+
+ CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
+ rtnl_unlock();
+}
+
+void t3_fatal_err(struct adapter *adapter)
+{
+ unsigned int fw_status[4];
+
+ if (adapter->flags & FULL_INIT_DONE) {
+ t3_sge_stop_dma(adapter);
+ t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
+ t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
+ t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
+ t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
+
+ spin_lock(&adapter->work_lock);
+ t3_intr_disable(adapter);
+ queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
+ spin_unlock(&adapter->work_lock);
+ }
+ CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
+ if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
+ CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
+ fw_status[0], fw_status[1],
+ fw_status[2], fw_status[3]);
+}
+
+/**
+ * t3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ t3_adapter_error(adapter, 0, 0);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * t3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!t3_reenable_adapter(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * t3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void t3_io_resume(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+ t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
+ rtnl_lock();
+ t3_resume_ports(adapter);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers t3_err_handler = {
+ .error_detected = t3_io_error_detected,
+ .slot_reset = t3_io_slot_reset,
+ .resume = t3_io_resume,
+};
+
+/*
+ * Set the number of qsets based on the number of CPUs and the number of ports,
+ * not to exceed the number of available qsets, assuming there are enough qsets
+ * per port in HW.
+ */
+static void set_nqsets(struct adapter *adap)
+{
+ int i, j = 0;
+ int num_cpus = netif_get_num_default_rss_queues();
+ int hwports = adap->params.nports;
+ int nqsets = adap->msix_nvectors - 1;
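+ /* one MSI-X vector is reserved for slow-path events (msix_info[0]) */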
+
+ if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
+ if (hwports == 2 &&
+ (hwports * nqsets > SGE_QSETS ||
+ num_cpus >= nqsets / hwports))
+ nqsets /= hwports;
+ if (nqsets > num_cpus)
+ nqsets = num_cpus;
+ if (nqsets < 1 || hwports == 4)
+ nqsets = 1;
+ } else {
+ nqsets = 1;
+ }
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = j;
+ pi->nqsets = nqsets;
+ j = pi->first_qset + nqsets;
+
+ dev_info(&adap->pdev->dev,
+ "Port %d using %d queue sets.\n", i, nqsets);
+ }
+}
+
+static int cxgb_enable_msix(struct adapter *adap)
+{
+ struct msix_entry entries[SGE_QSETS + 1];
+ int vectors;
+ int i;
+
+ vectors = ARRAY_SIZE(entries);
+ for (i = 0; i < vectors; ++i)
+ entries[i].entry = i;
+
+ vectors = pci_enable_msix_range(adap->pdev, entries,
+ adap->params.nports + 1, vectors);
+ if (vectors < 0)
+ return vectors;
+
+ for (i = 0; i < vectors; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ adap->msix_nvectors = vectors;
+
+ return 0;
+}
+
+static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
+{
+ static const char *pci_variant[] = {
+ "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
+ };
+
+ int i;
+ char buf[80];
+
+ if (is_pcie(adap))
+ snprintf(buf, sizeof(buf), "%s x%d",
+ pci_variant[adap->params.pci.variant],
+ adap->params.pci.width);
+ else
+ snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
+ pci_variant[adap->params.pci.variant],
+ adap->params.pci.speed, adap->params.pci.width);
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (!test_bit(i, &adap->registered_device_map))
+ continue;
+ netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
+ ai->desc, pi->phy.desc,
+ is_offload(adap) ? "R" : "", adap->params.rev, buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ /* pointer compare: adap->name aliases the first registered netdev's name */
+ if (adap->name == dev->name && adap->params.vpd.mclk)
+ pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
+ adap->name, t3_mc7_size(&adap->cm) >> 20,
+ t3_mc7_size(&adap->pmtx) >> 20,
+ t3_mc7_size(&adap->pmrx) >> 20,
+ adap->params.vpd.sn);
+ }
+}
+
+static const struct net_device_ops cxgb_netdev_ops = {
+ .ndo_open = cxgb_open,
+ .ndo_stop = cxgb_close,
+ .ndo_start_xmit = t3_eth_xmit,
+ .ndo_get_stats = cxgb_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = cxgb_set_rxmode,
+ .ndo_eth_ioctl = cxgb_ioctl,
+ .ndo_siocdevprivate = cxgb_siocdevprivate,
+ .ndo_change_mtu = cxgb_change_mtu,
+ .ndo_set_mac_address = cxgb_set_mac_addr,
+ .ndo_fix_features = cxgb_fix_features,
+ .ndo_set_features = cxgb_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb_netpoll,
+#endif
+};
+
+static void cxgb3_init_iscsi_mac(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
+ pi->iscsic.mac_addr[3] |= 0x80;
+}
+
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int i, err;
+ resource_size_t mmio_start, mmio_len;
+ const struct adapter_info *ai;
+ struct adapter *adapter = NULL;
+ struct port_info *pi;
+
+ if (!cxgb3_wq) {
+ cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
+ if (!cxgb3_wq) {
+ pr_err("cannot initialize work queue\n");
+ return -ENOMEM;
+ }
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ /* Just info, some other driver may have claimed the device. */
+ dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+ goto out_disable_device;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_release_regions;
+ }
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+ ai = t3_get_adapter_info(ent->driver_data);
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ adapter->nofail_skb =
+ alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
+ if (!adapter->nofail_skb) {
+ dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
+ adapter->regs = ioremap(mmio_start, mmio_len);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto out_free_adapter_nofail;
+ }
+
+ adapter->pdev = pdev;
+ adapter->name = pci_name(pdev);
+ adapter->msg_enable = dflt_msg_enable;
+ adapter->mmio_len = mmio_len;
+
+ mutex_init(&adapter->mdio_lock);
+ spin_lock_init(&adapter->work_lock);
+ spin_lock_init(&adapter->stats_lock);
+
+ INIT_LIST_HEAD(&adapter->adapter_list);
+ INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
+ INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+ INIT_WORK(&adapter->db_full_task, db_full_task);
+ INIT_WORK(&adapter->db_empty_task, db_empty_task);
+ INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
+ INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
+
+ for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
+ struct net_device *netdev;
+
+ netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter->port[i] = netdev;
+ pi = netdev_priv(netdev);
+ pi->adapter = adapter;
+ pi->port_id = i;
+ netif_carrier_off(netdev);
+ netdev->irq = pdev->irq;
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len - 1;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->vlan_features |= netdev->features & VLAN_FEAT;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->netdev_ops = &cxgb_netdev_ops;
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
+ netdev->min_mtu = 81;
+ netdev->max_mtu = ETH_MAX_MTU;
+ netdev->dev_port = pi->port_id;
+ }
+
+ pci_set_drvdata(pdev, adapter);
+ if (t3_prep_adapter(adapter, ai, 1) < 0) {
+ err = -ENODEV;
+ goto out_free_dev;
+ }
+
+ /*
+ * The card is now ready to go. If any errors occur during device
+ * registration we do not fail the whole card but rather proceed only
+ * with the ports we manage to register successfully. However we must
+ * register at least one net device.
+ */
+ for_each_port(adapter, i) {
+ err = register_netdev(adapter->port[i]);
+ if (err)
+ dev_warn(&pdev->dev,
+ "cannot register net device %s, skipping\n",
+ adapter->port[i]->name);
+ else {
+ /*
+ * Change the name we use for messages to the name of
+ * the first successfully registered interface.
+ */
+ if (!adapter->registered_device_map)
+ adapter->name = adapter->port[i]->name;
+
+ __set_bit(i, &adapter->registered_device_map);
+ }
+ }
+ if (!adapter->registered_device_map) {
+ dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -ENODEV;
+ goto out_free_dev;
+ }
+
+ for_each_port(adapter, i)
+ cxgb3_init_iscsi_mac(adapter->port[i]);
+
+ /* Driver's ready. Reflect it on LEDs */
+ t3_led_ready(adapter);
+
+ if (is_offload(adapter)) {
+ __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
+ cxgb3_adapter_ofld(adapter);
+ }
+
+ /* See what interrupts we'll be using */
+ if (msi > 1 && cxgb_enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ adapter->flags |= USING_MSI;
+
+ set_nqsets(adapter);
+
+ err = sysfs_create_group(&adapter->port[0]->dev.kobj,
+ &cxgb3_attr_group);
+ if (err) {
+ dev_err(&pdev->dev, "cannot create sysfs group\n");
+ goto out_close_led;
+ }
+
+ print_port_info(adapter, ai);
+ return 0;
+
+out_close_led:
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
+
+out_free_dev:
+ iounmap(adapter->regs);
+ for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
+ if (adapter->port[i])
+ free_netdev(adapter->port[i]);
+
+out_free_adapter_nofail:
+ kfree_skb(adapter->nofail_skb);
+
+out_free_adapter:
+ kfree(adapter);
+
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out:
+ return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ if (adapter) {
+ int i;
+
+ t3_sge_stop(adapter);
+ sysfs_remove_group(&adapter->port[0]->dev.kobj,
+ &cxgb3_attr_group);
+
+ if (is_offload(adapter)) {
+ cxgb3_adapter_unofld(adapter);
+ if (test_bit(OFFLOAD_DEVMAP_BIT,
+ &adapter->open_device_map))
+ offload_close(&adapter->tdev);
+ }
+
+ for_each_port(adapter, i)
+ if (test_bit(i, &adapter->registered_device_map))
+ unregister_netdev(adapter->port[i]);
+
+ t3_stop_sge_timers(adapter);
+ t3_free_sge_resources(adapter);
+ cxgb_disable_msi(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i])
+ free_netdev(adapter->port[i]);
+
+ iounmap(adapter->regs);
+ kfree_skb(adapter->nofail_skb);
+ kfree(adapter);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ }
+}
+
+static struct pci_driver driver = {
+ .name = DRV_NAME,
+ .id_table = cxgb3_pci_tbl,
+ .probe = init_one,
+ .remove = remove_one,
+ .err_handler = &t3_err_handler,
+};
+
+static int __init cxgb3_init_module(void)
+{
+	cxgb3_offload_init();
+
+	return pci_register_driver(&driver);
+}
+
+static void __exit cxgb3_cleanup_module(void)
+{
+ pci_unregister_driver(&driver);
+ if (cxgb3_wq)
+ destroy_workqueue(cxgb3_wq);
+}
+
+module_init(cxgb3_init_module);
+module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
new file mode 100644
index 0000000000..89256b8668
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1402 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/neighbour.h>
+#include <linux/notifier.h>
+#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/if_vlan.h>
+#include <net/netevent.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include "common.h"
+#include "regs.h"
+#include "cxgb3_ioctl.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+static LIST_HEAD(client_list);
+static LIST_HEAD(ofld_dev_list);
+static DEFINE_MUTEX(cxgb3_db_lock);
+
+static DEFINE_RWLOCK(adapter_list_lock);
+static LIST_HEAD(adapter_list);
+
+static const unsigned int MAX_ATIDS = 64 * 1024;
+static const unsigned int ATID_BASE = 0x10000;
+
+static void cxgb_neigh_update(struct neighbour *neigh);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh, const void *daddr);
+
+static inline int offload_activated(struct t3cdev *tdev)
+{
+ const struct adapter *adapter = tdev2adap(tdev);
+
+ return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+/**
+ * cxgb3_register_client - register an offload client
+ * @client: the client
+ *
+ * Add the client to the client list and call back the client's add
+ * method for each offload device that is already activated.
+ */
+void cxgb3_register_client(struct cxgb3_client *client)
+{
+ struct t3cdev *tdev;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_add_tail(&client->client_list, &client_list);
+
+ if (client->add) {
+ list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+ if (offload_activated(tdev))
+ client->add(tdev);
+ }
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+EXPORT_SYMBOL(cxgb3_register_client);
+
+/**
+ * cxgb3_unregister_client - unregister an offload client
+ * @client: the client
+ *
+ * Remove the client from the client list and call back the client's
+ * remove method for each offload device that is still activated.
+ */
+void cxgb3_unregister_client(struct cxgb3_client *client)
+{
+ struct t3cdev *tdev;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_del(&client->client_list);
+
+ if (client->remove) {
+ list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+ if (offload_activated(tdev))
+ client->remove(tdev);
+ }
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+EXPORT_SYMBOL(cxgb3_unregister_client);
+
+/**
+ * cxgb3_add_clients - activate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Calls back all registered clients once an offload device is activated.
+ */
+void cxgb3_add_clients(struct t3cdev *tdev)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->add)
+ client->add(tdev);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+/**
+ * cxgb3_remove_clients - deactivate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Calls back all registered clients once an offload device is deactivated.
+ */
+void cxgb3_remove_clients(struct t3cdev *tdev)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->remove)
+ client->remove(tdev);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->event_handler)
+ client->event_handler(tdev, event, port);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static struct net_device *get_iff_from_mac(struct adapter *adapter,
+ const unsigned char *mac,
+ unsigned int vlan)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+
+ if (ether_addr_equal(dev->dev_addr, mac)) {
+ rcu_read_lock();
+ if (vlan && vlan != VLAN_VID_MASK) {
+ dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
+ } else if (netif_is_bond_slave(dev)) {
+ struct net_device *upper_dev;
+
+ while ((upper_dev =
+ netdev_master_upper_dev_get_rcu(dev)))
+ dev = upper_dev;
+ }
+ rcu_read_unlock();
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
+ void *data)
+{
+ int i;
+ int ret = 0;
+ unsigned int val = 0;
+ struct ulp_iscsi_info *uiip = data;
+
+ switch (req) {
+ case ULP_ISCSI_GET_PARAMS:
+ uiip->pdev = adapter->pdev;
+ uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
+ uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
+ uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
+
+ val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
+ for (i = 0; i < 4; i++, val >>= 8)
+ uiip->pgsz_factor[i] = val & 0xFF;
+
+ val = t3_read_reg(adapter, A_TP_PARA_REG7);
+ uiip->max_txsz =
+ uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
+ (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
+ /*
+ * On tx, the iscsi pdu has to be <= tx page size and has to
+ * fit into the Tx PM FIFO.
+ */
+ val = min(adapter->params.tp.tx_pg_size,
+ t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
+ uiip->max_txsz = min(val, uiip->max_txsz);
+
+ /* set MaxRxData to 16224 */
+ val = t3_read_reg(adapter, A_TP_PARA_REG2);
+ if ((val >> S_MAXRXDATA) != 0x3f60) {
+ val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
+ val |= V_MAXRXDATA(0x3f60);
+ pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
+ adapter->name, val);
+ t3_write_reg(adapter, A_TP_PARA_REG2, val);
+ }
+
+ /*
+ * on rx, the iscsi pdu has to be < rx page size and the
+ * max rx data length programmed in TP
+ */
+ val = min(adapter->params.tp.rx_pg_size,
+ ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
+ S_MAXRXDATA) & M_MAXRXDATA);
+ uiip->max_rxsz = min(val, uiip->max_rxsz);
+ break;
+ case ULP_ISCSI_SET_PARAMS:
+ t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
+		/* program the ddp page sizes: pgsz_factor[i] occupies
+		 * byte i of the ULPRX_ISCSI_PSZ register */
+ for (i = 0; i < 4; i++)
+ val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
+ if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
+ pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
+ adapter->name, val, uiip->pgsz_factor[0],
+ uiip->pgsz_factor[1], uiip->pgsz_factor[2],
+ uiip->pgsz_factor[3]);
+ t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+/* Response queue used for RDMA events. */
+#define ASYNC_NOTIF_RSPQ 0
+
+static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
+{
+ int ret = 0;
+
+ switch (req) {
+ case RDMA_GET_PARAMS: {
+ struct rdma_info *rdma = data;
+ struct pci_dev *pdev = adapter->pdev;
+
+ rdma->udbell_physbase = pci_resource_start(pdev, 2);
+ rdma->udbell_len = pci_resource_len(pdev, 2);
+		rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
+		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+		rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
+		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+ rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+ rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+ rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+ rdma->pdev = pdev;
+ break;
+ }
+ case RDMA_CQ_OP:{
+ unsigned long flags;
+ struct rdma_cq_op *rdma = data;
+
+ /* may be called in any context */
+ spin_lock_irqsave(&adapter->sge.reg_lock, flags);
+ ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
+ rdma->credits);
+ spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
+ break;
+ }
+ case RDMA_GET_MEM:{
+ struct ch_mem_range *t = data;
+ struct mc7 *mem;
+
+ if ((t->addr & 7) || (t->len & 7))
+ return -EINVAL;
+ if (t->mem_id == MEM_CM)
+ mem = &adapter->cm;
+ else if (t->mem_id == MEM_PMRX)
+ mem = &adapter->pmrx;
+ else if (t->mem_id == MEM_PMTX)
+ mem = &adapter->pmtx;
+ else
+ return -EINVAL;
+
+		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
+				     (u64 *)t->buf);
+ if (ret)
+ return ret;
+ break;
+ }
+ case RDMA_CQ_SETUP:{
+ struct rdma_cq_setup *rdma = data;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
+					  rdma->base_addr, rdma->size,
+					  ASYNC_NOTIF_RSPQ,
+					  rdma->ovfl_mode, rdma->credits,
+					  rdma->credit_thres);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ }
+ case RDMA_CQ_DISABLE:
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ case RDMA_CTRL_QP_SETUP:{
+ struct rdma_ctrlqp_setup *rdma = data;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
+ SGE_CNTXT_RDMA,
+ ASYNC_NOTIF_RSPQ,
+ rdma->base_addr, rdma->size,
+ FW_RI_TID_START, 1, 0);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ }
+ case RDMA_GET_MIB: {
+ spin_lock(&adapter->stats_lock);
+ t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
+ spin_unlock(&adapter->stats_lock);
+ break;
+ }
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
+{
+ struct adapter *adapter = tdev2adap(tdev);
+ struct tid_range *tid;
+ struct mtutab *mtup;
+ struct iff_mac *iffmacp;
+ struct ddp_params *ddpp;
+ struct adap_ports *ports;
+ struct ofld_page_info *rx_page_info;
+ struct tp_params *tp = &adapter->params.tp;
+ int i;
+
+ switch (req) {
+ case GET_MAX_OUTSTANDING_WR:
+ *(unsigned int *)data = FW_WR_NUM;
+ break;
+ case GET_WR_LEN:
+ *(unsigned int *)data = WR_FLITS;
+ break;
+ case GET_TX_MAX_CHUNK:
+ *(unsigned int *)data = 1 << 20; /* 1MB */
+ break;
+ case GET_TID_RANGE:
+ tid = data;
+ tid->num = t3_mc5_size(&adapter->mc5) -
+ adapter->params.mc5.nroutes -
+ adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
+ tid->base = 0;
+ break;
+ case GET_STID_RANGE:
+ tid = data;
+ tid->num = adapter->params.mc5.nservers;
+ tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
+ adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
+ break;
+ case GET_L2T_CAPACITY:
+ *(unsigned int *)data = 2048;
+ break;
+ case GET_MTUS:
+ mtup = data;
+ mtup->size = NMTUS;
+ mtup->mtus = adapter->params.mtus;
+ break;
+ case GET_IFF_FROM_MAC:
+ iffmacp = data;
+ iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
+ iffmacp->vlan_tag &
+ VLAN_VID_MASK);
+ break;
+ case GET_DDP_PARAMS:
+ ddpp = data;
+ ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
+ ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
+ ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
+ break;
+ case GET_PORTS:
+ ports = data;
+ ports->nports = adapter->params.nports;
+ for_each_port(adapter, i)
+ ports->lldevs[i] = adapter->port[i];
+ break;
+ case ULP_ISCSI_GET_PARAMS:
+ case ULP_ISCSI_SET_PARAMS:
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ return cxgb_ulp_iscsi_ctl(adapter, req, data);
+ case RDMA_GET_PARAMS:
+ case RDMA_CQ_OP:
+ case RDMA_CQ_SETUP:
+ case RDMA_CQ_DISABLE:
+ case RDMA_CTRL_QP_SETUP:
+ case RDMA_GET_MEM:
+ case RDMA_GET_MIB:
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ return cxgb_rdma_ctl(adapter, req, data);
+ case GET_RX_PAGE_INFO:
+ rx_page_info = data;
+ rx_page_info->page_size = tp->rx_pg_size;
+ rx_page_info->num = tp->rx_num_pgs;
+ break;
+ case GET_ISCSI_IPV4ADDR: {
+ struct iscsi_ipv4addr *p = data;
+		struct port_info *pi = netdev_priv(p->dev);
+
+ p->ipv4addr = pi->iscsi_ipv4addr;
+ break;
+ }
+ case GET_EMBEDDED_INFO: {
+ struct ch_embedded_info *e = data;
+
+ spin_lock(&adapter->stats_lock);
+ t3_get_fw_version(adapter, &e->fw_vers);
+ t3_get_tp_version(adapter, &e->tp_vers);
+ spin_unlock(&adapter->stats_lock);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * Dummy handler for Rx offload packets in case we get an offload packet before
+ * proper processing is set up.  It silently drops the packets, as it isn't
+ * normal to receive offload packets at this stage.
+ */
+static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
+ int n)
+{
+ while (n--)
+ dev_kfree_skb_any(skbs[n]);
+ return 0;
+}
+
+static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
+{
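+	/* intentionally empty: neighbour updates are ignored until offload
+	 * processing is set up */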
+}
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev)
+{
+ dev->recv = rx_offload_blackhole;
+ dev->neigh_update = dummy_neigh_update;
+}
+
+/*
+ * Free an active-open TID.
+ */
+void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+ union active_open_entry *p = atid2entry(t, atid);
+ void *ctx = p->t3c_tid.ctx;
+
+ spin_lock_bh(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ spin_unlock_bh(&t->atid_lock);
+
+ return ctx;
+}
+EXPORT_SYMBOL(cxgb3_free_atid);
+
+/*
+ * Free a server TID and return it to the free pool.
+ */
+void cxgb3_free_stid(struct t3cdev *tdev, int stid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+ union listen_entry *p = stid2entry(t, stid);
+
+ spin_lock_bh(&t->stid_lock);
+ p->next = t->sfree;
+ t->sfree = p;
+ t->stids_in_use--;
+ spin_unlock_bh(&t->stid_lock);
+}
+EXPORT_SYMBOL(cxgb3_free_stid);
+
+void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx, unsigned int tid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ t->tid_tab[tid].client = client;
+ t->tid_tab[tid].ctx = ctx;
+ atomic_inc(&t->tids_in_use);
+}
+EXPORT_SYMBOL(cxgb3_insert_tid);
+
+/*
+ * Populate a TID_RELEASE WR.  The skb must already be properly sized.
+ */
+static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ skb->priority = CPL_PRIORITY_SETUP;
+ req = __skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+static void t3_process_tid_release_list(struct work_struct *work)
+{
+ struct t3c_data *td = container_of(work, struct t3c_data,
+ tid_release_task);
+ struct sk_buff *skb;
+ struct t3cdev *tdev = td->dev;
+
+ spin_lock_bh(&td->tid_release_lock);
+ while (td->tid_release_list) {
+ struct t3c_tid_entry *p = td->tid_release_list;
+
+ td->tid_release_list = p->ctx;
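+		/* drop the lock across allocation and send so that new
+		 * releases can still be queued meanwhile */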
+ spin_unlock_bh(&td->tid_release_lock);
+
+ skb = alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+ if (!skb)
+ skb = td->nofail_skb;
+ if (!skb) {
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ td->tid_release_list = p;
+ break;
+ }
+ mk_tid_release(skb, p - td->tid_maps.tid_tab);
+ cxgb3_ofld_send(tdev, skb);
+ p->ctx = NULL;
+ if (skb == td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+ spin_lock_bh(&td->tid_release_lock);
+ }
+ td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
+ spin_unlock_bh(&td->tid_release_lock);
+
+ if (!td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+}
+
+/* use ctx as a next pointer in the tid release list */
+void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
+{
+ struct t3c_data *td = T3C_DATA(tdev);
+ struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
+
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ p->client = NULL;
+ td->tid_release_list = p;
+ if (!p->ctx || td->release_list_incomplete)
+ schedule_work(&td->tid_release_task);
+ spin_unlock_bh(&td->tid_release_lock);
+}
+EXPORT_SYMBOL(cxgb3_queue_tid_release);
+
+/*
+ * Remove a tid from the TID table. A client may defer processing its last
+ * CPL message if it is locked at the time it arrives, and while the message
+ * sits in the client's backlog the TID may be reused for another connection.
+ * To handle this we atomically switch the TID association if it still points
+ * to the original client context.
+ */
+void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ BUG_ON(tid >= t->ntids);
+ if (tdev->type == T3A)
+ (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
+ else {
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+ if (likely(skb)) {
+ mk_tid_release(skb, tid);
+ cxgb3_ofld_send(tdev, skb);
+ t->tid_tab[tid].ctx = NULL;
+ } else
+ cxgb3_queue_tid_release(tdev, tid);
+ }
+ atomic_dec(&t->tids_in_use);
+}
+EXPORT_SYMBOL(cxgb3_remove_tid);
+
+int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx)
+{
+ int atid = -1;
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ spin_lock_bh(&t->atid_lock);
+ if (t->afree &&
+ t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
+ t->ntids) {
+ union active_open_entry *p = t->afree;
+
+ atid = (p - t->atid_tab) + t->atid_base;
+ t->afree = p->next;
+ p->t3c_tid.ctx = ctx;
+ p->t3c_tid.client = client;
+ t->atids_in_use++;
+ }
+ spin_unlock_bh(&t->atid_lock);
+ return atid;
+}
+EXPORT_SYMBOL(cxgb3_alloc_atid);
+
+int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx)
+{
+ int stid = -1;
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ spin_lock_bh(&t->stid_lock);
+ if (t->sfree) {
+ union listen_entry *p = t->sfree;
+
+ stid = (p - t->stid_tab) + t->stid_base;
+ t->sfree = p->next;
+ p->t3c_tid.ctx = ctx;
+ p->t3c_tid.client = client;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb3_alloc_stid);
+
+/* Get the t3cdev associated with a net_device */
+struct t3cdev *dev2t3cdev(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return (struct t3cdev *)pi->adapter;
+}
+EXPORT_SYMBOL(dev2t3cdev);
+
+static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_smt_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_rte_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+ unsigned int atid = G_TID(ntohl(rpl->atid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+ t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
+		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]
+						(dev, skb, t3c_tid->ctx);
+ } else {
+ pr_err("%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_OPEN_RPL);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int stid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode] (dev, skb,
+ t3c_tid->ctx);
+ } else {
+ pr_err("%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ pr_err("%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+ struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
+
+ if (unlikely(tid >= t->ntids)) {
+		pr_err("%s: passive open TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_stid(t, stid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
+ return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ pr_err("%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_PASS_ACCEPT_REQ);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+/*
+ * Returns an sk_buff for a reply CPL message of size len. If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated. The input skb must be of size at least len. Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+ gfp_t gfp)
+{
+ if (likely(!skb_cloned(skb))) {
+ BUG_ON(skb->len < len);
+ __skb_trim(skb, len);
+ skb_get(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ if (skb)
+ __skb_put(skb, len);
+ }
+ return skb;
+}
+
+static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *reply_skb;
+ unsigned int tid = GET_TID(req);
+ u8 cmd = req->status;
+
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ goto out;
+
+ reply_skb = cxgb3_get_cpl_reply_skb(skb,
+						    sizeof(struct cpl_abort_rpl),
+ GFP_ATOMIC);
+
+ if (!reply_skb) {
+			pr_err("do_abort_req_rss: couldn't get skb!\n");
+ goto out;
+ }
+ reply_skb->priority = CPL_PRIORITY_DATA;
+ __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+ rpl = cplhdr(reply_skb);
+ rpl->wr.wr_hi =
+ htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+ rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = cmd;
+ cxgb3_ofld_send(dev, reply_skb);
+out:
+ return CPL_RET_BUF_DONE;
+ }
+}
+
+static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+ struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
+
+ if (unlikely(tid >= t->ntids)) {
+		pr_err("%s: active establish TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_atid(t, atid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
+ return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ pr_err("%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_ESTABLISH);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_trace_pkt *p = cplhdr(skb);
+
+ skb->protocol = htons(0xffff);
+ skb->dev = dev->lldev;
+ skb_pull(skb, sizeof(*p));
+ skb_reset_mac_header(skb);
+ netif_receive_skb(skb);
+ return 0;
+}
+
+/*
+ * The skb had better have come from process_responses(), where we abuse
+ * ->priority and ->csum to carry our data.  NB: if we ever get a per-arch
+ * ->csum, things might get really interesting here.
+ */
+
+static inline u32 get_hwtid(struct sk_buff *skb)
+{
+ return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
+}
+
+static inline u32 get_opcode(struct sk_buff *skb)
+{
+ return G_OPCODE(ntohl((__force __be32)skb->csum));
+}
+
+static int do_term(struct t3cdev *dev, struct sk_buff *skb)
+{
+ unsigned int hwtid = get_hwtid(skb);
+ unsigned int opcode = get_opcode(skb);
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[opcode]) {
+ return t3c_tid->client->handlers[opcode] (dev, skb,
+ t3c_tid->ctx);
+ } else {
+ pr_err("%s: received clientless CPL command 0x%x\n",
+ dev->name, opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int nb_callback(struct notifier_block *self, unsigned long event,
+ void *ctx)
+{
+ switch (event) {
+ case (NETEVENT_NEIGH_UPDATE):{
+ cxgb_neigh_update((struct neighbour *)ctx);
+ break;
+ }
+ case (NETEVENT_REDIRECT):{
+ struct netevent_redirect *nr = ctx;
+ cxgb_redirect(nr->old, nr->new, nr->neigh,
+ nr->daddr);
+ cxgb_neigh_update(nr->neigh);
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block nb = {
+ .notifier_call = nb_callback
+};
+
+/*
+ * Process a received packet with an unknown/unexpected CPL opcode.
+ */
+static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+}
+
+/*
+ * Handlers for each CPL opcode
+ */
+static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
+
+/*
+ * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
+ * to unregister an existing handler.
+ */
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
+{
+ if (opcode < NUM_CPL_CMDS)
+ cpl_handlers[opcode] = h ? h : do_bad_cpl;
+ else
+ pr_err("T3C: handler registration for opcode %x failed\n",
+ opcode);
+}
+EXPORT_SYMBOL(t3_register_cpl_handler);
+
+/*
+ * T3CDEV's receive method.
+ */
+static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
+{
+ while (n--) {
+ struct sk_buff *skb = *skbs++;
+ unsigned int opcode = get_opcode(skb);
+ int ret = cpl_handlers[opcode] (dev, skb);
+
+#if VALIDATE_TID
+ if (ret & CPL_RET_UNKNOWN_TID) {
+ union opcode_tid *p = cplhdr(skb);
+
+ pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
+ dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
+ }
+#endif
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+ }
+ return 0;
+}
+
+/*
+ * Sends an sk_buff to a T3C driver after dealing with any active network taps.
+ */
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
+{
+ int r;
+
+ local_bh_disable();
+ r = dev->send(dev, skb);
+ local_bh_enable();
+ return r;
+}
+EXPORT_SYMBOL(cxgb3_ofld_send);
+
+static int is_offloading(struct net_device *dev)
+{
+ struct adapter *adapter;
+ int i;
+
+ read_lock_bh(&adapter_list_lock);
+ list_for_each_entry(adapter, &adapter_list, adapter_list) {
+ for_each_port(adapter, i) {
+ if (dev == adapter->port[i]) {
+ read_unlock_bh(&adapter_list_lock);
+ return 1;
+ }
+ }
+ }
+ read_unlock_bh(&adapter_list_lock);
+ return 0;
+}
+
+static void cxgb_neigh_update(struct neighbour *neigh)
+{
+ struct net_device *dev;
+
+ if (!neigh)
+ return;
+ dev = neigh->dev;
+ if (dev && (is_offloading(dev))) {
+ struct t3cdev *tdev = dev2t3cdev(dev);
+
+ BUG_ON(!tdev);
+ t3_l2t_update(tdev, neigh);
+ }
+}
+
+static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb) {
+ pr_err("%s: cannot allocate skb!\n", __func__);
+ return;
+ }
+ skb->priority = CPL_PRIORITY_CONTROL;
+ req = skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = 0;
+ req->cpu_idx = 0;
+ req->word = htons(W_TCB_L2T_IX);
+ req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
+ req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
+ tdev->send(tdev, skb);
+}
+
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh,
+ const void *daddr)
+{
+ struct net_device *dev;
+ struct tid_info *ti;
+ struct t3cdev *tdev;
+ u32 tid;
+ int update_tcb;
+ struct l2t_entry *e;
+ struct t3c_tid_entry *te;
+
+ dev = neigh->dev;
+
+ if (!is_offloading(dev))
+ return;
+ tdev = dev2t3cdev(dev);
+ BUG_ON(!tdev);
+
+ /* Add new L2T entry */
+ e = t3_l2t_get(tdev, new, dev, daddr);
+ if (!e) {
+ pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
+ return;
+ }
+
+ /* Walk tid table and notify clients of dst change. */
+ ti = &(T3C_DATA(tdev))->tid_maps;
+ for (tid = 0; tid < ti->ntids; tid++) {
+ te = lookup_tid(ti, tid);
+ BUG_ON(!te);
+ if (te && te->ctx && te->client && te->client->redirect) {
+ update_tcb = te->client->redirect(te->ctx, old, new, e);
+ if (update_tcb) {
+ rcu_read_lock();
+ l2t_hold(L2DATA(tdev), e);
+ rcu_read_unlock();
+ set_l2t_ix(tdev, tid, e);
+ }
+ }
+ }
+ l2t_release(tdev, e);
+}
+
+/*
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
+ unsigned int natids, unsigned int nstids,
+ unsigned int atid_base, unsigned int stid_base)
+{
+ unsigned long size = ntids * sizeof(*t->tid_tab) +
+ natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
+
+ t->tid_tab = kvzalloc(size, GFP_KERNEL);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
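+	/* stid_tab and atid_tab are carved out of the tail of the single
+	 * tid_tab allocation above */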
+ t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
+ t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
+ t->ntids = ntids;
+ t->nstids = nstids;
+ t->stid_base = stid_base;
+ t->sfree = NULL;
+ t->natids = natids;
+ t->atid_base = atid_base;
+ t->afree = NULL;
+ t->stids_in_use = t->atids_in_use = 0;
+ atomic_set(&t->tids_in_use, 0);
+ spin_lock_init(&t->stid_lock);
+ spin_lock_init(&t->atid_lock);
+
+ /*
+ * Setup the free lists for stid_tab and atid_tab.
+ */
+ if (nstids) {
+ while (--nstids)
+ t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
+ t->sfree = t->stid_tab;
+ }
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+ return 0;
+}
+
+static void free_tid_maps(struct tid_info *t)
+{
+ kvfree(t->tid_tab);
+}
+
+static inline void add_adapter(struct adapter *adap)
+{
+ write_lock_bh(&adapter_list_lock);
+ list_add_tail(&adap->adapter_list, &adapter_list);
+ write_unlock_bh(&adapter_list_lock);
+}
+
+static inline void remove_adapter(struct adapter *adap)
+{
+ write_lock_bh(&adapter_list_lock);
+ list_del(&adap->adapter_list);
+ write_unlock_bh(&adapter_list_lock);
+}
+
+int cxgb3_offload_activate(struct adapter *adapter)
+{
+ struct t3cdev *dev = &adapter->tdev;
+ int natids, err;
+ struct t3c_data *t;
+ struct tid_range stid_range, tid_range;
+ struct mtutab mtutab;
+ unsigned int l2t_capacity;
+ struct l2t_data *l2td;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ err = -EOPNOTSUPP;
+ if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
+ dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
+ dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
+ dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
+ dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
+ dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
+ goto out_free;
+
+ err = -ENOMEM;
+ l2td = t3_init_l2t(l2t_capacity);
+ if (!l2td)
+ goto out_free;
+
+ natids = min(tid_range.num / 2, MAX_ATIDS);
+ err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
+ stid_range.num, ATID_BASE, stid_range.base);
+ if (err)
+ goto out_free_l2t;
+
+ t->mtus = mtutab.mtus;
+ t->nmtus = mtutab.size;
+
+ INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
+ spin_lock_init(&t->tid_release_lock);
+ INIT_LIST_HEAD(&t->list_node);
+ t->dev = dev;
+
+ RCU_INIT_POINTER(dev->l2opt, l2td);
+ T3C_DATA(dev) = t;
+ dev->recv = process_rx;
+ dev->neigh_update = t3_l2t_update;
+
+ /* Register netevent handler once */
+ if (list_empty(&adapter_list))
+ register_netevent_notifier(&nb);
+
+ t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
+ t->release_list_incomplete = 0;
+
+ add_adapter(adapter);
+ return 0;
+
+out_free_l2t:
+ kvfree(l2td);
+out_free:
+ kfree(t);
+ return err;
+}
+
+static void clean_l2_data(struct rcu_head *head)
+{
+	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
+
+	kvfree(d);
+}
+
+void cxgb3_offload_deactivate(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+ struct t3c_data *t = T3C_DATA(tdev);
+ struct l2t_data *d;
+
+ remove_adapter(adapter);
+ if (list_empty(&adapter_list))
+ unregister_netevent_notifier(&nb);
+
+ free_tid_maps(&t->tid_maps);
+ T3C_DATA(tdev) = NULL;
+ rcu_read_lock();
+ d = L2DATA(tdev);
+ rcu_read_unlock();
+ RCU_INIT_POINTER(tdev->l2opt, NULL);
+ call_rcu(&d->rcu_head, clean_l2_data);
+ kfree_skb(t->nofail_skb);
+ kfree(t);
+}
+
+static inline void register_tdev(struct t3cdev *tdev)
+{
+ static int unit;
+
+ mutex_lock(&cxgb3_db_lock);
+ snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
+ list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline void unregister_tdev(struct t3cdev *tdev)
+{
+ mutex_lock(&cxgb3_db_lock);
+ list_del(&tdev->ofld_dev_list);
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline int adap2type(struct adapter *adapter)
+{
+ int type = 0;
+
+ switch (adapter->params.rev) {
+ case T3_REV_A:
+ type = T3A;
+ break;
+ case T3_REV_B:
+ case T3_REV_B2:
+ type = T3B;
+ break;
+ case T3_REV_C:
+ type = T3C;
+ break;
+ }
+ return type;
+}
+
+void cxgb3_adapter_ofld(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+
+ INIT_LIST_HEAD(&tdev->ofld_dev_list);
+
+ cxgb3_set_dummy_ops(tdev);
+ tdev->send = t3_offload_tx;
+ tdev->ctl = cxgb_offload_ctl;
+ tdev->type = adap2type(adapter);
+
+ register_tdev(tdev);
+}
+
+void cxgb3_adapter_unofld(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+
+ tdev->recv = NULL;
+ tdev->neigh_update = NULL;
+
+ unregister_tdev(tdev);
+}
+
+void __init cxgb3_offload_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_CPL_CMDS; ++i)
+ cpl_handlers[i] = do_bad_cpl;
+
+ t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
+ t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
+ t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
+ t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
+ t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
+ t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
+ t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
+ t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
+ t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
+ t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
+ t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
+ t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
new file mode 100644
index 0000000000..929c298115
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_H
+#define _CXGB3_OFFLOAD_H
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+
+#include "l2t.h"
+
+#include "t3cdev.h"
+#include "t3_cpl.h"
+
+struct adapter;
+
+void cxgb3_offload_init(void);
+
+void cxgb3_adapter_ofld(struct adapter *adapter);
+void cxgb3_adapter_unofld(struct adapter *adapter);
+int cxgb3_offload_activate(struct adapter *adapter);
+void cxgb3_offload_deactivate(struct adapter *adapter);
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev);
+
+struct t3cdev *dev2t3cdev(struct net_device *dev);
+
+/*
+ * Client registration.  Users of the T3 driver must register themselves.
+ * The T3 driver will call the add function of every client for each T3
+ * adapter activated, passing up the t3cdev pointer.  Each client fills out
+ * an array of callback functions to process CPL messages.
+ */
+
+void cxgb3_register_client(struct cxgb3_client *client);
+void cxgb3_unregister_client(struct cxgb3_client *client);
+void cxgb3_add_clients(struct t3cdev *tdev);
+void cxgb3_remove_clients(struct t3cdev *tdev);
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
+
+typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
+ struct sk_buff *skb, void *ctx);
+
+enum {
+ OFFLOAD_STATUS_UP,
+ OFFLOAD_STATUS_DOWN,
+ OFFLOAD_PORT_DOWN,
+ OFFLOAD_PORT_UP,
+ OFFLOAD_DB_FULL,
+ OFFLOAD_DB_EMPTY,
+ OFFLOAD_DB_DROP
+};
+
+struct cxgb3_client {
+ char *name;
+ void (*add) (struct t3cdev *);
+ void (*remove) (struct t3cdev *);
+ cxgb3_cpl_handler_func *handlers;
+ int (*redirect)(void *ctx, struct dst_entry *old,
+ struct dst_entry *new, struct l2t_entry *l2t);
+ struct list_head client_list;
+ void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
+};
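+
+/*
+ * Illustrative registration sketch (not part of the driver; the my_*
+ * names are hypothetical).  A client points ->handlers at a table indexed
+ * by CPL opcode and registers; its ->add() is then called for every
+ * offload device that is already activated:
+ *
+ *	static int my_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb,
+ *				   void *ctx)
+ *	{
+ *		(process the reply for the connection held in ctx)
+ *		return CPL_RET_BUF_DONE;
+ *	}
+ *
+ *	static cxgb3_cpl_handler_func my_handlers[NUM_CPL_CMDS] = {
+ *		[CPL_ACT_OPEN_RPL] = my_act_open_rpl,
+ *	};
+ *
+ *	static struct cxgb3_client my_client = {
+ *		.name	  = "my_ulp",
+ *		.add	  = my_add,
+ *		.remove	  = my_remove,
+ *		.handlers = my_handlers,
+ *	};
+ *
+ *	cxgb3_register_client(&my_client);
+ */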
+
+/*
+ * TID allocation services.
+ */
+int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
+ void *ctx);
+int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
+ void *ctx);
+void *cxgb3_free_atid(struct t3cdev *dev, int atid);
+void cxgb3_free_stid(struct t3cdev *dev, int stid);
+void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
+ void *ctx, unsigned int tid);
+void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
+void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
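+
+/*
+ * Illustrative active-open flow under the API above (hypothetical caller,
+ * error handling elided).  The atid is only held until the hardware
+ * assigns a real TID:
+ *
+ *	atid = cxgb3_alloc_atid(tdev, &my_client, my_ctx);
+ *	(send a CPL_ACT_OPEN_REQ carrying atid)
+ *	(on CPL_ACT_ESTABLISH the hardware supplies hwtid)
+ *	cxgb3_insert_tid(tdev, &my_client, my_ctx, hwtid);
+ *	cxgb3_free_atid(tdev, atid);
+ *	(connection runs; at teardown)
+ *	cxgb3_remove_tid(tdev, my_ctx, hwtid);
+ */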
+
+struct t3c_tid_entry {
+ struct cxgb3_client *client;
+ void *ctx;
+};
+
+/* CPL message priority levels */
+enum {
+ CPL_PRIORITY_DATA = 0, /* data messages */
+ CPL_PRIORITY_SETUP = 1, /* connection setup messages */
+ CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
+ CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
+ CPL_PRIORITY_ACK = 1, /* RX ACK messages */
+ CPL_PRIORITY_CONTROL = 1 /* offload control messages */
+};
+
+/* Flags for return value of CPL message handlers */
+enum {
+ CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
+ CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
+ CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
+};
+
+typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
+
+/*
+ * Returns a pointer to the first byte of the CPL header in an sk_buff that
+ * contains a CPL message.
+ */
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
+
+union listen_entry {
+ struct t3c_tid_entry t3c_tid;
+ union listen_entry *next;
+};
+
+union active_open_entry {
+ struct t3c_tid_entry t3c_tid;
+ union active_open_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc. of the TID, server TID,
+ * and active-open TID tables for an offload device.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ struct t3c_tid_entry *tid_tab;
+ unsigned int ntids;
+ atomic_t tids_in_use;
+
+ union listen_entry *stid_tab;
+ unsigned int nstids;
+ unsigned int stid_base;
+
+ union active_open_entry *atid_tab;
+ unsigned int natids;
+ unsigned int atid_base;
+
+ /*
+ * The following members are accessed R/W so we put them in their own
+ * cache lines.
+ *
+ * XXX We could combine the atid fields above with the lock here since
+ * atids are used once (unlike other tids). OTOH the above fields are
+ * usually in cache due to tid_tab.
+ */
+ spinlock_t atid_lock ____cacheline_aligned_in_smp;
+ union active_open_entry *afree;
+ unsigned int atids_in_use;
+
+ spinlock_t stid_lock ____cacheline_aligned;
+ union listen_entry *sfree;
+ unsigned int stids_in_use;
+};
+
+struct t3c_data {
+ struct list_head list_node;
+ struct t3cdev *dev;
+ unsigned int tx_max_chunk; /* max payload for TX_DATA */
+ unsigned int max_wrs; /* max in-flight WRs per connection */
+ unsigned int nmtus;
+ const unsigned short *mtus;
+ struct tid_info tid_maps;
+
+ struct t3c_tid_entry *tid_release_list;
+ spinlock_t tid_release_lock;
+ struct work_struct tid_release_task;
+
+ struct sk_buff *nofail_skb;
+ unsigned int release_list_incomplete;
+};
+
+/*
+ * t3cdev -> t3c_data accessor.  Expands to an lvalue, so it can also be
+ * assigned to (see cxgb3_offload_activate()).
+ */
+#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
new file mode 100644
index 0000000000..0d9b0e6dcc
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FIRMWARE_EXPORTS_H_
+#define _FIRMWARE_EXPORTS_H_
+
+/* WR OPCODES supported by the firmware.
+ */
+#define FW_WROPCODE_FORWARD 0x01
+#define FW_WROPCODE_BYPASS 0x05
+
+#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
+
+#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
+#define FW_WROPCODE_ULPTX_MEM_READ 0x02
+#define FW_WROPCODE_ULPTX_PKT 0x04
+#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
+
+#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
+
+#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
+#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
+#define FW_WROPCODE_OFLD_TX_DATA 0x0D
+#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
+
+#define FW_WROPCODE_RI_RDMA_INIT 0x10
+#define FW_WROPCODE_RI_RDMA_WRITE 0x11
+#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
+#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
+#define FW_WROPCODE_RI_SEND 0x14
+#define FW_WROPCODE_RI_TERMINATE 0x15
+#define FW_WROPCODE_RI_RDMA_READ 0x16
+#define FW_WROPCODE_RI_RECEIVE 0x17
+#define FW_WROPCODE_RI_BIND_MW 0x18
+#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
+#define FW_WROPCODE_RI_LOCAL_INV 0x1A
+#define FW_WROPCODE_RI_MODIFY_QP 0x1B
+#define FW_WROPCODE_RI_BYPASS 0x1C
+
+#define FW_WROPOCDE_RSVD 0x1E
+
+#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
+
+#define FW_WROPCODE_MNGT 0x1D
+#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
+
+/* Maximum size of a WR sent from the host, limited by the SGE.
+ *
+ * Note: WRs coming from ULP or TP are limited only by the CIM.
+ */
+#define FW_WR_SIZE 128
+
+/* Maximum number of outstanding WRs sent from the host. Value must be
+ * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
+ * offload modules to limit the number of WRs per connection.
+ */
+#define FW_T3_WR_NUM 16
+#define FW_N3_WR_NUM 7
+
+#ifndef N3
+# define FW_WR_NUM FW_T3_WR_NUM
+#else
+# define FW_WR_NUM FW_N3_WR_NUM
+#endif
+
+/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
+ * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
+ * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
+ *
+ * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
+ * to RESP Queue[i].
+ */
+#define FW_TUNNEL_NUM 8
+#define FW_TUNNEL_SGEEC_START 8
+#define FW_TUNNEL_TID_START 65544
+
+/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
+ * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
+ * (or 'uP Token') FW_CTRL_TID_START.
+ *
+ * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_CTRL_NUM 8
+#define FW_CTRL_SGEEC_START 65528
+#define FW_CTRL_TID_START 65536
+
+/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
+ * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
+ *
+ * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
+ * OFFLOAD Queues, as the host is responsible for providing the correct TID in
+ * every WR.
+ *
+ * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_OFLD_NUM 8
+#define FW_OFLD_SGEEC_START 0
+
+/* FW_RI_NUM corresponds to the number of supported RI (RDMA Interface)
+ * Queues, starting at SGE Egress Context FW_RI_SGEEC_START and at 'TID'
+ * FW_RI_TID_START.
+ */
+#define FW_RI_NUM 1
+#define FW_RI_SGEEC_START 65527
+#define FW_RI_TID_START 65552
+
+/*
+ * The RX_PKT TID range.
+ */
+#define FW_RX_PKT_NUM 1
+#define FW_RX_PKT_TID_START 65553
+
+/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
+ * by the firmware.
+ */
+#define FW_WRC_NUM \
+ (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
+
+/*
+ * FW type and version.
+ */
+#define S_FW_VERSION_TYPE 28
+#define M_FW_VERSION_TYPE 0xF
+#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
+#define G_FW_VERSION_TYPE(x) \
+ (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
+
+#define S_FW_VERSION_MAJOR 16
+#define M_FW_VERSION_MAJOR 0xFFF
+#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
+#define G_FW_VERSION_MAJOR(x) \
+ (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
+
+#define S_FW_VERSION_MINOR 8
+#define M_FW_VERSION_MINOR 0xFF
+#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
+#define G_FW_VERSION_MINOR(x) \
+ (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
+
+#define S_FW_VERSION_MICRO 0
+#define M_FW_VERSION_MICRO 0xFF
+#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
+#define G_FW_VERSION_MICRO(x) \
+ (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
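+
+/*
+ * Example (illustrative): a version word of 0x00070c00 decodes via the
+ * accessors above as type 0, major 7, minor 12, micro 0; e.g.
+ * G_FW_VERSION_MAJOR(0x00070c00) == 7.
+ */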
+
+#endif /* _FIRMWARE_EXPORTS_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
new file mode 100644
index 0000000000..9749d1239f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <net/neighbour.h>
+#include "common.h"
+#include "t3cdev.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+#define VLAN_NONE 0xfff
+
+/*
+ * Module locking notes: There is a RW lock protecting the L2 table as a
+ * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
+ * under the protection of the table lock; individual entry changes happen
+ * while holding that entry's spinlock. The table lock nests outside the
+ * entry locks. Allocations of new entries take the table lock as writers so
+ * no other lookups can happen while allocating new entries. Entry updates
+ * take the table lock as readers so multiple entries can be updated in
+ * parallel. Dropping an L2T entry is done by decrementing its reference
+ * count and can therefore happen in parallel with entry allocation, but no
+ * entry can change state or increment its ref count during allocation, as
+ * both of these perform lookups.
+ */
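+
+/* As a sketch, the allocation path in t3_l2t_get() below follows that
+ * discipline:
+ *
+ *	write_lock_bh(&d->lock);	table lock taken as writer
+ *	e = alloc_l2e(d);
+ *	if (e) {
+ *		spin_lock(&e->lock);	entry lock nests inside
+ *		... initialize the entry ...
+ *		spin_unlock(&e->lock);
+ *	}
+ *	write_unlock_bh(&d->lock);
+ */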
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+ return e->vlan >> 13;
+}
+
+static inline unsigned int arp_hash(u32 key, int ifindex,
+ const struct l2t_data *d)
+{
+ return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
+}
+
+static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+ neigh_hold(n);
+ if (e->neigh)
+ neigh_release(e->neigh);
+ e->neigh = n;
+}
+
+/*
+ * Set up an L2T entry and send any packets waiting in the arp queue. The
+ * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
+ * entry locked.
+ */
+static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct cpl_l2t_write_req *req;
+ struct sk_buff *tmp;
+
+ if (!skb) {
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ }
+
+ req = __skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
+ req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
+ V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
+ V_L2T_W_PRIO(vlan_prio(e)));
+ memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(dev, skb);
+
+ skb_queue_walk_safe(&e->arpq, skb, tmp) {
+ __skb_unlink(skb, &e->arpq);
+ cxgb3_ofld_send(dev, skb);
+ }
+ e->state = L2T_STATE_VALID;
+
+ return 0;
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+ __skb_queue_tail(&e->arpq, skb);
+}
+
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_STALE)
+ e->state = L2T_STATE_VALID;
+ spin_unlock_bh(&e->lock);
+ fallthrough;
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
+ return cxgb3_ofld_send(dev, skb);
+ case L2T_STATE_RESOLVING:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_RESOLVING) {
+ /* ARP already completed */
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ arpq_enqueue(e, skb);
+ spin_unlock_bh(&e->lock);
+
+ /*
+ * Only the first packet added to the arpq should kick off
+ * resolution. However, because the alloc_skb below can fail,
+ * we allow each packet added to the arpq to retry resolution
+ * as a way of recovering from transient memory exhaustion.
+ * A better way would be to use a work request to retry L2T
+ * entries when there's no memory.
+ */
+ if (!neigh_event_send(e->neigh, NULL)) {
+ skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
+ GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ spin_lock_bh(&e->lock);
+ if (!skb_queue_empty(&e->arpq))
+ setup_l2e_send_pending(dev, skb, e);
+ else /* we lost the race */
+ __kfree_skb(skb);
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(t3_l2t_send_slow);
+
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
+{
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_STALE)
+			e->state = L2T_STATE_VALID;
+ spin_unlock_bh(&e->lock);
+ return;
+	case L2T_STATE_VALID:	/* fast-path, nothing to send */
+ return;
+ case L2T_STATE_RESOLVING:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_RESOLVING) {
+ /* ARP already completed */
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ spin_unlock_bh(&e->lock);
+
+		/*
+		 * Kick off resolution. Unlike t3_l2t_send_slow() this path
+		 * queues no packet, so there is nothing further to do here.
+		 */
+ neigh_event_send(e->neigh, NULL);
+ }
+}
+EXPORT_SYMBOL(t3_l2t_send_event);
+
+/*
+ * Allocate a free L2T entry. Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+ struct l2t_entry *end, *e, **p;
+
+ if (!atomic_read(&d->nfree))
+ return NULL;
+
+ /* there's definitely a free entry */
+ for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
+ if (atomic_read(&e->refcnt) == 0)
+ goto found;
+
+	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e)
+		;
+found:
+ d->rover = e + 1;
+ atomic_dec(&d->nfree);
+
+ /*
+ * The entry we found may be an inactive entry that is
+ * presently in the hash table. We need to remove it.
+ */
+ if (e->state != L2T_STATE_UNUSED) {
+ int hash = arp_hash(e->addr, e->ifindex, d);
+
+ for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
+ if (*p == e) {
+ *p = e->next;
+ break;
+ }
+ e->state = L2T_STATE_UNUSED;
+ }
+ return e;
+}
+
+/*
+ * Called when an L2T entry has no more users. The entry is left in the hash
+ * table since it is likely to be reused but we also bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor. We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
+ */
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
+{
+ spin_lock_bh(&e->lock);
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->neigh) {
+ neigh_release(e->neigh);
+ e->neigh = NULL;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+ atomic_inc(&d->nfree);
+}
+EXPORT_SYMBOL(t3_l2e_free);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+ unsigned int nud_state;
+
+	spin_lock(&e->lock); /* avoid race with t3_l2e_free */
+
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+ nud_state = neigh->nud_state;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+ !(nud_state & NUD_VALID))
+ e->state = L2T_STATE_RESOLVING;
+ else if (nud_state & NUD_CONNECTED)
+ e->state = L2T_STATE_VALID;
+ else
+ e->state = L2T_STATE_STALE;
+ spin_unlock(&e->lock);
+}
+
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
+ struct net_device *dev, const void *daddr)
+{
+ struct l2t_entry *e = NULL;
+ struct neighbour *neigh;
+ struct port_info *p;
+ struct l2t_data *d;
+ int hash;
+ u32 addr;
+ int ifidx;
+ int smt_idx;
+
+ rcu_read_lock();
+ neigh = dst_neigh_lookup(dst, daddr);
+ if (!neigh)
+ goto done_rcu;
+
+ addr = *(u32 *) neigh->primary_key;
+ ifidx = neigh->dev->ifindex;
+
+ if (!dev)
+ dev = neigh->dev;
+ p = netdev_priv(dev);
+ smt_idx = p->port_id;
+
+ d = L2DATA(cdev);
+ if (!d)
+ goto done_rcu;
+
+ hash = arp_hash(addr, ifidx, d);
+
+ write_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (e->addr == addr && e->ifindex == ifidx &&
+ e->smt_idx == smt_idx) {
+ l2t_hold(d, e);
+ if (atomic_read(&e->refcnt) == 1)
+ reuse_entry(e, neigh);
+ goto done_unlock;
+ }
+
+ /* Need to allocate a new entry */
+ e = alloc_l2e(d);
+ if (e) {
+		spin_lock(&e->lock); /* avoid race with t3_l2e_free */
+ e->next = d->l2tab[hash].first;
+ d->l2tab[hash].first = e;
+ e->state = L2T_STATE_RESOLVING;
+ e->addr = addr;
+ e->ifindex = ifidx;
+ e->smt_idx = smt_idx;
+ atomic_set(&e->refcnt, 1);
+ neigh_replace(e, neigh);
+ if (is_vlan_dev(neigh->dev))
+ e->vlan = vlan_dev_vlan_id(neigh->dev);
+ else
+ e->vlan = VLAN_NONE;
+ spin_unlock(&e->lock);
+ }
+done_unlock:
+ write_unlock_bh(&d->lock);
+done_rcu:
+ if (neigh)
+ neigh_release(neigh);
+ rcu_read_unlock();
+ return e;
+}
+EXPORT_SYMBOL(t3_l2t_get);
+
+/*
+ * Called when address resolution fails for an L2T entry to handle packets
+ * on the arpq head. If a packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the offload device.
+ *
+ * XXX: maybe we should abandon the latter behavior and just require a failure
+ * handler.
+ */
+static void handle_failed_resolution(struct t3cdev *dev,
+				     struct sk_buff_head *arpq)
+{
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(arpq, skb, tmp) {
+ struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ __skb_unlink(skb, arpq);
+ if (cb->arp_failure_handler)
+ cb->arp_failure_handler(dev, skb);
+ else
+ cxgb3_ofld_send(dev, skb);
+ }
+}
+
+/*
+ * Called when the host's ARP layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+ struct sk_buff_head arpq;
+ struct l2t_entry *e;
+ struct l2t_data *d = L2DATA(dev);
+ u32 addr = *(u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = arp_hash(addr, ifidx, d);
+
+ read_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (e->addr == addr && e->ifindex == ifidx) {
+ spin_lock(&e->lock);
+ goto found;
+ }
+ read_unlock_bh(&d->lock);
+ return;
+
+found:
+ __skb_queue_head_init(&arpq);
+
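+	/*
+	 * Drop the table lock but stay in BH-disabled context: e->lock is
+	 * still held (taken with spin_lock() under read_lock_bh() above)
+	 * and is released with spin_unlock_bh() below.
+	 */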
+ read_unlock(&d->lock);
+ if (atomic_read(&e->refcnt)) {
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+
+ if (e->state == L2T_STATE_RESOLVING) {
+ if (neigh->nud_state & NUD_FAILED) {
+ skb_queue_splice_init(&e->arpq, &arpq);
+ } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
+ setup_l2e_send_pending(dev, NULL, e);
+ } else {
+ e->state = neigh->nud_state & NUD_CONNECTED ?
+ L2T_STATE_VALID : L2T_STATE_STALE;
+ if (!ether_addr_equal(e->dmac, neigh->ha))
+ setup_l2e_send_pending(dev, NULL, e);
+ }
+ }
+ spin_unlock_bh(&e->lock);
+
+ if (!skb_queue_empty(&arpq))
+ handle_failed_resolution(dev, &arpq);
+}
+
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
+{
+ struct l2t_data *d;
+ int i;
+
+ d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
+ if (!d)
+ return NULL;
+
+ d->nentries = l2t_capacity;
+ d->rover = &d->l2tab[1]; /* entry 0 is not used */
+ atomic_set(&d->nfree, l2t_capacity - 1);
+ rwlock_init(&d->lock);
+
+ for (i = 0; i < l2t_capacity; ++i) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ __skb_queue_head_init(&d->l2tab[i].arpq);
+ spin_lock_init(&d->l2tab[i].lock);
+ atomic_set(&d->l2tab[i].refcnt, 0);
+ }
+ return d;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
new file mode 100644
index 0000000000..ea75f27502
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_L2T_H
+#define _CHELSIO_L2T_H
+
+#include <linux/spinlock.h>
+#include "t3cdev.h"
+#include <linux/atomic.h>
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_UNUSED /* entry not in use */
+};
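+
+/* Roughly, as implemented in l2t.c, entries move between these states as
+ * follows:
+ *
+ *	UNUSED    -> RESOLVING	t3_l2t_get() allocates the entry
+ *	RESOLVING -> VALID	setup_l2e_send_pending() writes the HW entry
+ *	VALID     -> STALE	t3_l2t_update() sees a non-CONNECTED neighbour
+ *	STALE     -> VALID	revalidation in t3_l2t_send_slow()/_event()
+ */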
+
+struct neighbour;
+struct sk_buff;
+
+/*
+ * Each L2T entry plays multiple roles. First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution. Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer. Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+ u16 state; /* entry state */
+ u16 idx; /* entry index */
+ u32 addr; /* dest IP address */
+ int ifindex; /* neighbor's net_device's ifindex */
+ u16 smt_idx; /* SMT index */
+	u16 vlan;		/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+ struct neighbour *neigh; /* associated neighbour */
+ struct l2t_entry *first; /* start of hash chain */
+ struct l2t_entry *next; /* next l2t_entry on chain */
+ struct sk_buff_head arpq; /* queue of packets awaiting resolution */
+ spinlock_t lock;
+ atomic_t refcnt; /* entry reference count */
+ u8 dmac[6]; /* neighbour's MAC address */
+};
+
+struct l2t_data {
+ unsigned int nentries; /* number of entries */
+ struct l2t_entry *rover; /* starting point for next allocation */
+ atomic_t nfree; /* number of free entries */
+ rwlock_t lock;
+ struct rcu_head rcu_head; /* to handle rcu cleanup */
+ struct l2t_entry l2tab[];
+};
+
+typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
+					 struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+ arp_failure_handler_func arp_failure_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void set_arp_failure_handler(struct sk_buff *skb,
+ arp_failure_handler_func hnd)
+{
+ L2T_SKB_CB(skb)->arp_failure_handler = hnd;
+}
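+
+/* Usage sketch (the handler below is hypothetical, not part of the driver):
+ *
+ *	static void my_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
+ *	{
+ *		kfree_skb(skb);		e.g. drop the WR on failure
+ *	}
+ *
+ *	set_arp_failure_handler(skb, my_arp_failure);
+ *	l2t_send(dev, skb, e);
+ */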
+
+/*
+ * Getting to the L2 data from an offload device.
+ */
+#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
+
+#define W_TCB_L2T_IX 0
+#define S_TCB_L2T_IX 7
+#define M_TCB_L2T_IX 0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
+ struct net_device *dev, const void *daddr);
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e);
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
+
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
+
+static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ if (likely(e->state == L2T_STATE_VALID))
+ return cxgb3_ofld_send(dev, skb);
+ return t3_l2t_send_slow(dev, skb, e);
+}
+
+static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
+{
+ struct l2t_data *d;
+
+ rcu_read_lock();
+ d = L2DATA(t);
+
+ if (atomic_dec_and_test(&e->refcnt) && d)
+ t3_l2e_free(d, e);
+
+ rcu_read_unlock();
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+ if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ atomic_dec(&d->nfree);
+}
+
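+/* Typical entry lifecycle, as a sketch (error handling elided):
+ *
+ *	e = t3_l2t_get(cdev, dst, dev, daddr);	takes a reference
+ *	if (e) {
+ *		l2t_send(cdev, skb, e);		fast path when VALID
+ *		l2t_release(cdev, e);		drops the reference; may
+ *						free via t3_l2e_free()
+ *	}
+ */
+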
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
new file mode 100644
index 0000000000..338301b115
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+ IDT75P52100 = 4,
+ IDT75N43102 = 5
+};
+
+/* DBGI command mode */
+enum {
+ DBGI_MODE_MBUS = 0,
+ DBGI_MODE_IDT52100 = 5
+};
+
+/* IDT 75P52100 commands */
+#define IDT_CMD_READ 0
+#define IDT_CMD_WRITE 1
+#define IDT_CMD_SEARCH 2
+#define IDT_CMD_LEARN 3
+
+/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
+#define IDT_LAR_ADR0 0x180006
+#define IDT_LAR_MODE144 0xffff0000
+
+/* IDT SCR and SSR addresses (low 32 bits) */
+#define IDT_SCR_ADR0 0x180000
+#define IDT_SSR0_ADR0 0x180002
+#define IDT_SSR1_ADR0 0x180004
+
+/* IDT GMR base address (low 32 bits) */
+#define IDT_GMR_BASE_ADR0 0x180020
+
+/* IDT data and mask array base addresses (low 32 bits) */
+#define IDT_DATARY_BASE_ADR0 0
+#define IDT_MSKARY_BASE_ADR0 0x80000
+
+/* IDT 75N43102 commands */
+#define IDT4_CMD_SEARCH144 3
+#define IDT4_CMD_WRITE 4
+#define IDT4_CMD_READ 5
+
+/* IDT 75N43102 SCR address (low 32 bits) */
+#define IDT4_SCR_ADR0 0x3
+
+/* IDT 75N43102 GMR base addresses (low 32 bits) */
+#define IDT4_GMR_BASE0 0x10
+#define IDT4_GMR_BASE1 0x20
+#define IDT4_GMR_BASE2 0x30
+
+/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
+#define IDT4_DATARY_BASE_ADR0 0x1000000
+#define IDT4_MSKARY_BASE_ADR0 0x2000000
+
+#define MAX_WRITE_ATTEMPTS 5
+
+#define MAX_ROUTES 2048
+
+/*
+ * Issue a command to the TCAM and wait for its completion. The address and
+ * any data required by the command must have been setup by the caller.
+ */
+static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
+ return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
+ F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
+}
+
+static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
+ u32 v3)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
+}
+
+/*
+ * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
+ * command cmd. The data to be written must have been set up by the caller.
+ * Returns -1 on failure, 0 on success.
+ */
+static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
+ if (mc5_cmd_write(adapter, cmd) == 0)
+ return 0;
+ CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
+ addr_lo);
+ return -1;
+}
+
+static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
+ u32 data_array_base, u32 write_cmd,
+ int addr_shift)
+{
+ unsigned int i;
+ struct adapter *adap = mc5->adapter;
+
+ /*
+ * We need the size of the TCAM data and mask arrays in terms of
+ * 72-bit entries.
+ */
+ unsigned int size72 = mc5->tcam_size;
+ unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
+
+ if (mc5->mode == MC5_MODE_144_BIT) {
+ size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
+ server_base *= 2;
+ }
+
+ /* Clear the data array */
+ dbgi_wr_data3(adap, 0, 0, 0);
+ for (i = 0; i < size72; i++)
+ if (mc5_write(adap, data_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+
+ /* Initialize the mask array. */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+ for (i = 0; i < size72; i++) {
+ if (i == server_base) /* entering server or routing region */
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
+ mc5->mode == MC5_MODE_144_BIT ?
+ 0xfffffff9 : 0xfffffffd);
+ if (mc5_write(adap, mask_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+ }
+ return 0;
+}
+
+static int init_idt52100(struct mc5 *mc5)
+{
+ int i;
+ struct adapter *adap = mc5->adapter;
+
+ t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+ V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
+ t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
+
+ /*
+ * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
+ * GMRs 8-9 for ACK- and AOPEN searches.
+ */
+ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
+ t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
+ t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
+
+ /* Set DBGI command mode for IDT TCAM. */
+ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+ /* Set up LAR */
+ dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
+ if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ /* Set up SSRs */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
+ if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
+ mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ /* Set up GMRs */
+ for (i = 0; i < 32; ++i) {
+ if (i >= 12 && i < 15)
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+ else if (i == 15)
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+ else
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+
+ if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
+ goto err;
+ }
+
+ /* Set up SCR */
+ dbgi_wr_data3(adap, 1, 0, 0);
+ if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
+ IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
+err:
+ return -EIO;
+}
+
+static int init_idt43102(struct mc5 *mc5)
+{
+ int i;
+ struct adapter *adap = mc5->adapter;
+
+ t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+ adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
+ V_RDLAT(0xd) | V_SRCHLAT(0x12));
+
+ /*
+ * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
+ * for ACK- and AOPEN searches.
+ */
+ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
+ IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
+ t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
+ t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
+
+ t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
+
+ /* Set DBGI command mode for IDT TCAM. */
+ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+ /* Set up GMRs */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+ for (i = 0; i < 7; ++i)
+ if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
+ goto err;
+
+ for (i = 0; i < 4; ++i)
+ if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
+ goto err;
+
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+ if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
+ mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
+ mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
+ goto err;
+
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+ if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
+ goto err;
+
+ /* Set up SCR */
+ dbgi_wr_data3(adap, 0xf0000000, 0, 0);
+ if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
+ goto err;
+
+ return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
+ IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
+err:
+ return -EIO;
+}
+
+/* Put MC5 in DBGI mode. */
+static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
+{
+ t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+ V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
+}
+
+/* Put MC5 in M-Bus mode. */
+static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
+{
+ t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+ V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
+ V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
+ V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
+}
+
+/*
+ * Initialization that requires the OS and protocol layers to already
+ * be initialized goes here.
+ */
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+ unsigned int nroutes)
+{
+ u32 cfg;
+ int err;
+ unsigned int tcam_size = mc5->tcam_size;
+ struct adapter *adap = mc5->adapter;
+
+ if (!tcam_size)
+ return 0;
+
+ if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
+ return -EINVAL;
+
+ /* Reset the TCAM */
+ cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
+ cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
+ t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
+ if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
+ CH_ERR(adap, "TCAM reset timed out\n");
+ return -1;
+ }
+
+ t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
+ t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
+ tcam_size - nroutes - nfilters);
+ t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
+ tcam_size - nroutes - nfilters - nservers);
+
+ mc5->parity_enabled = 1;
+
+	/* All the TCAM addresses we access have only the low 32 bits non-zero */
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
+
+ mc5_dbgi_mode_enable(mc5);
+
+ switch (mc5->part_type) {
+ case IDT75P52100:
+ err = init_idt52100(mc5);
+ break;
+ case IDT75N43102:
+ err = init_idt43102(mc5);
+ break;
+ default:
+ CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
+ err = -EINVAL;
+ break;
+ }
+
+ mc5_dbgi_mode_disable(mc5);
+ return err;
+}
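+
+/* Worked partition example (hypothetical sizes): with tcam_size = 32768,
+ * nroutes = 2048, nfilters = 4096 and nservers = 8192, the writes above set
+ * the routing region index to 30720, the filter region index to 26624 and
+ * the server region index to 18432.
+ */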
+
+#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
+
+/*
+ * MC5 interrupt handler
+ */
+void t3_mc5_intr_handler(struct mc5 *mc5)
+{
+ struct adapter *adap = mc5->adapter;
+ u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
+
+ if ((cause & F_PARITYERR) && mc5->parity_enabled) {
+ CH_ALERT(adap, "MC5 parity error\n");
+ mc5->stats.parity_err++;
+ }
+
+ if (cause & F_REQQPARERR) {
+ CH_ALERT(adap, "MC5 request queue parity error\n");
+ mc5->stats.reqq_parity_err++;
+ }
+
+ if (cause & F_DISPQPARERR) {
+ CH_ALERT(adap, "MC5 dispatch queue parity error\n");
+ mc5->stats.dispq_parity_err++;
+ }
+
+ if (cause & F_ACTRGNFULL)
+ mc5->stats.active_rgn_full++;
+ if (cause & F_NFASRCHFAIL)
+ mc5->stats.nfa_srch_err++;
+ if (cause & F_UNKNOWNCMD)
+ mc5->stats.unknown_cmd++;
+ if (cause & F_DELACTEMPTY)
+ mc5->stats.del_act_empty++;
+ if (cause & MC5_INT_FATAL)
+ t3_fatal_err(adap);
+
+ t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
+}
+
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+{
+#define K * 1024
+
+	static const unsigned int tcam_part_size[] = {	/* in K 72-bit entries */
+ 64 K, 128 K, 256 K, 32 K
+ };
+
+#undef K
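+	/* The temporary K macro above makes "64 K" expand to "64 * 1024";
+	 * it is #undef'd immediately so it cannot leak elsewhere.
+	 */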
+
+ u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
+
+ mc5->adapter = adapter;
+ mc5->mode = (unsigned char)mode;
+ mc5->part_type = (unsigned char)G_TMTYPE(cfg);
+ if (cfg & F_TMTYPEHI)
+ mc5->part_type |= 4;
+
+ mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
+ if (mode == MC5_MODE_144_BIT)
+ mc5->tcam_size /= 2;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
new file mode 100644
index 0000000000..174eb45100
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -0,0 +1,2564 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#define A_SG_CONTROL 0x0
+
+#define S_CONGMODE 29
+#define V_CONGMODE(x) ((x) << S_CONGMODE)
+#define F_CONGMODE V_CONGMODE(1U)
+
+#define S_TNLFLMODE 28
+#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE)
+#define F_TNLFLMODE V_TNLFLMODE(1U)
+
+#define S_FATLPERREN 27
+#define V_FATLPERREN(x) ((x) << S_FATLPERREN)
+#define F_FATLPERREN V_FATLPERREN(1U)
+
+#define S_DROPPKT 20
+#define V_DROPPKT(x) ((x) << S_DROPPKT)
+#define F_DROPPKT V_DROPPKT(1U)
+
+#define S_EGRGENCTRL 19
+#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
+#define F_EGRGENCTRL V_EGRGENCTRL(1U)
+
+#define S_USERSPACESIZE 14
+#define M_USERSPACESIZE 0x1f
+#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
+
+#define S_HOSTPAGESIZE 11
+#define M_HOSTPAGESIZE 0x7
+#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
+
+#define S_FLMODE 9
+#define V_FLMODE(x) ((x) << S_FLMODE)
+#define F_FLMODE V_FLMODE(1U)
+
+#define S_PKTSHIFT 6
+#define M_PKTSHIFT 0x7
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+
+#define S_ONEINTMULTQ 5
+#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
+#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
+
+#define S_BIGENDIANINGRESS 2
+#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
+#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
+
+#define S_ISCSICOALESCING 1
+#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
+#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
+
+#define S_GLOBALENABLE 0
+#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
+#define F_GLOBALENABLE V_GLOBALENABLE(1U)
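+
+/* Naming conventions used throughout this file: for a register field FOO,
+ * S_FOO is its bit offset, M_FOO its unshifted mask, V_FOO(x) places a value
+ * in the field, F_FOO is the single-bit flag form and G_FOO(x), where
+ * provided, extracts the field again. A usage sketch (values hypothetical):
+ *
+ *	u32 v = F_GLOBALENABLE | V_PKTSHIFT(2) | V_HOSTPAGESIZE(4);
+ *
+ * composes an A_SG_CONTROL value with packet shift 2 and host page size
+ * code 4.
+ */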
+
+#define S_AVOIDCQOVFL 24
+#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
+#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
+
+#define S_OPTONEINTMULTQ 23
+#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
+#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
+
+#define S_CQCRDTCTRL 22
+#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
+#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
+
+#define A_SG_KDOORBELL 0x4
+
+#define S_SELEGRCNTX 31
+#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
+#define F_SELEGRCNTX V_SELEGRCNTX(1U)
+
+#define S_EGRCNTX 0
+#define M_EGRCNTX 0xffff
+#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
+
+#define A_SG_GTS 0x8
+
+#define S_RSPQ 29
+#define M_RSPQ 0x7
+#define V_RSPQ(x) ((x) << S_RSPQ)
+#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
+
+#define S_NEWTIMER 16
+#define M_NEWTIMER 0x1fff
+#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
+
+#define S_NEWINDEX 0
+#define M_NEWINDEX 0xffff
+#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
+
+#define A_SG_CONTEXT_CMD 0xc
+
+#define S_CONTEXT_CMD_OPCODE 28
+#define M_CONTEXT_CMD_OPCODE 0xf
+#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
+
+#define S_CONTEXT_CMD_BUSY 27
+#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
+#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
+
+#define S_CQ_CREDIT 20
+
+#define M_CQ_CREDIT 0x7f
+
+#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
+
+#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
+
+#define S_CQ 19
+
+#define V_CQ(x) ((x) << S_CQ)
+#define F_CQ V_CQ(1U)
+
+#define S_RESPONSEQ 18
+#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
+#define F_RESPONSEQ V_RESPONSEQ(1U)
+
+#define S_EGRESS 17
+#define V_EGRESS(x) ((x) << S_EGRESS)
+#define F_EGRESS V_EGRESS(1U)
+
+#define S_FREELIST 16
+#define V_FREELIST(x) ((x) << S_FREELIST)
+#define F_FREELIST V_FREELIST(1U)
+
+#define S_CONTEXT 0
+#define M_CONTEXT 0xffff
+#define V_CONTEXT(x) ((x) << S_CONTEXT)
+
+#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
+
+#define A_SG_CONTEXT_DATA0 0x10
+
+#define A_SG_CONTEXT_DATA1 0x14
+
+#define A_SG_CONTEXT_DATA2 0x18
+
+#define A_SG_CONTEXT_DATA3 0x1c
+
+#define A_SG_CONTEXT_MASK0 0x20
+
+#define A_SG_CONTEXT_MASK1 0x24
+
+#define A_SG_CONTEXT_MASK2 0x28
+
+#define A_SG_CONTEXT_MASK3 0x2c
+
+#define A_SG_RSPQ_CREDIT_RETURN 0x30
+
+#define S_CREDITS 0
+#define M_CREDITS 0xffff
+#define V_CREDITS(x) ((x) << S_CREDITS)
+
+#define A_SG_DATA_INTR 0x34
+
+#define S_ERRINTR 31
+#define V_ERRINTR(x) ((x) << S_ERRINTR)
+#define F_ERRINTR V_ERRINTR(1U)
+
+#define A_SG_HI_DRB_HI_THRSH 0x38
+
+#define A_SG_HI_DRB_LO_THRSH 0x3c
+
+#define A_SG_LO_DRB_HI_THRSH 0x40
+
+#define A_SG_LO_DRB_LO_THRSH 0x44
+
+#define A_SG_RSPQ_FL_STATUS 0x4c
+
+#define S_RSPQ0DISABLED 8
+
+#define S_FL0EMPTY 16
+#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY)
+#define F_FL0EMPTY V_FL0EMPTY(1U)
+
+#define A_SG_EGR_RCQ_DRB_THRSH 0x54
+
+#define S_HIRCQDRBTHRSH 16
+#define M_HIRCQDRBTHRSH 0x7ff
+#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
+
+#define S_LORCQDRBTHRSH 0
+#define M_LORCQDRBTHRSH 0x7ff
+#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
+
+#define A_SG_EGR_CNTX_BADDR 0x58
+
+#define A_SG_INT_CAUSE 0x5c
+
+#define S_HIRCQPARITYERROR 31
+#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR)
+#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U)
+
+#define S_LORCQPARITYERROR 30
+#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR)
+#define F_LORCQPARITYERROR V_LORCQPARITYERROR(1U)
+
+#define S_HIDRBPARITYERROR 29
+#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR)
+#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U)
+
+#define S_LODRBPARITYERROR 28
+#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR)
+#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U)
+
+#define S_FLPARITYERROR 22
+#define M_FLPARITYERROR 0x3f
+#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR)
+#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR)
+
+#define S_ITPARITYERROR 20
+#define M_ITPARITYERROR 0x3
+#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR)
+#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR)
+
+#define S_IRPARITYERROR 19
+#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR)
+#define F_IRPARITYERROR V_IRPARITYERROR(1U)
+
+#define S_RCPARITYERROR 18
+#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR)
+#define F_RCPARITYERROR V_RCPARITYERROR(1U)
+
+#define S_OCPARITYERROR 17
+#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR)
+#define F_OCPARITYERROR V_OCPARITYERROR(1U)
+
+#define S_CPPARITYERROR 16
+#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR)
+#define F_CPPARITYERROR V_CPPARITYERROR(1U)
+
+#define S_R_REQ_FRAMINGERROR 15
+#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR)
+#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U)
+
+#define S_UC_REQ_FRAMINGERROR 14
+#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR)
+#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U)
+
+#define S_HICTLDRBDROPERR 13
+#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR)
+#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U)
+
+#define S_LOCTLDRBDROPERR 12
+#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR)
+#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U)
+
+#define S_HIPIODRBDROPERR 11
+#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR)
+#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U)
+
+#define S_LOPIODRBDROPERR 10
+#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
+#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
+#define S_RSPQDISABLED 3
+#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
+#define F_RSPQDISABLED V_RSPQDISABLED(1U)
+
+#define S_RSPQCREDITOVERFOW 2
+#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
+#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
+
+#define S_FLEMPTY 1
+#define V_FLEMPTY(x) ((x) << S_FLEMPTY)
+#define F_FLEMPTY V_FLEMPTY(1U)
+
+#define A_SG_INT_ENABLE 0x60
+
+#define A_SG_CMDQ_CREDIT_TH 0x64
+
+#define S_TIMEOUT 8
+#define M_TIMEOUT 0xffffff
+#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
+
+#define S_THRESHOLD 0
+#define M_THRESHOLD 0xff
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+
+#define A_SG_TIMER_TICK 0x68
+
+#define A_SG_CQ_CONTEXT_BADDR 0x6c
+
+#define A_SG_OCO_BASE 0x70
+
+#define S_BASE1 16
+#define M_BASE1 0xffff
+#define V_BASE1(x) ((x) << S_BASE1)
+
+#define A_SG_DRB_PRI_THRESH 0x74
+
+#define A_PCIX_INT_ENABLE 0x80
+
+#define S_MSIXPARERR 22
+#define M_MSIXPARERR 0x7
+
+#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
+
+#define S_CFPARERR 18
+#define M_CFPARERR 0xf
+
+#define V_CFPARERR(x) ((x) << S_CFPARERR)
+
+#define S_RFPARERR 14
+#define M_RFPARERR 0xf
+
+#define V_RFPARERR(x) ((x) << S_RFPARERR)
+
+#define S_WFPARERR 12
+#define M_WFPARERR 0x3
+
+#define V_WFPARERR(x) ((x) << S_WFPARERR)
+
+#define S_PIOPARERR 11
+#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
+#define F_PIOPARERR V_PIOPARERR(1U)
+
+#define S_DETUNCECCERR 10
+#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
+#define F_DETUNCECCERR V_DETUNCECCERR(1U)
+
+#define S_DETCORECCERR 9
+#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
+#define F_DETCORECCERR V_DETCORECCERR(1U)
+
+#define S_RCVSPLCMPERR 8
+#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
+#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
+
+#define S_UNXSPLCMP 7
+#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
+#define F_UNXSPLCMP V_UNXSPLCMP(1U)
+
+#define S_SPLCMPDIS 6
+#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
+#define F_SPLCMPDIS V_SPLCMPDIS(1U)
+
+#define S_DETPARERR 5
+#define V_DETPARERR(x) ((x) << S_DETPARERR)
+#define F_DETPARERR V_DETPARERR(1U)
+
+#define S_SIGSYSERR 4
+#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
+#define F_SIGSYSERR V_SIGSYSERR(1U)
+
+#define S_RCVMSTABT 3
+#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
+#define F_RCVMSTABT V_RCVMSTABT(1U)
+
+#define S_RCVTARABT 2
+#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
+#define F_RCVTARABT V_RCVTARABT(1U)
+
+#define S_SIGTARABT 1
+#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
+#define F_SIGTARABT V_SIGTARABT(1U)
+
+#define S_MSTDETPARERR 0
+#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
+#define F_MSTDETPARERR V_MSTDETPARERR(1U)
+
+#define A_PCIX_INT_CAUSE 0x84
+
+#define A_PCIX_CFG 0x88
+
+#define S_DMASTOPEN 19
+#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN)
+#define F_DMASTOPEN V_DMASTOPEN(1U)
+
+#define S_CLIDECEN 18
+#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
+#define F_CLIDECEN V_CLIDECEN(1U)
+
+#define A_PCIX_MODE 0x8c
+
+#define S_PCLKRANGE 6
+#define M_PCLKRANGE 0x3
+#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
+#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
+
+#define S_PCIXINITPAT 2
+#define M_PCIXINITPAT 0xf
+#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
+#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
+
+#define S_64BIT 0
+#define V_64BIT(x) ((x) << S_64BIT)
+#define F_64BIT V_64BIT(1U)
+
+#define A_PCIE_INT_ENABLE 0x80
+
+#define S_BISTERR 15
+#define M_BISTERR 0xff
+
+#define V_BISTERR(x) ((x) << S_BISTERR)
+
+#define S_TXPARERR 18
+#define V_TXPARERR(x) ((x) << S_TXPARERR)
+#define F_TXPARERR V_TXPARERR(1U)
+
+#define S_RXPARERR 17
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR V_RXPARERR(1U)
+
+#define S_RETRYLUTPARERR 16
+#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR)
+#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U)
+
+#define S_RETRYBUFPARERR 15
+#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR)
+#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U)
+
+#define S_PCIE_MSIXPARERR 12
+#define M_PCIE_MSIXPARERR 0x7
+
+#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
+
+#define S_PCIE_CFPARERR 11
+#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
+#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
+
+#define S_PCIE_RFPARERR 10
+#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
+#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
+
+#define S_PCIE_WFPARERR 9
+#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
+#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
+
+#define S_PCIE_PIOPARERR 8
+#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
+#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
+
+#define S_UNXSPLCPLERRC 7
+#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
+#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
+
+#define S_UNXSPLCPLERRR 6
+#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
+#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
+
+#define S_PEXERR 0
+#define V_PEXERR(x) ((x) << S_PEXERR)
+#define F_PEXERR V_PEXERR(1U)
+
+#define A_PCIE_INT_CAUSE 0x84
+
+#define S_PCIE_DMASTOPEN 24
+#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN)
+#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U)
+
+#define A_PCIE_CFG 0x88
+
+#define S_ENABLELINKDWNDRST 21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST 20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U)
+
+#define S_PCIE_CLIDECEN 16
+#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
+#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
+
+#define S_CRSTWRMMODE 0
+#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
+#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
+
+#define A_PCIE_MODE 0x8c
+
+#define S_NUMFSTTRNSEQRX 10
+#define M_NUMFSTTRNSEQRX 0xff
+#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
+#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
+
+#define A_PCIE_PEX_CTRL0 0x98
+
+#define S_NUMFSTTRNSEQ 22
+#define M_NUMFSTTRNSEQ 0xff
+#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
+#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
+
+#define S_REPLAYLMT 2
+#define M_REPLAYLMT 0xfffff
+
+#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
+
+#define A_PCIE_PEX_CTRL1 0x9c
+
+#define S_T3A_ACKLAT 0
+#define M_T3A_ACKLAT 0x7ff
+
+#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
+
+#define S_ACKLAT 0
+#define M_ACKLAT 0x1fff
+
+#define V_ACKLAT(x) ((x) << S_ACKLAT)
+
+#define A_PCIE_PEX_ERR 0xa4
+
+#define A_T3DBG_GPIO_EN 0xd0
+
+#define S_GPIO11_OEN 27
+#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
+#define F_GPIO11_OEN V_GPIO11_OEN(1U)
+
+#define S_GPIO10_OEN 26
+#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
+#define F_GPIO10_OEN V_GPIO10_OEN(1U)
+
+#define S_GPIO7_OEN 23
+#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
+#define F_GPIO7_OEN V_GPIO7_OEN(1U)
+
+#define S_GPIO6_OEN 22
+#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
+#define F_GPIO6_OEN V_GPIO6_OEN(1U)
+
+#define S_GPIO5_OEN 21
+#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
+#define F_GPIO5_OEN V_GPIO5_OEN(1U)
+
+#define S_GPIO4_OEN 20
+#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
+#define F_GPIO4_OEN V_GPIO4_OEN(1U)
+
+#define S_GPIO2_OEN 18
+#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
+#define F_GPIO2_OEN V_GPIO2_OEN(1U)
+
+#define S_GPIO1_OEN 17
+#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
+#define F_GPIO1_OEN V_GPIO1_OEN(1U)
+
+#define S_GPIO0_OEN 16
+#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
+#define F_GPIO0_OEN V_GPIO0_OEN(1U)
+
+#define S_GPIO10_OUT_VAL 10
+#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
+#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
+
+#define S_GPIO7_OUT_VAL 7
+#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
+#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
+
+#define S_GPIO6_OUT_VAL 6
+#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
+#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
+
+#define S_GPIO5_OUT_VAL 5
+#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
+#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
+
+#define S_GPIO4_OUT_VAL 4
+#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
+#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
+
+#define S_GPIO2_OUT_VAL 2
+#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
+#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
+
+#define S_GPIO1_OUT_VAL 1
+#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
+#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
+
+#define S_GPIO0_OUT_VAL 0
+#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
+#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+
+#define A_T3DBG_INT_ENABLE 0xd8
+
+#define S_GPIO11 11
+#define V_GPIO11(x) ((x) << S_GPIO11)
+#define F_GPIO11 V_GPIO11(1U)
+
+#define S_GPIO10 10
+#define V_GPIO10(x) ((x) << S_GPIO10)
+#define F_GPIO10 V_GPIO10(1U)
+
+#define S_GPIO9 9
+#define V_GPIO9(x) ((x) << S_GPIO9)
+#define F_GPIO9 V_GPIO9(1U)
+
+#define S_GPIO7 7
+#define V_GPIO7(x) ((x) << S_GPIO7)
+#define F_GPIO7 V_GPIO7(1U)
+
+#define S_GPIO6 6
+#define V_GPIO6(x) ((x) << S_GPIO6)
+#define F_GPIO6 V_GPIO6(1U)
+
+#define S_GPIO5 5
+#define V_GPIO5(x) ((x) << S_GPIO5)
+#define F_GPIO5 V_GPIO5(1U)
+
+#define S_GPIO4 4
+#define V_GPIO4(x) ((x) << S_GPIO4)
+#define F_GPIO4 V_GPIO4(1U)
+
+#define S_GPIO3 3
+#define V_GPIO3(x) ((x) << S_GPIO3)
+#define F_GPIO3 V_GPIO3(1U)
+
+#define S_GPIO2 2
+#define V_GPIO2(x) ((x) << S_GPIO2)
+#define F_GPIO2 V_GPIO2(1U)
+
+#define S_GPIO1 1
+#define V_GPIO1(x) ((x) << S_GPIO1)
+#define F_GPIO1 V_GPIO1(1U)
+
+#define S_GPIO0 0
+#define V_GPIO0(x) ((x) << S_GPIO0)
+#define F_GPIO0 V_GPIO0(1U)
+
+#define A_T3DBG_INT_CAUSE 0xdc
+
+#define A_T3DBG_GPIO_ACT_LOW 0xf0
+
+#define MC7_PMRX_BASE_ADDR 0x100
+
+#define A_MC7_CFG 0x100
+
+#define S_IFEN 13
+#define V_IFEN(x) ((x) << S_IFEN)
+#define F_IFEN V_IFEN(1U)
+
+#define S_TERM150 11
+#define V_TERM150(x) ((x) << S_TERM150)
+#define F_TERM150 V_TERM150(1U)
+
+#define S_SLOW 10
+#define V_SLOW(x) ((x) << S_SLOW)
+#define F_SLOW V_SLOW(1U)
+
+#define S_WIDTH 8
+#define M_WIDTH 0x3
+#define V_WIDTH(x) ((x) << S_WIDTH)
+#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
+
+#define S_BKS 6
+#define V_BKS(x) ((x) << S_BKS)
+#define F_BKS V_BKS(1U)
+
+#define S_ORG 5
+#define V_ORG(x) ((x) << S_ORG)
+#define F_ORG V_ORG(1U)
+
+#define S_DEN 2
+#define M_DEN 0x7
+#define V_DEN(x) ((x) << S_DEN)
+#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
+
+#define S_RDY 1
+#define V_RDY(x) ((x) << S_RDY)
+#define F_RDY V_RDY(1U)
+
+#define S_CLKEN 0
+#define V_CLKEN(x) ((x) << S_CLKEN)
+#define F_CLKEN V_CLKEN(1U)
+
+#define A_MC7_MODE 0x104
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define A_MC7_EXT_MODE1 0x108
+
+#define A_MC7_EXT_MODE2 0x10c
+
+#define A_MC7_EXT_MODE3 0x110
+
+#define A_MC7_PRE 0x114
+
+#define A_MC7_REF 0x118
+
+#define S_PREREFDIV 1
+#define M_PREREFDIV 0x3fff
+#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
+
+#define S_PERREFEN 0
+#define V_PERREFEN(x) ((x) << S_PERREFEN)
+#define F_PERREFEN V_PERREFEN(1U)
+
+#define A_MC7_DLL 0x11c
+
+#define S_DLLENB 1
+#define V_DLLENB(x) ((x) << S_DLLENB)
+#define F_DLLENB V_DLLENB(1U)
+
+#define S_DLLRST 0
+#define V_DLLRST(x) ((x) << S_DLLRST)
+#define F_DLLRST V_DLLRST(1U)
+
+#define A_MC7_PARM 0x120
+
+#define S_ACTTOPREDLY 26
+#define M_ACTTOPREDLY 0xf
+#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
+
+#define S_ACTTORDWRDLY 23
+#define M_ACTTORDWRDLY 0x7
+#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
+
+#define S_PRECYC 20
+#define M_PRECYC 0x7
+#define V_PRECYC(x) ((x) << S_PRECYC)
+
+#define S_REFCYC 13
+#define M_REFCYC 0x7f
+#define V_REFCYC(x) ((x) << S_REFCYC)
+
+#define S_BKCYC 8
+#define M_BKCYC 0x1f
+#define V_BKCYC(x) ((x) << S_BKCYC)
+
+#define S_WRTORDDLY 4
+#define M_WRTORDDLY 0xf
+#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
+
+#define S_RDTOWRDLY 0
+#define M_RDTOWRDLY 0xf
+#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
+
+#define A_MC7_CAL 0x128
+
+#define S_CAL_FAULT 30
+#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
+#define F_CAL_FAULT V_CAL_FAULT(1U)
+
+#define S_SGL_CAL_EN 20
+#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
+#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
+
+#define A_MC7_ERR_ADDR 0x12c
+
+#define A_MC7_ECC 0x130
+
+#define S_ECCCHKEN 1
+#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
+#define F_ECCCHKEN V_ECCCHKEN(1U)
+
+#define S_ECCGENEN 0
+#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
+#define F_ECCGENEN V_ECCGENEN(1U)
+
+#define A_MC7_CE_ADDR 0x134
+
+#define A_MC7_CE_DATA0 0x138
+
+#define A_MC7_CE_DATA1 0x13c
+
+#define A_MC7_CE_DATA2 0x140
+
+#define S_DATA 0
+#define M_DATA 0xff
+
+#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
+
+#define A_MC7_UE_ADDR 0x144
+
+#define A_MC7_UE_DATA0 0x148
+
+#define A_MC7_UE_DATA1 0x14c
+
+#define A_MC7_UE_DATA2 0x150
+
+#define A_MC7_BD_ADDR 0x154
+
+#define S_ADDR 3
+
+#define M_ADDR 0x1fffffff
+
+#define A_MC7_BD_DATA0 0x158
+
+#define A_MC7_BD_DATA1 0x15c
+
+#define A_MC7_BD_OP 0x164
+
+#define S_OP 0
+
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+#define A_MC7_BIST_ADDR_BEG 0x168
+
+#define A_MC7_BIST_ADDR_END 0x16c
+
+#define A_MC7_BIST_DATA 0x170
+
+#define A_MC7_BIST_OP 0x174
+
+#define S_CONT 3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT V_CONT(1U)
+
+#define A_MC7_INT_ENABLE 0x178
+
+#define S_AE 17
+#define V_AE(x) ((x) << S_AE)
+#define F_AE V_AE(1U)
+
+#define S_PE 2
+#define M_PE 0x7fff
+
+#define V_PE(x) ((x) << S_PE)
+
+#define G_PE(x) (((x) >> S_PE) & M_PE)
+
+#define S_UE 1
+#define V_UE(x) ((x) << S_UE)
+#define F_UE V_UE(1U)
+
+#define S_CE 0
+#define V_CE(x) ((x) << S_CE)
+#define F_CE V_CE(1U)
+
+#define A_MC7_INT_CAUSE 0x17c
+
+#define MC7_PMTX_BASE_ADDR 0x180
+
+#define MC7_CM_BASE_ADDR 0x200
+
+#define A_CIM_BOOT_CFG 0x280
+
+#define S_BOOTADDR 2
+#define M_BOOTADDR 0x3fffffff
+#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
+
+#define A_CIM_SDRAM_BASE_ADDR 0x28c
+
+#define A_CIM_SDRAM_ADDR_SIZE 0x290
+
+#define A_CIM_HOST_INT_ENABLE 0x298
+
+#define S_DTAGPARERR 28
+#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR)
+#define F_DTAGPARERR V_DTAGPARERR(1U)
+
+#define S_ITAGPARERR 27
+#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR)
+#define F_ITAGPARERR V_ITAGPARERR(1U)
+
+#define S_IBQTPPARERR 26
+#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR)
+#define F_IBQTPPARERR V_IBQTPPARERR(1U)
+
+#define S_IBQULPPARERR 25
+#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR)
+#define F_IBQULPPARERR V_IBQULPPARERR(1U)
+
+#define S_IBQSGEHIPARERR 24
+#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR)
+#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U)
+
+#define S_IBQSGELOPARERR 23
+#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR)
+#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U)
+
+#define S_OBQULPLOPARERR 22
+#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR)
+#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U)
+
+#define S_OBQULPHIPARERR 21
+#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR)
+#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U)
+
+#define S_OBQSGEPARERR 20
+#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR)
+#define F_OBQSGEPARERR V_OBQSGEPARERR(1U)
+
+#define S_DCACHEPARERR 19
+#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR)
+#define F_DCACHEPARERR V_DCACHEPARERR(1U)
+
+#define S_ICACHEPARERR 18
+#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR)
+#define F_ICACHEPARERR V_ICACHEPARERR(1U)
+
+#define S_DRAMPARERR 17
+#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR)
+#define F_DRAMPARERR V_DRAMPARERR(1U)
+
+#define A_CIM_HOST_INT_CAUSE 0x29c
+
+#define S_BLKWRPLINT 12
+#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
+#define F_BLKWRPLINT V_BLKWRPLINT(1U)
+
+#define S_BLKRDPLINT 11
+#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
+#define F_BLKRDPLINT V_BLKRDPLINT(1U)
+
+#define S_BLKWRCTLINT 10
+#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
+#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
+
+#define S_BLKRDCTLINT 9
+#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
+#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
+
+#define S_BLKWRFLASHINT 8
+#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
+#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
+
+#define S_BLKRDFLASHINT 7
+#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
+#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
+
+#define S_SGLWRFLASHINT 6
+#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
+#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
+
+#define S_WRBLKFLASHINT 5
+#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
+#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
+
+#define S_BLKWRBOOTINT 4
+#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
+#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
+
+#define S_FLASHRANGEINT 2
+#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
+#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
+
+#define S_SDRAMRANGEINT 1
+#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
+#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
+
+#define S_RSVDSPACEINT 0
+#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
+#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+
+#define A_CIM_HOST_ACC_CTRL 0x2b0
+
+#define S_HOSTBUSY 17
+#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
+#define F_HOSTBUSY V_HOSTBUSY(1U)
+
+#define A_CIM_HOST_ACC_DATA 0x2b4
+
+#define A_CIM_IBQ_DBG_CFG 0x2c0
+
+#define S_IBQDBGADDR 16
+#define M_IBQDBGADDR 0x1ff
+#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR)
+#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR)
+
+#define S_IBQDBGQID 3
+#define M_IBQDBGQID 0x3
+#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID)
+#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID)
+
+#define S_IBQDBGWR 2
+#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR)
+#define F_IBQDBGWR V_IBQDBGWR(1U)
+
+#define S_IBQDBGBUSY 1
+#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY)
+#define F_IBQDBGBUSY V_IBQDBGBUSY(1U)
+
+#define S_IBQDBGEN 0
+#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
+#define F_IBQDBGEN V_IBQDBGEN(1U)
+
+#define A_CIM_IBQ_DBG_DATA 0x2c8
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_RXFBARBPRIO 25
+#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO)
+#define F_RXFBARBPRIO V_RXFBARBPRIO(1U)
+
+#define S_TXFBARBPRIO 24
+#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO)
+#define F_TXFBARBPRIO V_TXFBARBPRIO(1U)
+
+#define S_NICMODE 14
+#define V_NICMODE(x) ((x) << S_NICMODE)
+#define F_NICMODE V_NICMODE(1U)
+
+#define S_IPV6ENABLE 15
+#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
+#define F_IPV6ENABLE V_IPV6ENABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_VLANEXTRACTIONENABLE 12
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_TXPACINGENABLE 24
+#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
+#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
+
+#define S_PATHMTU 15
+#define V_PATHMTU(x) ((x) << S_PATHMTU)
+#define F_PATHMTU V_PATHMTU(1U)
+
+#define S_IPCHECKSUMOFFLOAD 13
+#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
+#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
+
+#define S_UDPCHECKSUMOFFLOAD 12
+#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
+#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
+
+#define S_TCPCHECKSUMOFFLOAD 11
+#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
+#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
+
+#define S_IPTTL 0
+#define M_IPTTL 0xff
+#define V_IPTTL(x) ((x) << S_IPTTL)
+
+#define A_TP_CMM_MM_BASE 0x314
+
+#define A_TP_CMM_TIMER_BASE 0x318
+
+#define S_CMTIMERMAXNUM 28
+#define M_CMTIMERMAXNUM 0x3
+#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
+
+#define A_TP_PMM_SIZE 0x31c
+
+#define A_TP_PMM_TX_BASE 0x320
+
+#define A_TP_PMM_RX_BASE 0x328
+
+#define A_TP_PMM_RX_PAGE_SIZE 0x32c
+
+#define A_TP_PMM_RX_MAX_PAGE 0x330
+
+#define A_TP_PMM_TX_PAGE_SIZE 0x334
+
+#define A_TP_PMM_TX_MAX_PAGE 0x338
+
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_MTUDEFAULT 16
+#define M_MTUDEFAULT 0xffff
+#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
+
+#define S_MTUENABLE 10
+#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
+#define F_MTUENABLE V_MTUENABLE(1U)
+
+#define S_SACKRX 8
+#define V_SACKRX(x) ((x) << S_SACKRX)
+#define F_SACKRX V_SACKRX(1U)
+
+#define S_SACKMODE 4
+#define M_SACKMODE 0x3
+#define V_SACKMODE(x) ((x) << S_SACKMODE)
+
+#define S_WINDOWSCALEMODE 2
+#define M_WINDOWSCALEMODE 0x3
+#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
+
+#define S_TIMESTAMPSMODE 0
+#define M_TIMESTAMPSMODE 0x3
+#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
+
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_AUTOSTATE3 30
+#define M_AUTOSTATE3 0x3
+#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
+
+#define S_AUTOSTATE2 28
+#define M_AUTOSTATE2 0x3
+#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
+
+#define S_AUTOSTATE1 26
+#define M_AUTOSTATE1 0x3
+#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
+
+#define S_BYTETHRESHOLD 5
+#define M_BYTETHRESHOLD 0xfffff
+#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
+
+#define S_MSSTHRESHOLD 3
+#define M_MSSTHRESHOLD 0x3
+#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
+
+#define S_AUTOCAREFUL 2
+#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
+#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
+
+#define S_AUTOENABLE 1
+#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
+#define F_AUTOENABLE V_AUTOENABLE(1U)
+
+#define S_DACK_MODE 0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE V_DACK_MODE(1U)
+
+#define A_TP_PC_CONFIG 0x348
+
+#define S_TXTOSQUEUEMAPMODE 26
+#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
+#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
+
+#define S_ENABLEEPCMDAFULL 23
+#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
+#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
+
+#define S_MODULATEUNIONMODE 22
+#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
+#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
+
+#define S_TXDEFERENABLE 20
+#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
+#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
+
+#define S_RXCONGESTIONMODE 19
+#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
+#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
+
+#define S_HEARBEATDACK 16
+#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
+#define F_HEARBEATDACK V_HEARBEATDACK(1U)
+
+#define S_TXCONGESTIONMODE 15
+#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
+#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
+
+#define S_ENABLEOCSPIFULL 30
+#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
+#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
+
+#define S_LOCKTID 28
+#define V_LOCKTID(x) ((x) << S_LOCKTID)
+#define F_LOCKTID V_LOCKTID(1U)
+
+#define S_TABLELATENCYDELTA 0
+#define M_TABLELATENCYDELTA 0xf
+#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA)
+#define G_TABLELATENCYDELTA(x) \
+ (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA)
+
+#define A_TP_PC_CONFIG2 0x34c
+
+#define S_DISBLEDAPARBIT0 15
+#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0)
+#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U)
+
+#define S_ENABLEARPMISS 13
+#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS)
+#define F_ENABLEARPMISS V_ENABLEARPMISS(1U)
+
+#define S_ENABLENONOFDTNLSYN 12
+#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN)
+#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U)
+
+#define S_ENABLEIPV6RSS 11
+#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS)
+#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U)
+
+#define S_CHDRAFULL 4
+#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
+#define F_CHDRAFULL V_CHDRAFULL(1U)
+
+#define A_TP_TCP_BACKOFF_REG0 0x350
+
+#define A_TP_TCP_BACKOFF_REG1 0x354
+
+#define A_TP_TCP_BACKOFF_REG2 0x358
+
+#define A_TP_TCP_BACKOFF_REG3 0x35c
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_MAXRXDATA 16
+#define M_MAXRXDATA 0xffff
+#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
+
+#define S_RXCOALESCESIZE 0
+#define M_RXCOALESCESIZE 0xffff
+#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_TXDATAACKIDX 16
+#define M_TXDATAACKIDX 0xf
+#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+
+#define S_TXPACEAUTOSTRICT 10
+#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
+#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
+
+#define S_TXPACEFIXED 9
+#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
+#define F_TXPACEFIXED V_TXPACEFIXED(1U)
+
+#define S_TXPACEAUTO 8
+#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
+#define F_TXPACEAUTO V_TXPACEAUTO(1U)
+
+#define S_RXCOALESCEENABLE 1
+#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
+#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
+
+#define S_RXCOALESCEPSHEN 0
+#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
+#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
+
+#define A_TP_PARA_REG4 0x370
+
+#define A_TP_PARA_REG5 0x374
+
+#define S_RXDDPOFFINIT 3
+#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
+#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U)
+
+#define A_TP_PARA_REG6 0x378
+
+#define S_T3A_ENABLEESND 13
+#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
+#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
+
+#define S_ENABLEESND 11
+#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
+#define F_ENABLEESND V_ENABLEESND(1U)
+
+#define A_TP_PARA_REG7 0x37c
+
+#define S_PMMAXXFERLEN1 16
+#define M_PMMAXXFERLEN1 0xffff
+#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
+
+#define S_PMMAXXFERLEN0 0
+#define M_PMMAXXFERLEN0 0xffff
+#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_TIMERRESOLUTION 16
+#define M_TIMERRESOLUTION 0xff
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+
+#define S_TIMESTAMPRESOLUTION 8
+#define M_TIMESTAMPRESOLUTION 0xff
+#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION 0
+#define M_DELAYEDACKRESOLUTION 0xff
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+
+#define A_TP_MSL 0x394
+
+#define A_TP_RXT_MIN 0x398
+
+#define A_TP_RXT_MAX 0x39c
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define A_TP_KEEP_IDLE 0x3a8
+
+#define A_TP_KEEP_INTVL 0x3ac
+
+#define A_TP_INIT_SRTT 0x3b0
+
+#define A_TP_DACK_TIMER 0x3b4
+
+#define A_TP_FINWAIT2_TIMER 0x3b8
+
+#define A_TP_SHIFT_CNT 0x3c0
+
+#define S_SYNSHIFTMAX 24
+#define M_SYNSHIFTMAX 0xff
+#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
+
+#define S_RXTSHIFTMAXR1 20
+#define M_RXTSHIFTMAXR1 0xf
+#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
+
+#define S_RXTSHIFTMAXR2 16
+#define M_RXTSHIFTMAXR2 0xf
+#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
+
+#define S_PERSHIFTBACKOFFMAX 12
+#define M_PERSHIFTBACKOFFMAX 0xf
+#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
+
+#define S_PERSHIFTMAX 8
+#define M_PERSHIFTMAX 0xf
+#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
+
+#define S_KEEPALIVEMAX 0
+#define M_KEEPALIVEMAX 0xff
+#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
+
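+/*
+ * Composition sketch (values illustrative only, not the driver's actual
+ * configuration): the shift-count fields above pack into a single
+ * A_TP_SHIFT_CNT write, e.g.
+ *
+ *	t3_write_reg(adapter, A_TP_SHIFT_CNT,
+ *		     V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
+ *		     V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) |
+ *		     V_PERSHIFTMAX(8) | V_KEEPALIVEMAX(9));
+ */
+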
+#define A_TP_MTU_PORT_TABLE 0x3d0
+
+#define A_TP_CCTRL_TABLE 0x3dc
+
+#define A_TP_MTU_TABLE 0x3e4
+
+#define A_TP_RSS_MAP_TABLE 0x3e8
+
+#define A_TP_RSS_LKP_TABLE 0x3ec
+
+#define A_TP_RSS_CONFIG 0x3f0
+
+#define S_TNL4TUPEN 29
+#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
+#define F_TNL4TUPEN V_TNL4TUPEN(1U)
+
+#define S_TNL2TUPEN 28
+#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
+#define F_TNL2TUPEN V_TNL2TUPEN(1U)
+
+#define S_TNLPRTEN 26
+#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
+#define F_TNLPRTEN V_TNLPRTEN(1U)
+
+#define S_TNLMAPEN 25
+#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
+#define F_TNLMAPEN V_TNLMAPEN(1U)
+
+#define S_TNLLKPEN 24
+#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
+#define F_TNLLKPEN V_TNLLKPEN(1U)
+
+#define S_RRCPLMAPEN 7
+#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
+#define F_RRCPLMAPEN V_RRCPLMAPEN(1U)
+
+#define S_RRCPLCPUSIZE 4
+#define M_RRCPLCPUSIZE 0x7
+#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
+
+#define S_RQFEEDBACKENABLE 3
+#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
+#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
+
+#define S_HASHTOEPLITZ 2
+#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
+#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U)
+
+#define S_DISABLE 0
+
+#define A_TP_TM_PIO_ADDR 0x418
+
+#define A_TP_TM_PIO_DATA 0x41c
+
+#define A_TP_TX_MOD_QUE_TABLE 0x420
+
+#define A_TP_TX_RESOURCE_LIMIT 0x424
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xff
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
+
+#define A_TP_MOD_CHANNEL_WEIGHT 0x434
+
+#define A_TP_MOD_RATE_LIMIT 0x438
+
+#define A_TP_PIO_ADDR 0x440
+
+#define A_TP_PIO_DATA 0x444
+
+#define A_TP_RESET 0x44c
+
+#define S_FLSTINITENABLE 1
+#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
+#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
+
+#define S_TPRESET 0
+#define V_TPRESET(x) ((x) << S_TPRESET)
+#define F_TPRESET V_TPRESET(1U)
+
+#define A_TP_CMM_MM_RX_FLST_BASE 0x460
+
+#define A_TP_CMM_MM_TX_FLST_BASE 0x464
+
+#define A_TP_CMM_MM_PS_FLST_BASE 0x468
+
+#define A_TP_MIB_INDEX 0x450
+
+#define A_TP_MIB_RDATA 0x454
+
+#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
+
+#define A_TP_INT_ENABLE 0x470
+
+#define S_FLMTXFLSTEMPTY 30
+#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY)
+#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U)
+
+#define S_FLMRXFLSTEMPTY 29
+#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY)
+#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U)
+
+#define S_ARPLUTPERR 26
+#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR)
+#define F_ARPLUTPERR V_ARPLUTPERR(1U)
+
+#define S_CMCACHEPERR 24
+#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR)
+#define F_CMCACHEPERR V_CMCACHEPERR(1U)
+
+#define A_TP_INT_CAUSE 0x474
+
+#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
+
+#define A_TP_TX_DROP_CFG_CH0 0x12b
+
+#define A_TP_TX_DROP_MODE 0x12f
+
+#define A_TP_EGRESS_CONFIG 0x145
+
+#define S_REWRITEFORCETOSIZE 0
+#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
+#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
+
+#define A_TP_TX_TRC_KEY0 0x20
+
+#define A_TP_RX_TRC_KEY0 0x120
+
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0RCVD 0
+#define M_TXDROPCNTCH0RCVD 0xffff
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
+ M_TXDROPCNTCH0RCVD)
+
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
+
+#define A_ULPRX_CTL 0x500
+
+#define S_ROUND_ROBIN 4
+#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
+#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
+
+#define A_ULPRX_INT_ENABLE 0x504
+
+#define S_DATASELFRAMEERR0 7
+#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0)
+#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U)
+
+#define S_DATASELFRAMEERR1 6
+#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1)
+#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U)
+
+#define S_PCMDMUXPERR 5
+#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR)
+#define F_PCMDMUXPERR V_PCMDMUXPERR(1U)
+
+#define S_ARBFPERR 4
+#define V_ARBFPERR(x) ((x) << S_ARBFPERR)
+#define F_ARBFPERR V_ARBFPERR(1U)
+
+#define S_ARBPF0PERR 3
+#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR)
+#define F_ARBPF0PERR V_ARBPF0PERR(1U)
+
+#define S_ARBPF1PERR 2
+#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR)
+#define F_ARBPF1PERR V_ARBPF1PERR(1U)
+
+#define S_PARERRPCMD 1
+#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD)
+#define F_PARERRPCMD V_PARERRPCMD(1U)
+
+#define S_PARERRDATA 0
+#define V_PARERRDATA(x) ((x) << S_PARERRDATA)
+#define F_PARERRDATA V_PARERRDATA(1U)
+
+#define A_ULPRX_INT_CAUSE 0x508
+
+#define A_ULPRX_ISCSI_LLIMIT 0x50c
+
+#define A_ULPRX_ISCSI_ULIMIT 0x510
+
+#define A_ULPRX_ISCSI_TAGMASK 0x514
+
+#define A_ULPRX_ISCSI_PSZ 0x518
+
+#define A_ULPRX_TDDP_LLIMIT 0x51c
+
+#define A_ULPRX_TDDP_ULIMIT 0x520
+#define A_ULPRX_TDDP_PSZ 0x528
+
+#define S_HPZ0 0
+#define M_HPZ0 0xf
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
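+
+/*
+ * Sketch of a matched V_/G_ pair in use (illustrative; the 4KB-relative
+ * page-shift encoding of HPZ0 is an assumption here):
+ *
+ *	t3_write_reg(adapter, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+ *	shift = G_HPZ0(t3_read_reg(adapter, A_ULPRX_TDDP_PSZ));
+ */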
+
+#define A_ULPRX_STAG_LLIMIT 0x52c
+
+#define A_ULPRX_STAG_ULIMIT 0x530
+
+#define A_ULPRX_RQ_LLIMIT 0x534
+
+#define A_ULPRX_RQ_ULIMIT 0x538
+
+#define A_ULPRX_PBL_LLIMIT 0x53c
+
+#define A_ULPRX_PBL_ULIMIT 0x540
+
+#define A_ULPRX_TDDP_TAGMASK 0x524
+
+#define A_ULPTX_CONFIG 0x580
+
+#define S_CFG_CQE_SOP_MASK 1
+#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK)
+#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U)
+
+#define S_CFG_RR_ARB 0
+#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
+#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
+
+#define A_ULPTX_INT_ENABLE 0x584
+
+#define S_PBL_BOUND_ERR_CH1 1
+#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
+#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
+
+#define S_PBL_BOUND_ERR_CH0 0
+#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
+#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
+
+#define A_ULPTX_INT_CAUSE 0x588
+
+#define A_ULPTX_TPT_LLIMIT 0x58c
+
+#define A_ULPTX_TPT_ULIMIT 0x590
+
+#define A_ULPTX_PBL_LLIMIT 0x594
+
+#define A_ULPTX_PBL_ULIMIT 0x598
+
+#define A_ULPTX_DMA_WEIGHT 0x5ac
+
+#define S_D1_WEIGHT 16
+#define M_D1_WEIGHT 0xffff
+#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
+
+#define S_D0_WEIGHT 0
+#define M_D0_WEIGHT 0xffff
+#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
+
+#define A_PM1_RX_CFG 0x5c0
+#define A_PM1_RX_MODE 0x5c4
+
+#define A_PM1_RX_INT_ENABLE 0x5d8
+
+#define S_ZERO_E_CMD_ERROR 18
+#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
+#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
+
+#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
+#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
+#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_RX_FRAMING_ERROR 15
+#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
+#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_RX_FRAMING_ERROR 14
+#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
+#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_TX_FRAMING_ERROR 13
+#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
+#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_TX_FRAMING_ERROR 12
+#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
+#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_RX_FRAMING_ERROR 11
+#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
+#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_RX_FRAMING_ERROR 10
+#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
+#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_TX_FRAMING_ERROR 9
+#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
+#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_TX_FRAMING_ERROR 8
+#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
+#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
+#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
+#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI_PAR_ERROR 3
+#define M_IESPI_PAR_ERROR 0x7
+#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
+
+#define S_OCSPI_PAR_ERROR 0
+#define M_OCSPI_PAR_ERROR 0x7
+#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
+
+#define A_PM1_RX_INT_CAUSE 0x5dc
+
+#define A_PM1_TX_CFG 0x5e0
+#define A_PM1_TX_MODE 0x5e4
+
+#define A_PM1_TX_INT_ENABLE 0x5f8
+
+#define S_ZERO_C_CMD_ERROR 18
+#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
+#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
+
+#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
+#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
+#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_RX_FRAMING_ERROR 15
+#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
+#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_RX_FRAMING_ERROR 14
+#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
+#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_TX_FRAMING_ERROR 13
+#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
+#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_TX_FRAMING_ERROR 12
+#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
+#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_RX_FRAMING_ERROR 11
+#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
+#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_RX_FRAMING_ERROR 10
+#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
+#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_TX_FRAMING_ERROR 9
+#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
+#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_TX_FRAMING_ERROR 8
+#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
+#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
+#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
+#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI_PAR_ERROR 3
+#define M_ICSPI_PAR_ERROR 0x7
+#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
+
+#define S_OESPI_PAR_ERROR 0
+#define M_OESPI_PAR_ERROR 0x7
+#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
+
+#define A_PM1_TX_INT_CAUSE 0x5fc
+
+#define A_MPS_CFG 0x600
+
+#define S_TPRXPORTEN 4
+#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
+#define F_TPRXPORTEN V_TPRXPORTEN(1U)
+
+#define S_TPTXPORT1EN 3
+#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
+#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
+
+#define S_TPTXPORT0EN 2
+#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
+#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
+
+#define S_PORT1ACTIVE 1
+#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
+#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
+
+#define S_PORT0ACTIVE 0
+#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
+#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
+
+#define S_ENFORCEPKT 11
+#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
+#define F_ENFORCEPKT V_ENFORCEPKT(1U)
+
+#define A_MPS_INT_ENABLE 0x61c
+
+#define S_MCAPARERRENB 6
+#define M_MCAPARERRENB 0x7
+#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
+
+#define S_RXTPPARERRENB 4
+#define M_RXTPPARERRENB 0x3
+#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
+
+#define S_TX1TPPARERRENB 2
+#define M_TX1TPPARERRENB 0x3
+#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
+
+#define S_TX0TPPARERRENB 0
+#define M_TX0TPPARERRENB 0x3
+#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
+
+#define A_MPS_INT_CAUSE 0x620
+
+#define S_MCAPARERR 6
+#define M_MCAPARERR 0x7
+#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
+
+#define S_RXTPPARERR 4
+#define M_RXTPPARERR 0x3
+#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
+
+#define S_TX1TPPARERR 2
+#define M_TX1TPPARERR 0x3
+#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
+
+#define S_TX0TPPARERR 0
+#define M_TX0TPPARERR 0x3
+#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
+
+#define A_CPL_SWITCH_CNTRL 0x640
+
+#define A_CPL_INTR_ENABLE 0x650
+
+#define S_CIM_OP_MAP_PERR 5
+#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR)
+#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U)
+
+#define S_CIM_OVFL_ERROR 4
+#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
+#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
+
+#define S_TP_FRAMING_ERROR 3
+#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
+#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
+
+#define S_SGE_FRAMING_ERROR 2
+#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
+#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
+
+#define S_CIM_FRAMING_ERROR 1
+#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
+#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
+
+#define S_ZERO_SWITCH_ERROR 0
+#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
+#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
+
+#define A_CPL_INTR_CAUSE 0x654
+
+#define A_CPL_MAP_TBL_DATA 0x65c
+
+#define A_SMB_GLOBAL_TIME_CFG 0x660
+
+#define A_I2C_CFG 0x6a0
+
+#define S_I2C_CLKDIV 0
+#define M_I2C_CLKDIV 0xfff
+#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
+
+#define A_MI1_CFG 0x6b0
+
+#define S_CLKDIV 5
+#define M_CLKDIV 0xff
+#define V_CLKDIV(x) ((x) << S_CLKDIV)
+
+#define S_ST 3
+#define M_ST 0x3
+#define V_ST(x) ((x) << S_ST)
+#define G_ST(x) (((x) >> S_ST) & M_ST)
+
+#define S_PREEN 2
+#define V_PREEN(x) ((x) << S_PREEN)
+#define F_PREEN V_PREEN(1U)
+
+#define S_MDIINV 1
+#define V_MDIINV(x) ((x) << S_MDIINV)
+#define F_MDIINV V_MDIINV(1U)
+
+#define S_MDIEN 0
+#define V_MDIEN(x) ((x) << S_MDIEN)
+#define F_MDIEN V_MDIEN(1U)
+
+#define A_MI1_ADDR 0x6b4
+
+#define S_PHYADDR 5
+#define M_PHYADDR 0x1f
+#define V_PHYADDR(x) ((x) << S_PHYADDR)
+
+#define S_REGADDR 0
+#define M_REGADDR 0x1f
+#define V_REGADDR(x) ((x) << S_REGADDR)
+
+#define A_MI1_DATA 0x6b8
+
+#define A_MI1_OP 0x6bc
+
+#define S_MDI_OP 0
+#define M_MDI_OP 0x3
+#define V_MDI_OP(x) ((x) << S_MDI_OP)
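+
+/*
+ * MDIO access sketch (illustrative; the real MI1 logic lives in t3_hw.c,
+ * and the read opcode value of 2 is an assumption here):
+ *
+ *	t3_write_reg(adapter, A_MI1_ADDR, V_PHYADDR(phy) | V_REGADDR(reg));
+ *	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
+ *	(poll the MI1 busy flag, defined elsewhere in this file, then:)
+ *	val = t3_read_reg(adapter, A_MI1_DATA);
+ */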
+
+#define A_SF_DATA 0x6d8
+
+#define A_SF_OP 0x6dc
+
+#define S_BYTECNT 1
+#define M_BYTECNT 0x3
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+
+#define A_PL_INT_ENABLE0 0x6e0
+
+#define S_T3DBG 23
+#define V_T3DBG(x) ((x) << S_T3DBG)
+#define F_T3DBG V_T3DBG(1U)
+
+#define S_XGMAC0_1 20
+#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
+#define F_XGMAC0_1 V_XGMAC0_1(1U)
+
+#define S_XGMAC0_0 19
+#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
+#define F_XGMAC0_0 V_XGMAC0_0(1U)
+
+#define S_MC5A 18
+#define V_MC5A(x) ((x) << S_MC5A)
+#define F_MC5A V_MC5A(1U)
+
+#define S_CPL_SWITCH 12
+#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
+#define F_CPL_SWITCH V_CPL_SWITCH(1U)
+
+#define S_MPS0 11
+#define V_MPS0(x) ((x) << S_MPS0)
+#define F_MPS0 V_MPS0(1U)
+
+#define S_PM1_TX 10
+#define V_PM1_TX(x) ((x) << S_PM1_TX)
+#define F_PM1_TX V_PM1_TX(1U)
+
+#define S_PM1_RX 9
+#define V_PM1_RX(x) ((x) << S_PM1_RX)
+#define F_PM1_RX V_PM1_RX(1U)
+
+#define S_ULP2_TX 8
+#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
+#define F_ULP2_TX V_ULP2_TX(1U)
+
+#define S_ULP2_RX 7
+#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
+#define F_ULP2_RX V_ULP2_RX(1U)
+
+#define S_TP1 6
+#define V_TP1(x) ((x) << S_TP1)
+#define F_TP1 V_TP1(1U)
+
+#define S_CIM 5
+#define V_CIM(x) ((x) << S_CIM)
+#define F_CIM V_CIM(1U)
+
+#define S_MC7_CM 4
+#define V_MC7_CM(x) ((x) << S_MC7_CM)
+#define F_MC7_CM V_MC7_CM(1U)
+
+#define S_MC7_PMTX 3
+#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
+#define F_MC7_PMTX V_MC7_PMTX(1U)
+
+#define S_MC7_PMRX 2
+#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
+#define F_MC7_PMRX V_MC7_PMRX(1U)
+
+#define S_PCIM0 1
+#define V_PCIM0(x) ((x) << S_PCIM0)
+#define F_PCIM0 V_PCIM0(1U)
+
+#define S_SGE3 0
+#define V_SGE3(x) ((x) << S_SGE3)
+#define F_SGE3 V_SGE3(1U)
+
+#define A_PL_INT_CAUSE0 0x6e4
+
+#define A_PL_RST 0x6f0
+
+#define S_FATALPERREN 4
+#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
+#define F_FATALPERREN V_FATALPERREN(1U)
+
+#define S_CRSTWRM 1
+#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
+#define F_CRSTWRM V_CRSTWRM(1U)
+
+#define A_PL_REV 0x6f4
+
+#define A_PL_CLI 0x6f8
+
+#define A_MC5_DB_CONFIG 0x704
+
+#define S_TMTYPEHI 30
+#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
+#define F_TMTYPEHI V_TMTYPEHI(1U)
+
+#define S_TMPARTSIZE 28
+#define M_TMPARTSIZE 0x3
+#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
+#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
+
+#define S_TMTYPE 26
+#define M_TMTYPE 0x3
+#define V_TMTYPE(x) ((x) << S_TMTYPE)
+#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
+
+#define S_COMPEN 17
+#define V_COMPEN(x) ((x) << S_COMPEN)
+#define F_COMPEN V_COMPEN(1U)
+
+#define S_PRTYEN 6
+#define V_PRTYEN(x) ((x) << S_PRTYEN)
+#define F_PRTYEN V_PRTYEN(1U)
+
+#define S_MBUSEN 5
+#define V_MBUSEN(x) ((x) << S_MBUSEN)
+#define F_MBUSEN V_MBUSEN(1U)
+
+#define S_DBGIEN 4
+#define V_DBGIEN(x) ((x) << S_DBGIEN)
+#define F_DBGIEN V_DBGIEN(1U)
+
+#define S_TMRDY 2
+#define V_TMRDY(x) ((x) << S_TMRDY)
+#define F_TMRDY V_TMRDY(1U)
+
+#define S_TMRST 1
+#define V_TMRST(x) ((x) << S_TMRST)
+#define F_TMRST V_TMRST(1U)
+
+#define S_TMMODE 0
+#define V_TMMODE(x) ((x) << S_TMMODE)
+#define F_TMMODE V_TMMODE(1U)
+
+#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
+
+#define A_MC5_DB_FILTER_TABLE 0x710
+
+#define A_MC5_DB_SERVER_INDEX 0x714
+
+#define A_MC5_DB_RSP_LATENCY 0x720
+
+#define S_RDLAT 16
+#define M_RDLAT 0x1f
+#define V_RDLAT(x) ((x) << S_RDLAT)
+
+#define S_LRNLAT 8
+#define M_LRNLAT 0x1f
+#define V_LRNLAT(x) ((x) << S_LRNLAT)
+
+#define S_SRCHLAT 0
+#define M_SRCHLAT 0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+
+#define A_MC5_DB_PART_ID_INDEX 0x72c
+
+#define A_MC5_DB_INT_ENABLE 0x740
+
+#define S_DELACTEMPTY 18
+#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
+#define F_DELACTEMPTY V_DELACTEMPTY(1U)
+
+#define S_DISPQPARERR 17
+#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
+#define F_DISPQPARERR V_DISPQPARERR(1U)
+
+#define S_REQQPARERR 16
+#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
+#define F_REQQPARERR V_REQQPARERR(1U)
+
+#define S_UNKNOWNCMD 15
+#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
+#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
+
+#define S_NFASRCHFAIL 8
+#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
+#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
+
+#define S_ACTRGNFULL 7
+#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
+#define F_ACTRGNFULL V_ACTRGNFULL(1U)
+
+#define S_PARITYERR 6
+#define V_PARITYERR(x) ((x) << S_PARITYERR)
+#define F_PARITYERR V_PARITYERR(1U)
+
+#define A_MC5_DB_INT_CAUSE 0x744
+
+#define A_MC5_DB_DBGI_CONFIG 0x774
+
+#define A_MC5_DB_DBGI_REQ_CMD 0x778
+
+#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
+
+#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
+
+#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
+
+#define A_MC5_DB_DBGI_REQ_DATA0 0x788
+
+#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
+
+#define A_MC5_DB_DBGI_REQ_DATA2 0x790
+
+#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
+
+#define S_DBGIRSPVALID 0
+#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
+#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
+
+#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
+
+#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
+
+#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
+
+#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
+
+#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
+
+#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
+
+#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
+
+#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
+
+#define A_MC5_DB_SYN_LRN_CMD 0x7e0
+
+#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
+
+#define A_MC5_DB_ACK_LRN_CMD 0x7e8
+
+#define A_MC5_DB_ILOOKUP_CMD 0x7ec
+
+#define A_MC5_DB_ELOOKUP_CMD 0x7f0
+
+#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
+
+#define A_MC5_DB_DATA_READ_CMD 0x7f8
+
+#define XGMAC0_0_BASE_ADDR 0x800
+
+#define A_XGM_TX_CTRL 0x800
+
+#define S_TXEN 0
+#define V_TXEN(x) ((x) << S_TXEN)
+#define F_TXEN V_TXEN(1U)
+
+#define A_XGM_TX_CFG 0x804
+
+#define S_TXPAUSEEN 0
+#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
+#define F_TXPAUSEEN V_TXPAUSEEN(1U)
+
+#define A_XGM_TX_PAUSE_QUANTA 0x808
+
+#define A_XGM_RX_CTRL 0x80c
+
+#define S_RXEN 0
+#define V_RXEN(x) ((x) << S_RXEN)
+#define F_RXEN V_RXEN(1U)
+
+#define A_XGM_RX_CFG 0x810
+
+#define S_DISPAUSEFRAMES 9
+#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
+#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
+
+#define S_EN1536BFRAMES 8
+#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
+#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
+
+#define S_ENJUMBO 7
+#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
+#define F_ENJUMBO V_ENJUMBO(1U)
+
+#define S_RMFCS 6
+#define V_RMFCS(x) ((x) << S_RMFCS)
+#define F_RMFCS V_RMFCS(1U)
+
+#define S_ENHASHMCAST 2
+#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
+#define F_ENHASHMCAST V_ENHASHMCAST(1U)
+
+#define S_COPYALLFRAMES 0
+#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
+#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
+
+#define S_DISBCAST 1
+#define V_DISBCAST(x) ((x) << S_DISBCAST)
+#define F_DISBCAST V_DISBCAST(1U)
+
+#define A_XGM_RX_HASH_LOW 0x814
+
+#define A_XGM_RX_HASH_HIGH 0x818
+
+#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
+
+#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
+
+#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
+
+#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
+
+#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
+
+#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
+
+#define A_XGM_INT_STATUS 0x86c
+
+#define S_LINKFAULTCHANGE 9
+#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
+#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U)
+
+#define A_XGM_XGM_INT_ENABLE 0x874
+#define A_XGM_XGM_INT_DISABLE 0x878
+
+#define A_XGM_STAT_CTRL 0x880
+
+#define S_CLRSTATS 2
+#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
+#define F_CLRSTATS V_CLRSTATS(1U)
+
+#define A_XGM_RXFIFO_CFG 0x884
+
+#define S_RXFIFO_EMPTY 31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+
+#define S_RXFIFOPAUSEHWM 17
+#define M_RXFIFOPAUSEHWM 0xfff
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM 5
+#define M_RXFIFOPAUSELWM 0xfff
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
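+
+/*
+ * Read-modify-write sketch for an M_/V_ masked field (illustrative; the
+ * mac->offset addressing and the watermark scaling are assumptions here):
+ *
+ *	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
+ *	v &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+ *	v |= V_RXFIFOPAUSEHWM(hwm / 8);
+ *	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
+ */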
+
+#define S_RXSTRFRWRD 1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES 0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES V_DISERRFRAMES(1U)
+
+#define A_XGM_TXFIFO_CFG 0x888
+
+#define S_UNDERUNFIX 22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+
+#define S_TXIPG 13
+#define M_TXIPG 0xff
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH 4
+#define M_TXFIFOTHRESH 0x1ff
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
+#define A_XGM_SERDES_CTRL 0x890
+#define A_XGM_SERDES_CTRL0 0x8e0
+
+#define S_SERDESRESET_ 24
+#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
+#define F_SERDESRESET_ V_SERDESRESET_(1U)
+
+#define S_RXENABLE 4
+#define V_RXENABLE(x) ((x) << S_RXENABLE)
+#define F_RXENABLE V_RXENABLE(1U)
+
+#define S_TXENABLE 3
+#define V_TXENABLE(x) ((x) << S_TXENABLE)
+#define F_TXENABLE V_TXENABLE(1U)
+
+#define A_XGM_PAUSE_TIMER 0x890
+
+#define A_XGM_RGMII_IMP 0x89c
+
+#define S_XGM_IMPSETUPDATE 6
+#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
+#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD 3
+#define M_RGMIIIMPPD 0x7
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU 0
+#define M_RGMIIIMPPU 0x7
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
+#define A_XGM_XAUI_IMP 0x8a0
+
+#define S_CALBUSY 31
+#define V_CALBUSY(x) ((x) << S_CALBUSY)
+#define F_CALBUSY V_CALBUSY(1U)
+
+#define S_XGM_CALFAULT 29
+#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
+#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
+
+#define S_CALIMP 24
+#define M_CALIMP 0x1f
+#define V_CALIMP(x) ((x) << S_CALIMP)
+#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
+
+#define S_XAUIIMP 0
+#define M_XAUIIMP 0x7
+#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
+
+#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
+
+#define S_RXMAXFRAMERSIZE 17
+#define M_RXMAXFRAMERSIZE 0x3fff
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENFRAMER 14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fff
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_XGM_RESET_CTRL 0x8ac
+
+#define S_XGMAC_STOP_EN 4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
+
+#define S_XG2G_RESET_ 3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_ 2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_ 1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_ V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_ 0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_ V_MAC_RESET_(1U)
+
+#define A_XGM_PORT_CFG 0x8b8
+
+#define S_CLKDIVRESET_ 3
+#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
+#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
+
+#define S_PORTSPEED 1
+#define M_PORTSPEED 0x3
+#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
+
+#define S_ENRGMII 0
+#define V_ENRGMII(x) ((x) << S_ENRGMII)
+#define F_ENRGMII V_ENRGMII(1U)
+
+#define A_XGM_INT_ENABLE 0x8d4
+
+#define S_TXFIFO_PRTY_ERR 17
+#define M_TXFIFO_PRTY_ERR 0x7
+#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
+
+#define S_RXFIFO_PRTY_ERR 14
+#define M_RXFIFO_PRTY_ERR 0x7
+#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN 13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW 12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDES_LOS 4
+#define M_SERDES_LOS 0xf
+#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
+
+#define S_XAUIPCSCTCERR 3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE 2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_XGM_INT 0
+#define V_XGM_INT(x) ((x) << S_XGM_INT)
+#define F_XGM_INT V_XGM_INT(1U)
+
+#define A_XGM_INT_CAUSE 0x8d8
+
+#define A_XGM_XAUI_ACT_CTRL 0x8dc
+
+#define S_TXACTENABLE 1
+#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
+#define F_TXACTENABLE V_TXACTENABLE(1U)
+
+#define S_RESET3 23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3 V_RESET3(1U)
+
+#define S_RESET2 22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2 V_RESET2(1U)
+
+#define S_RESET1 21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1 V_RESET1(1U)
+
+#define S_RESET0 20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0 V_RESET0(1U)
+
+#define S_PWRDN3 19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3 V_PWRDN3(1U)
+
+#define S_PWRDN2 18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2 V_PWRDN2(1U)
+
+#define S_PWRDN1 17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1 V_PWRDN1(1U)
+
+#define S_PWRDN0 16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0 V_PWRDN0(1U)
+
+#define S_RESETPLL23 15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23 V_RESETPLL23(1U)
+
+#define S_RESETPLL01 14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01 V_RESETPLL01(1U)
+
+#define A_XGM_SERDES_STAT0 0x8f0
+#define A_XGM_SERDES_STAT1 0x8f4
+#define A_XGM_SERDES_STAT2 0x8f8
+
+#define S_LOWSIG0 0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0 V_LOWSIG0(1U)
+
+#define A_XGM_SERDES_STAT3 0x8fc
+
+#define A_XGM_STAT_TX_BYTE_LOW 0x900
+
+#define A_XGM_STAT_TX_BYTE_HIGH 0x904
+
+#define A_XGM_STAT_TX_FRAME_LOW 0x908
+
+#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
+
+#define A_XGM_STAT_TX_BCAST 0x910
+
+#define A_XGM_STAT_TX_MCAST 0x914
+
+#define A_XGM_STAT_TX_PAUSE 0x918
+
+#define A_XGM_STAT_TX_64B_FRAMES 0x91c
+
+#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
+
+#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
+
+#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
+
+#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
+
+#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
+
+#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
+
+#define A_XGM_STAT_TX_ERR_FRAMES 0x938
+
+#define A_XGM_STAT_RX_BYTES_LOW 0x93c
+
+#define A_XGM_STAT_RX_BYTES_HIGH 0x940
+
+#define A_XGM_STAT_RX_FRAMES_LOW 0x944
+
+#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
+
+#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
+
+#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
+
+#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
+
+#define A_XGM_STAT_RX_64B_FRAMES 0x958
+
+#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
+
+#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
+
+#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
+
+#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
+
+#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
+
+#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
+
+#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
+
+#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
+
+#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
+
+#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
+
+#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
+
+#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
+
+#define A_XGM_SERDES_STATUS0 0x98c
+
+#define A_XGM_SERDES_STATUS1 0x990
+
+#define S_CMULOCK 31
+#define V_CMULOCK(x) ((x) << S_CMULOCK)
+#define F_CMULOCK V_CMULOCK(1U)
+
+#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+
+#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffff
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
+
+#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
new file mode 100644
index 0000000000..2e9a74fe09
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -0,0 +1,3383 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <net/arp.h>
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+#define USE_GTS 0
+
+#define SGE_RX_SM_BUF_SIZE 1536
+
+#define SGE_RX_COPY_THRES 256
+#define SGE_RX_PULL_LEN 128
+
+#define SGE_PG_RSVD SMP_CACHE_BYTES
+/*
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE. If set to 0, FL0 will use sk_buffs
+ * directly.
+ */
+#define FL0_PG_CHUNK_SIZE 2048
+#define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
+#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
+#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
+
+#define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)
+
+/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 16U
+/*
+ * Period of the Tx buffer reclaim timer. This timer does not need to run
+ * frequently as Tx buffers are usually reclaimed by new Tx packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U
+
+/* WR size in bytes */
+#define WR_LEN (WR_FLITS * 8)
+
+/*
+ * Types of Tx queues in each queue set. Order here matters, do not change.
+ */
+enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
+
+/* Values for sge_txq.flags */
+enum {
+ TXQ_RUNNING = 1 << 0, /* fetch engine is running */
+ TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
+};
+
+struct tx_desc {
+ __be64 flit[TX_DESC_FLITS];
+};
+
+struct rx_desc {
+ __be32 addr_lo;
+ __be32 len_gen;
+ __be32 gen2;
+ __be32 addr_hi;
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ u8 eop; /* set if last descriptor for packet */
+ u8 addr_idx; /* buffer index of first SGL entry in descriptor */
+ u8 fragidx; /* first page fragment associated with descriptor */
+ s8 sflit; /* start flit of first SGL entry in descriptor */
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ union {
+ struct sk_buff *skb;
+ struct fl_pg_chunk pg_chunk;
+ };
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+struct rsp_desc { /* response queue descriptor */
+ struct rss_header rss_hdr;
+ __be32 flags;
+ __be32 len_cq;
+ struct_group(immediate,
+ u8 imm_data[47];
+ u8 intr_gen;
+ );
+};
+
+/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+ struct pci_dev *pdev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/*
+ * Maps a number of flits to the number of Tx descriptors that can hold them.
+ * The formula is
+ *
+ * desc = 1 + (flits - 2) / (WR_FLITS - 1).
+ *
+ * HW allows up to 4 descriptors to be combined into a WR.
+ */
+static u8 flit_desc_map[] = {
+ 0,
+#if SGE_NUM_GENBITS == 1
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+#elif SGE_NUM_GENBITS == 2
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+#else
+# error "SGE_NUM_GENBITS must be 1 or 2"
+#endif
+};
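+
+/*
+ * Worked check of the formula above (assuming WR_FLITS == 15 in the
+ * SGE_NUM_GENBITS == 2 build, consistent with the table): 16 flits give
+ * 1 + (16 - 2) / 14 = 2 descriptors, matching the table entry.
+ */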
+
+static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
+{
+ return container_of(q, struct sge_qset, rspq);
+}
+
+static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
+{
+ return container_of(q, struct sge_qset, txq[qidx]);
+}
+
+/**
+ * refill_rspq - replenish an SGE response queue
+ * @adapter: the adapter
+ * @q: the response queue to replenish
+ * @credits: how many new responses to make available
+ *
+ * Replenishes a response queue by making the supplied number of responses
+ * available to HW.
+ */
+static inline void refill_rspq(struct adapter *adapter,
+ const struct sge_rspq *q, unsigned int credits)
+{
+ rmb();
+ t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
+ V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
+}
+
+/**
+ * need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ * Returns true if the platform needs sk_buff unmapping. The result is a
+ * compile-time constant, so the compiler can optimize away the unmapping
+ * code when this returns false.
+ */
+static inline int need_skb_unmap(void)
+{
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * unmap_skb - unmap a packet main body and its page fragments
+ * @skb: the packet
+ * @q: the Tx queue containing Tx descriptors for the packet
+ * @cidx: index of Tx descriptor
+ * @pdev: the PCI device
+ *
+ * Unmap the main body of an sk_buff and its page fragments, if any.
+ * Because of the fairly complicated structure of our SGLs and the desire
+ * to conserve space for metadata, the information necessary to unmap an
+ * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
+ * descriptors (the physical addresses of the various data buffers), and
+ * the SW descriptor state (assorted indices). The send functions
+ * initialize the indices for the first packet descriptor so we can unmap
+ * the buffers held in the first Tx descriptor here, and we have enough
+ * information at this point to set the state for the next Tx descriptor.
+ *
+ * Note that it is possible to clean up the first descriptor of a packet
+ * before the send routines have written the next descriptors, but this
+ * race does not cause any problem. We just end up writing the unmapping
+ * info for the descriptor first.
+ */
+static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
+ unsigned int cidx, struct pci_dev *pdev)
+{
+ const struct sg_ent *sgp;
+ struct tx_sw_desc *d = &q->sdesc[cidx];
+ int nfrags, frag_idx, curflit, j = d->addr_idx;
+
+ sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
+ frag_idx = d->fragidx;
+
+ if (frag_idx == 0 && skb_headlen(skb)) {
+ dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ j = 1;
+ }
+
+ curflit = d->sflit + 1 + j;
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ while (frag_idx < nfrags && curflit < WR_FLITS) {
+ dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
+ DMA_TO_DEVICE);
+ j ^= 1;
+ if (j == 0) {
+ sgp++;
+ curflit++;
+ }
+ curflit++;
+ frag_idx++;
+ }
+
+ if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
+ d = cidx + 1 == q->size ? q->sdesc : d + 1;
+ d->fragidx = frag_idx;
+ d->addr_idx = j;
+ d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
+ }
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
+ unsigned int n)
+{
+ struct tx_sw_desc *d;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int cidx = q->cidx;
+
+ const int need_unmap = need_skb_unmap() &&
+ q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->skb) { /* an SGL is present */
+ if (need_unmap)
+ unmap_skb(d->skb, q, cidx, pdev);
+ if (d->eop) {
+ dev_consume_skb_any(d->skb);
+ d->skb = NULL;
+ }
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @chunk: maximum number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue's lock held.
+ */
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+ struct sge_txq *q,
+ unsigned int chunk)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ reclaim = min(chunk, reclaim);
+ if (reclaim) {
+ free_tx_desc(adapter, q, reclaim);
+ q->cleaned += reclaim;
+ q->in_use -= reclaim;
+ }
+ return q->processed - q->cleaned;
+}
+
+/**
+ * should_restart_tx - are there enough resources to restart a Tx queue?
+ * @q: the Tx queue
+ *
+ * Checks if there are enough descriptors to restart a suspended Tx queue.
+ */
+static inline int should_restart_tx(const struct sge_txq *q)
+{
+ unsigned int r = q->processed - q->cleaned;
+
+ return q->in_use - r < (q->size >> 1);
+}
+
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+ struct rx_sw_desc *d)
+{
+ if (q->use_pages && d->pg_chunk.page) {
+ (*d->pg_chunk.p_cnt)--;
+ if (!*d->pg_chunk.p_cnt)
+ dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
+ q->alloc_size, DMA_FROM_DEVICE);
+
+ put_page(d->pg_chunk.page);
+ d->pg_chunk.page = NULL;
+ } else {
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
+ q->buf_size, DMA_FROM_DEVICE);
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @pdev: the PCI device associated with the adapter
+ * @q: the SGE free list to clean up
+ *
+ * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
+ * this queue should be stopped before calling this function.
+ */
+static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
+{
+ unsigned int cidx = q->cidx;
+
+ while (q->credits--) {
+ struct rx_sw_desc *d = &q->sdesc[cidx];
+
+ clear_rx_desc(pdev, q, d);
+ if (++cidx == q->size)
+ cidx = 0;
+ }
+
+ if (q->pg_chunk.page) {
+ __free_pages(q->pg_chunk.page, q->order);
+ q->pg_chunk.page = NULL;
+ }
+}
+
+/**
+ * add_one_rx_buf - add a packet buffer to a free-buffer list
+ * @va: buffer start VA
+ * @len: the buffer length
+ * @d: the HW Rx descriptor to write
+ * @sd: the SW Rx descriptor to write
+ * @gen: the generation bit value
+ * @pdev: the PCI device associated with the adapter
+ *
+ * Add a buffer of the given length to the supplied HW and SW Rx
+ * descriptors.
+ */
+static inline int add_one_rx_buf(void *va, unsigned int len,
+ struct rx_desc *d, struct rx_sw_desc *sd,
+ unsigned int gen, struct pci_dev *pdev)
+{
+ dma_addr_t mapping;
+
+ mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ return -ENOMEM;
+
+ dma_unmap_addr_set(sd, dma_addr, mapping);
+
+ d->addr_lo = cpu_to_be32(mapping);
+ d->addr_hi = cpu_to_be32((u64) mapping >> 32);
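+	/* complete the address writes before the generation bits below */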
+ dma_wmb();
+ d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+ d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
+}
+
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+ unsigned int gen)
+{
+ d->addr_lo = cpu_to_be32(mapping);
+ d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+ dma_wmb();
+ d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+ d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
+}
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+ struct rx_sw_desc *sd, gfp_t gfp,
+ unsigned int order)
+{
+ if (!q->pg_chunk.page) {
+ dma_addr_t mapping;
+
+ q->pg_chunk.page = alloc_pages(gfp, order);
+ if (unlikely(!q->pg_chunk.page))
+ return -ENOMEM;
+ q->pg_chunk.va = page_address(q->pg_chunk.page);
+ q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+ SGE_PG_RSVD;
+ q->pg_chunk.offset = 0;
+ mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
+ 0, q->alloc_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
+ __free_pages(q->pg_chunk.page, order);
+ q->pg_chunk.page = NULL;
+ return -EIO;
+ }
+ q->pg_chunk.mapping = mapping;
+ }
+ sd->pg_chunk = q->pg_chunk;
+
+ prefetch(sd->pg_chunk.p_cnt);
+
+ q->pg_chunk.offset += q->buf_size;
+ if (q->pg_chunk.offset == (PAGE_SIZE << order))
+ q->pg_chunk.page = NULL;
+ else {
+ q->pg_chunk.va += q->buf_size;
+ get_page(q->pg_chunk.page);
+ }
+
+ if (sd->pg_chunk.offset == 0)
+ *sd->pg_chunk.p_cnt = 1;
+ else
+ *sd->pg_chunk.p_cnt += 1;
+
+ return 0;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= q->credits / 4) {
+ q->pend_cred = 0;
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+ }
+}
+
+/**
+ * refill_fl - refill an SGE free-buffer list
+ * @adap: the adapter
+ * @q: the free-list to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for allocating new buffers
+ *
+ * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must ensure that
+ * @n does not exceed the queue's capacity. Returns the number of
+ * buffers actually added.
+ */
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+{
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ struct rx_desc *d = &q->desc[q->pidx];
+ unsigned int count = 0;
+
+ while (n--) {
+ dma_addr_t mapping;
+ int err;
+
+ if (q->use_pages) {
+ if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+ q->order))) {
+nomem: q->alloc_failed++;
+ break;
+ }
+ mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
+ dma_unmap_addr_set(sd, dma_addr, mapping);
+
+ add_one_rx_chunk(mapping, d, q->gen);
+ dma_sync_single_for_device(&adap->pdev->dev, mapping,
+ q->buf_size - SGE_PG_RSVD,
+ DMA_FROM_DEVICE);
+ } else {
+ void *buf_start;
+
+ struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+ if (!skb)
+ goto nomem;
+
+ sd->skb = skb;
+ buf_start = skb->data;
+ err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+ q->gen, adap->pdev);
+ if (unlikely(err)) {
+ clear_rx_desc(adap->pdev, q, sd);
+ break;
+ }
+ }
+
+ d++;
+ sd++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ count++;
+ }
+
+ q->credits += count;
+ q->pend_cred += count;
+ ring_fl_db(adap, q);
+
+ return count;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
+ GFP_ATOMIC | __GFP_COMP);
+}
+
+/**
+ * recycle_rx_buf - recycle a receive buffer
+ * @adap: the adapter
+ * @q: the SGE free list
+ * @idx: index of buffer to recycle
+ *
+ * Recycles the specified buffer on the given free list by adding it at
+ * the next available slot on the list.
+ */
+static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
+ unsigned int idx)
+{
+ struct rx_desc *from = &q->desc[idx];
+ struct rx_desc *to = &q->desc[q->pidx];
+
+ q->sdesc[q->pidx] = q->sdesc[idx];
+ to->addr_lo = from->addr_lo; /* already big endian */
+ to->addr_hi = from->addr_hi; /* likewise */
+ dma_wmb();
+ to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
+ to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
+
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+
+ q->credits++;
+ q->pend_cred++;
+ ring_fl_db(adap, q);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @pdev: the PCI device
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address of the HW ring (its return value),
+ * the physical address of the HW ring (via @phys), and the address of
+ * the SW ring (via @metadata).
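+ *
+ * The caller must eventually free both the HW ring and the SW
+ * metadata, as t3_free_qset() does.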
+ */
+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t *phys, void *metadata)
+{
+ size_t len = nelem * elem_size;
+ void *s = NULL;
+ void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ if (sw_size && metadata) {
+ s = kcalloc(nelem, sw_size, GFP_KERNEL);
+
+ if (!s) {
+ dma_free_coherent(&pdev->dev, len, p, *phys);
+ return NULL;
+ }
+ *(void **)metadata = s;
+ }
+ return p;
+}
+
+/**
+ * t3_reset_qset - reset a sge qset
+ * @q: the queue set
+ *
+ * Reset the qset structure. The NAPI structure is preserved in the
+ * event of the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+ if (q->adap &&
+ !(q->adap->flags & NAPI_INIT)) {
+ memset(q, 0, sizeof(*q));
+ return;
+ }
+
+ q->adap = NULL;
+ memset(&q->rspq, 0, sizeof(q->rspq));
+ memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+ memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+ q->txq_stopped = 0;
+ q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
+ q->rx_reclaim_timer.function = NULL;
+ q->nomem = 0;
+ napi_free_frags(&q->napi);
+}
+
+/**
+ * t3_free_qset - free the resources of an SGE queue set
+ * @adapter: the adapter owning the queue set
+ * @q: the queue set
+ *
+ * Release the HW and SW resources associated with an SGE queue set, such
+ * as HW contexts, packet buffers, and descriptor rings. Traffic to the
+ * queue set must be quiesced prior to calling this.
+ */
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+{
+ int i;
+ struct pci_dev *pdev = adapter->pdev;
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i)
+ if (q->fl[i].desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ free_rx_bufs(pdev, &q->fl[i]);
+ kfree(q->fl[i].sdesc);
+ dma_free_coherent(&pdev->dev,
+ q->fl[i].size *
+ sizeof(struct rx_desc), q->fl[i].desc,
+ q->fl[i].phys_addr);
+ }
+
+ for (i = 0; i < SGE_TXQ_PER_SET; ++i)
+ if (q->txq[i].desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ if (q->txq[i].sdesc) {
+ free_tx_desc(adapter, &q->txq[i],
+ q->txq[i].in_use);
+ kfree(q->txq[i].sdesc);
+ }
+ dma_free_coherent(&pdev->dev,
+ q->txq[i].size *
+ sizeof(struct tx_desc),
+ q->txq[i].desc, q->txq[i].phys_addr);
+ __skb_queue_purge(&q->txq[i].sendq);
+ }
+
+ if (q->rspq.desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ dma_free_coherent(&pdev->dev,
+ q->rspq.size * sizeof(struct rsp_desc),
+ q->rspq.desc, q->rspq.phys_addr);
+ }
+
+ t3_reset_qset(q);
+}
+
+/**
+ * init_qset_cntxt - initialize an SGE queue set context info
+ * @qs: the queue set
+ * @id: the queue set id
+ *
+ * Initializes the TIDs and context ids for the queues of a queue set.
+ */
+static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
+{
+ qs->rspq.cntxt_id = id;
+ qs->fl[0].cntxt_id = 2 * id;
+ qs->fl[1].cntxt_id = 2 * id + 1;
+ qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
+ qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
+ qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
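+ *
+ * Each sg_ent packs two address/length pairs into three 8-byte flits,
+ * so, for example, 2 entries take 3 flits while 3 entries take
+ * (3 * 3) / 2 + 1 = 5 flits.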
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
+ return (3 * n) / 2 + (n & 1);
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Calculates the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
+ return flit_desc_map[n];
+}
+
+/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+ unsigned int len, unsigned int drop_thres)
+{
+ struct sk_buff *skb = NULL;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ prefetch(sd->skb->data);
+ fl->credits--;
+
+ if (len <= SGE_RX_COPY_THRES) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ __skb_put(skb, len);
+ dma_sync_single_for_cpu(&adap->pdev->dev,
+ dma_unmap_addr(sd, dma_addr),
+ len, DMA_FROM_DEVICE);
+ memcpy(skb->data, sd->skb->data, len);
+ dma_sync_single_for_device(&adap->pdev->dev,
+ dma_unmap_addr(sd, dma_addr),
+ len, DMA_FROM_DEVICE);
+ } else if (!drop_thres)
+ goto use_orig_buf;
+recycle:
+ recycle_rx_buf(adap, fl, fl->cidx);
+ return skb;
+ }
+
+ if (unlikely(fl->credits < drop_thres) &&
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
+ GFP_ATOMIC | __GFP_COMP) == 0)
+ goto recycle;
+
+use_orig_buf:
+ dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
+ fl->buf_size, DMA_FROM_DEVICE);
+ skb = sd->skb;
+ skb_put(skb, len);
+ __refill_fl(adap, fl);
+ return skb;
+}
+
+/**
+ * get_packet_pg - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @q: the queue
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list populated with page chunks.
+ * If the packet is small we make a copy and recycle the original buffer,
+ * otherwise we attach the original buffer as a page fragment to a fresh
+ * sk_buff. If a positive drop threshold is supplied packets are dropped
+ * and their buffers recycled if (a) the number of remaining buffers is
+ * under the threshold and the packet is too big to copy, or (b) there's
+ * no system memory.
+ *
+ * Note: this function is similar to @get_packet but deals with Rx buffers
+ * that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+ struct sge_rspq *q, unsigned int len,
+ unsigned int drop_thres)
+{
+ struct sk_buff *newskb, *skb;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
+
+ newskb = skb = q->pg_skb;
+ if (!skb && (len <= SGE_RX_COPY_THRES)) {
+ newskb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(newskb != NULL)) {
+ __skb_put(newskb, len);
+ dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
+ len, DMA_FROM_DEVICE);
+ memcpy(newskb->data, sd->pg_chunk.va, len);
+ dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
+ len, DMA_FROM_DEVICE);
+ } else if (!drop_thres)
+ return NULL;
+recycle:
+ fl->credits--;
+ recycle_rx_buf(adap, fl, fl->cidx);
+ q->rx_recycle_buf++;
+ return newskb;
+ }
+
+ if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
+ goto recycle;
+
+ prefetch(sd->pg_chunk.p_cnt);
+
+ if (!skb)
+ newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
+ if (unlikely(!newskb)) {
+ if (!drop_thres)
+ return NULL;
+ goto recycle;
+ }
+
+ dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
+ DMA_FROM_DEVICE);
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+ dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
+ fl->alloc_size, DMA_FROM_DEVICE);
+ if (!skb) {
+ __skb_put(newskb, SGE_RX_PULL_LEN);
+ memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+ skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
+ sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+ len - SGE_RX_PULL_LEN);
+ newskb->len = len;
+ newskb->data_len = len - SGE_RX_PULL_LEN;
+ newskb->truesize += newskb->data_len;
+ } else {
+ skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
+ sd->pg_chunk.page,
+ sd->pg_chunk.offset, len);
+ newskb->len += len;
+ newskb->data_len += len;
+ newskb->truesize += len;
+ }
+
+ fl->credits--;
+ /*
+ * We do not refill FLs here, we let the caller do it to overlap a
+ * prefetch.
+ */
+ return newskb;
+}
+
+/**
+ * get_imm_packet - return the next ingress packet buffer from a response
+ * @resp: the response descriptor containing the packet data
+ *
+ * Return a packet containing the immediate data of the given response.
+ */
+static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
+{
+ struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
+
+ if (skb) {
+ __skb_put(skb, IMMED_PKT_SIZE);
+ BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
+ skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
+ }
+ return skb;
+}
+
+/**
+ * calc_tx_descs - calculate the number of Tx descriptors for a packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given Ethernet
+ * packet. Ethernet packets require addition of WR and CPL headers.
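+ *
+ * As an illustration, a non-GSO packet too long for immediate data and
+ * carrying one page fragment needs sgl_len(2) + 2 = 5 flits, which
+ * flits_to_desc() then converts to a descriptor count.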
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
+ return 1;
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
+ if (skb_shinfo(skb)->gso_size)
+ flits++;
+ return flits_to_desc(flits);
+}
+
+/* map_skb - map a packet main body and its page fragments
+ * @pdev: the PCI device
+ * @skb: the packet
+ * @addr: placeholder to save the mapped addresses
+ *
+ * Map the main body of an sk_buff and its page fragments, if any.
+ */
+static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ if (skb_headlen(skb)) {
+ *addr = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, *addr))
+ goto out_err;
+ addr++;
+ }
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+
+ for (fp = si->frags; fp < end; fp++) {
+ *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, *addr))
+ goto unwind;
+ addr++;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
+ DMA_TO_DEVICE);
+out_err:
+ return -ENOMEM;
+}
+
+/**
+ * write_sgl - populate a scatter/gather list for a packet
+ * @skb: the packet
+ * @sgp: the SGL to populate
+ * @start: start address of skb main body data to include in the SGL
+ * @len: length of skb main body data to include in the SGL
+ * @addr: the list of the mapped addresses
+ *
+ * Copies the scatter/gather list for the buffers that make up a packet
+ * and returns the SGL size in 8-byte words. The caller must size the SGL
+ * appropriately.
+ */
+static inline unsigned int write_sgl(const struct sk_buff *skb,
+ struct sg_ent *sgp, unsigned char *start,
+ unsigned int len, const dma_addr_t *addr)
+{
+ unsigned int i, j = 0, k = 0, nfrags;
+
+ if (len) {
+ sgp->len[0] = cpu_to_be32(len);
+ sgp->addr[j++] = cpu_to_be64(addr[k++]);
+ }
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
+ sgp->addr[j] = cpu_to_be64(addr[k++]);
+ j ^= 1;
+ if (j == 0)
+ ++sgp;
+ }
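+ /*
+ * Close out a trailing half-filled sg_ent and return the SGL size:
+ * each pair of entries packs into three flits, and a final odd
+ * entry (j == 1) accounts for one more.
+ */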
+ if (j)
+ sgp->len[j] = 0;
+ return ((nfrags + (len != 0)) * 3) / 2 + j;
+}
+
+/**
+ * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ *
+ * Ring the doorbell if a Tx queue is asleep. There is a natural race
+ * where the HW may go to sleep just after we check; in that case the
+ * interrupt handler will detect the outstanding Tx packet and ring
+ * the doorbell for us.
+ *
+ * When GTS is disabled we unconditionally ring the doorbell.
+ */
+static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+#if USE_GTS
+ clear_bit(TXQ_LAST_PKT_DB, &q->flags);
+ if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ }
+#else
+ wmb(); /* write descriptors before telling HW */
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+#endif
+}
+
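+/*
+ * When the SGE uses two generation bits, the last flit of each Tx
+ * descriptor carries a copy of the generation value so that a
+ * descriptor is treated as valid only once it is completely written.
+ */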
+static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
+{
+#if SGE_NUM_GENBITS == 2
+ d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
+#endif
+}
+
+/**
+ * write_wr_hdr_sgl - write a WR header and, optionally, SGL
+ * @ndesc: number of Tx descriptors spanned by the SGL
+ * @skb: the packet corresponding to the WR
+ * @d: first Tx descriptor to be written
+ * @pidx: index of above descriptors
+ * @q: the SGE Tx queue
+ * @sgl: the SGL
+ * @flits: number of flits to the start of the SGL in the first descriptor
+ * @sgl_flits: the SGL size in flits
+ * @gen: the Tx descriptor generation
+ * @wr_hi: top 32 bits of WR header based on WR type (big endian)
+ * @wr_lo: low 32 bits of WR header based on WR type (big endian)
+ *
+ * Write a work request header and an associated SGL. If the SGL is
+ * small enough to fit into one Tx descriptor it has already been written
+ * and we just need to write the WR header. Otherwise we distribute the
+ * SGL across the number of descriptors it spans.
+ */
+static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
+ struct tx_desc *d, unsigned int pidx,
+ const struct sge_txq *q,
+ const struct sg_ent *sgl,
+ unsigned int flits, unsigned int sgl_flits,
+ unsigned int gen, __be32 wr_hi,
+ __be32 wr_lo)
+{
+ struct work_request_hdr *wrp = (struct work_request_hdr *)d;
+ struct tx_sw_desc *sd = &q->sdesc[pidx];
+
+ sd->skb = skb;
+ if (need_skb_unmap()) {
+ sd->fragidx = 0;
+ sd->addr_idx = 0;
+ sd->sflit = flits;
+ }
+
+ if (likely(ndesc == 1)) {
+ sd->eop = 1;
+ wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+ dma_wmb();
+ wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
+ V_WR_GEN(gen)) | wr_lo;
+ wr_gen2(d, gen);
+ } else {
+ unsigned int ogen = gen;
+ const u64 *fp = (const u64 *)sgl;
+ struct work_request_hdr *wp = wrp;
+
+ wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+
+ while (sgl_flits) {
+ unsigned int avail = WR_FLITS - flits;
+
+ if (avail > sgl_flits)
+ avail = sgl_flits;
+ memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
+ sgl_flits -= avail;
+ ndesc--;
+ if (!sgl_flits)
+ break;
+
+ fp += avail;
+ d++;
+ sd->eop = 0;
+ sd++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ d = q->desc;
+ sd = q->sdesc;
+ }
+
+ sd->skb = skb;
+ wrp = (struct work_request_hdr *)d;
+ wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(1)) | wr_hi;
+ wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
+ sgl_flits + 1)) |
+ V_WR_GEN(gen)) | wr_lo;
+ wr_gen2(d, gen);
+ flits = 1;
+ }
+ sd->eop = 1;
+ wrp->wr_hi |= htonl(F_WR_EOP);
+ dma_wmb();
+ wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
+ wr_gen2((struct tx_desc *)wp, ogen);
+ WARN_ON(ndesc != 0);
+ }
+}
+
+/**
+ * write_tx_pkt_wr - write a TX_PKT work request
+ * @adap: the adapter
+ * @skb: the packet to send
+ * @pi: the egress interface
+ * @pidx: index of the first Tx descriptor to write
+ * @gen: the generation value to use
+ * @q: the Tx queue
+ * @ndesc: number of descriptors the packet will occupy
+ * @compl: the value of the COMPL bit to use
+ * @addr: the DMA-mapped addresses of the packet body and fragments
+ *
+ * Generate a TX_PKT work request to send the supplied packet.
+ */
+static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
+ const struct port_info *pi,
+ unsigned int pidx, unsigned int gen,
+ struct sge_txq *q, unsigned int ndesc,
+ unsigned int compl, const dma_addr_t *addr)
+{
+ unsigned int flits, sgl_flits, cntrl, tso_info;
+ struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+ struct tx_desc *d = &q->desc[pidx];
+ struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
+
+ cpl->len = htonl(skb->len);
+ cntrl = V_TXPKT_INTF(pi->port_id);
+
+ if (skb_vlan_tag_present(skb))
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
+
+ tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
+ if (tso_info) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
+
+ d->flit[2] = 0;
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
+ hdr->cntrl = htonl(cntrl);
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
+ CPL_ETH_II : CPL_ETH_II_VLAN;
+ tso_info |= V_LSO_ETH_TYPE(eth_type) |
+ V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+ V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
+ hdr->lso_info = htonl(tso_info);
+ flits = 3;
+ } else {
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
+ cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
+ cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
+ cpl->cntrl = htonl(cntrl);
+
+ if (skb->len <= WR_LEN - sizeof(*cpl)) {
+ q->sdesc[pidx].skb = NULL;
+ if (!skb->data_len)
+ skb_copy_from_linear_data(skb, &d->flit[2],
+ skb->len);
+ else
+ skb_copy_bits(skb, 0, &d->flit[2], skb->len);
+
+ flits = (skb->len + 7) / 8 + 2;
+ cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
+ V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
+ | F_WR_SOP | F_WR_EOP | compl);
+ dma_wmb();
+ cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
+ V_WR_TID(q->token));
+ wr_gen2(d, gen);
+ dev_consume_skb_any(skb);
+ return;
+ }
+
+ flits = 2;
+ }
+
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+
+ write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
+ htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
+ htonl(V_WR_TID(q->token)));
+}
+
+static inline void t3_stop_tx_queue(struct netdev_queue *txq,
+ struct sge_qset *qs, struct sge_txq *q)
+{
+ netif_tx_stop_queue(txq);
+ set_bit(TXQ_ETH, &qs->txq_stopped);
+ q->stops++;
+}
+
+/**
+ * t3_eth_xmit - add a packet to the Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
+ */
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int qidx;
+ unsigned int ndesc, pidx, credits, gen, compl;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct netdev_queue *txq;
+ struct sge_qset *qs;
+ struct sge_txq *q;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+
+ /*
+ * The chip's minimum packet length is 9 octets, but we play it safe
+ * and reject anything shorter than an Ethernet header.
+ */
+ if (unlikely(skb->len < ETH_HLEN)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ qidx = skb_get_queue_mapping(skb);
+ qs = &pi->qs[qidx];
+ q = &qs->txq[TXQ_ETH];
+ txq = netdev_get_tx_queue(dev, qidx);
+
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ credits = q->size - q->in_use;
+ ndesc = calc_tx_descs(skb);
+
+ if (unlikely(credits < ndesc)) {
+ t3_stop_tx_queue(txq, qs, q);
+ dev_err(&adap->pdev->dev,
+ "%s: Tx ring %u full while queue awake!\n",
+ dev->name, q->cntxt_id & 7);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* If the packet is too big for immediate data, DMA-map its buffers */
+ if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
+ if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ q->in_use += ndesc;
+ if (unlikely(credits - ndesc < q->stop_thres)) {
+ t3_stop_tx_queue(txq, qs, q);
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+ q->restarts++;
+ netif_tx_start_queue(txq);
+ }
+ }
+
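+ /*
+ * Request a WR completion roughly once every 8 descriptors: bit 3
+ * of the running unacked count is shifted into the COMPL position
+ * of the WR header written below, and the remainder carries over.
+ */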
+ gen = q->gen;
+ q->unacked += ndesc;
+ compl = (q->unacked & 8) << (S_WR_COMPL - 3);
+ q->unacked &= 7;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+
+ /* update port statistics */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ qs->port_stats[SGE_PSTAT_TX_CSUM]++;
+ if (skb_shinfo(skb)->gso_size)
+ qs->port_stats[SGE_PSTAT_TSO]++;
+ if (skb_vlan_tag_present(skb))
+ qs->port_stats[SGE_PSTAT_VLANINS]++;
+
+ /*
+ * We do not use Tx completion interrupts to free DMAd Tx packets.
+ * This is good for performance but means that we rely on new Tx
+ * packets arriving to run the destructors of completed packets,
+ * which open up space in their sockets' send queues. Sometimes
+ * we do not get such new packets, causing Tx to stall; a single
+ * UDP transmitter is a good example of this situation. We have
+ * a cleanup timer that periodically reclaims completed packets,
+ * but it doesn't run often enough (nor do we want it to) to prevent
+ * lengthy stalls. A solution to this problem is to run the
+ * destructor early, after the packet is queued but before it's DMAd.
+ * A downside is that we lie to socket memory accounting, but the
+ * amount of extra memory is reasonable (limited by the number of Tx
+ * descriptors), the packets almost always do get freed quickly by
+ * new packets, and for protocols like TCP that wait for acks to
+ * really free up the data the extra memory is even less.
+ * On the positive side we run the destructors on the sending CPU
+ * rather than on a potentially different completing CPU, usually a
+ * good thing. We also run them without holding our Tx queue lock,
+ * unlike what reclaim_completed_tx() would otherwise do.
+ *
+ * Run the destructor before telling the DMA engine about the packet
+ * to make sure it doesn't complete and get freed prematurely.
+ */
+ if (likely(!skb_shared(skb)))
+ skb_orphan(skb);
+
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+ check_ring_tx_db(adap, q);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * write_imm - write a packet into a Tx descriptor as immediate data
+ * @d: the Tx descriptor to write
+ * @skb: the packet
+ * @len: the length of packet data to write as immediate data
+ * @gen: the generation bit value to write
+ *
+ * Writes a packet as immediate data into a Tx descriptor. The packet
+ * contains a work request at its beginning. We must write the packet
+ * carefully so the SGE doesn't read it accidentally before it's written
+ * in its entirety.
+ */
+static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
+ unsigned int len, unsigned int gen)
+{
+ struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
+ struct work_request_hdr *to = (struct work_request_hdr *)d;
+
+ if (likely(!skb->data_len))
+ memcpy(&to[1], &from[1], len - sizeof(*from));
+ else
+ skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
+
+ to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
+ V_WR_BCNTLFLT(len & 7));
+ dma_wmb();
+ to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
+ V_WR_LEN((len + 7) / 8));
+ wr_gen2(d, gen);
+ kfree_skb(skb);
+}
+
+/**
+ * check_desc_avail - check descriptor availability on a send queue
+ * @adap: the adapter
+ * @q: the send queue
+ * @skb: the packet needing the descriptors
+ * @ndesc: the number of Tx descriptors needed
+ * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
+ *
+ * Checks if the requested number of Tx descriptors is available on an
+ * SGE send queue. If the queue is already suspended or not enough
+ * descriptors are available the packet is queued for later transmission.
+ * Must be called with the Tx queue locked.
+ *
+ * Returns 0 if enough descriptors are available, 1 if there aren't
+ * enough descriptors and the packet has been queued, and 2 if the caller
+ * needs to retry because there weren't enough descriptors at the
+ * beginning of the call but some freed up in the meantime.
+ */
+static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb, unsigned int ndesc,
+ unsigned int qid)
+{
+ if (unlikely(!skb_queue_empty(&q->sendq))) {
+addq_exit:
+ __skb_queue_tail(&q->sendq, skb);
+ return 1;
+ }
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ struct sge_qset *qs = txq_to_qset(q, qid);
+
+ set_bit(qid, &qs->txq_stopped);
+ smp_mb__after_atomic();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(qid, &qs->txq_stopped))
+ return 2;
+
+ q->stops++;
+ goto addq_exit;
+ }
+ return 0;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ q->in_use -= reclaim;
+ q->cleaned += reclaim;
+}
+
+static inline int immediate(const struct sk_buff *skb)
+{
+ return skb->len <= WR_LEN;
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @adap: the adapter
+ * @q: the control queue
+ * @skb: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data in a single Tx
+ * descriptor and have no page fragments.
+ */
+static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
+
+ if (unlikely(!immediate(skb))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
+ wrp->wr_lo = htonl(V_WR_TID(q->token));
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx_imm(q);
+
+ ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ q->in_use++;
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @w: pointer to the work associated with this handler
+ *
+ * Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(struct work_struct *w)
+{
+ struct sk_buff *skb;
+ struct sge_qset *qs = container_of(w, struct sge_qset,
+ txq[TXQ_CTRL].qresume_task);
+ struct sge_txq *q = &qs->txq[TXQ_CTRL];
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx_imm(q);
+
+ while (q->in_use < q->size &&
+ (skb = __skb_dequeue(&q->sendq)) != NULL) {
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ q->in_use++;
+ }
+
+ if (!skb_queue_empty(&q->sendq)) {
+ set_bit(TXQ_CTRL, &qs->txq_stopped);
+ smp_mb__after_atomic();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ }
+
+ spin_unlock(&q->lock);
+ wmb();
+ t3_write_reg(qs->adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/*
+ * Send a management message through control queue 0
+ */
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+ local_bh_disable();
+ ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+ local_bh_enable();
+
+ return ret;
+}
+
+/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ int i;
+ const dma_addr_t *p;
+ const struct skb_shared_info *si;
+ const struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ p = dui->addr;
+
+ if (skb_tail_pointer(skb) - skb_transport_header(skb))
+ dma_unmap_single(&dui->pdev->dev, *p++,
+ skb_tail_pointer(skb) - skb_transport_header(skb),
+ DMA_TO_DEVICE);
+
+ si = skb_shinfo(skb);
+ for (i = 0; i < si->nr_frags; i++)
+ dma_unmap_page(&dui->pdev->dev, *p++,
+ skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+ const struct sg_ent *sgl, int sgl_flits)
+{
+ dma_addr_t *p;
+ struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ dui->pdev = pdev;
+ for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+ *p++ = be64_to_cpu(sgl->addr[0]);
+ *p++ = be64_to_cpu(sgl->addr[1]);
+ }
+ if (sgl_flits)
+ *p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
+ * write_ofld_wr - write an offload work request
+ * @adap: the adapter
+ * @skb: the packet to send
+ * @q: the Tx queue
+ * @pidx: index of the first Tx descriptor to write
+ * @gen: the generation value to use
+ * @ndesc: number of descriptors the packet will occupy
+ * @addr: the DMA-mapped addresses of the packet body and fragments
+ *
+ * Write an offload work request to send the supplied packet. The packet
+ * data already carry the work request with most fields populated.
+ */
+static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+ struct sge_txq *q, unsigned int pidx,
+ unsigned int gen, unsigned int ndesc,
+ const dma_addr_t *addr)
+{
+ unsigned int sgl_flits, flits;
+ struct work_request_hdr *from;
+ struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+ struct tx_desc *d = &q->desc[pidx];
+
+ if (immediate(skb)) {
+ q->sdesc[pidx].skb = NULL;
+ write_imm(d, skb, skb->len, gen);
+ return;
+ }
+
+ /* Only TX_DATA builds SGLs */
+
+ from = (struct work_request_hdr *)skb->data;
+ memcpy(&d->flit[1], &from[1],
+ skb_transport_offset(skb) - sizeof(*from));
+
+ flits = skb_transport_offset(skb) / 8;
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
+ skb_tail_pointer(skb) - skb_transport_header(skb),
+ addr);
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+ skb->destructor = deferred_unmap_destructor;
+ }
+
+ write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
+ gen, from->wr_hi, from->wr_lo);
+}
+
+/**
+ * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given offload
+ * packet. These packets are already fully constructed.
+ */
+static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (skb->len <= WR_LEN)
+ return 1; /* packet fits as immediate data */
+
+ flits = skb_transport_offset(skb) / 8; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb_tail_pointer(skb) != skb_transport_header(skb))
+ cnt++;
+ return flits_to_desc(flits + sgl_len(cnt));
+}
+
+/**
+ * ofld_xmit - send a packet through an offload queue
+ * @adap: the adapter
+ * @q: the Tx offload queue
+ * @skb: the packet
+ *
+ * Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ skb->priority = ndesc; /* save for restart */
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ if (!immediate(skb) &&
+ map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
+ spin_unlock(&q->lock);
+ return NET_XMIT_SUCCESS;
+ }
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+ check_ring_tx_db(adap, q);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_offloadq - restart a suspended offload queue
+ * @w: pointer to the work associated with this handler
+ *
+ * Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_offloadq(struct work_struct *w)
+{
+ struct sk_buff *skb;
+ struct sge_qset *qs = container_of(w, struct sge_qset,
+ txq[TXQ_OFLD].qresume_task);
+ struct sge_txq *q = &qs->txq[TXQ_OFLD];
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ unsigned int written = 0;
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ while ((skb = skb_peek(&q->sendq)) != NULL) {
+ unsigned int gen, pidx;
+ unsigned int ndesc = skb->priority;
+
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ set_bit(TXQ_OFLD, &qs->txq_stopped);
+ smp_mb__after_atomic();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ break;
+ }
+
+ if (!immediate(skb) &&
+ map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
+ break;
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ written += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ __skb_unlink(skb, &q->sendq);
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
+ (dma_addr_t *)skb->head);
+ spin_lock(&q->lock);
+ }
+ spin_unlock(&q->lock);
+
+#if USE_GTS
+ set_bit(TXQ_RUNNING, &q->flags);
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#endif
+ wmb();
+ if (likely(written))
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/**
+ * queue_set - return the queue set a packet should use
+ * @skb: the packet
+ *
+ * Maps a packet to the SGE queue set it should use. The desired queue
+ * set is carried in bits 1-3 in the packet's priority.
+ */
+static inline int queue_set(const struct sk_buff *skb)
+{
+ return skb->priority >> 1;
+}
+
+/**
+ * is_ctrl_pkt - return whether an offload packet is a control packet
+ * @skb: the packet
+ *
+ * Determines whether an offload packet should use an OFLD or a CTRL
+ * Tx queue. This is indicated by bit 0 in the packet's priority.
+ */
+static inline int is_ctrl_pkt(const struct sk_buff *skb)
+{
+ return skb->priority & 1;
+}
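+
+/*
+ * For example (illustrative values): skb->priority == 5 maps to queue
+ * set 2 (5 >> 1) and, since bit 0 is set, to that set's CTRL queue.
+ */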
+
+/**
+ * t3_offload_tx - send an offload packet
+ * @tdev: the offload device to send to
+ * @skb: the packet
+ *
+ * Sends an offload packet. We use the packet priority to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-3 select the queue set.
+ */
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+ struct adapter *adap = tdev2adap(tdev);
+ struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
+
+ if (unlikely(is_ctrl_pkt(skb)))
+ return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
+
+ return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
+}
+
+/**
+ * offload_enqueue - add an offload packet to an SGE offload receive queue
+ * @q: the SGE response queue
+ * @skb: the packet
+ *
+ * Add a new offload packet to an SGE response queue's offload packet
+ * queue. If the packet is the first on the queue it schedules the RX
+ * softirq to process the queue.
+ */
+static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
+{
+ int was_empty = skb_queue_empty(&q->rx_queue);
+
+ __skb_queue_tail(&q->rx_queue, skb);
+
+ if (was_empty) {
+ struct sge_qset *qs = rspq_to_qset(q);
+
+ napi_schedule(&qs->napi);
+ }
+}
+
+/**
+ * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
+ * @tdev: the offload device that will be receiving the packets
+ * @q: the SGE response queue that assembled the bundle
+ * @skbs: the partial bundle
+ * @n: the number of packets in the bundle
+ *
+ * Delivers a (partial) bundle of Rx offload packets to an offload device.
+ */
+static inline void deliver_partial_bundle(struct t3cdev *tdev,
+ struct sge_rspq *q,
+ struct sk_buff *skbs[], int n)
+{
+ if (n) {
+ q->offload_bundles++;
+ tdev->recv(tdev, skbs, n);
+ }
+}
+
+/**
+ * ofld_poll - NAPI handler for offload packets in interrupt mode
+ * @napi: the network device doing the polling
+ * @budget: polling budget
+ *
+ * The NAPI handler for offload packets when a response queue is serviced
+ * by the hard interrupt handler, i.e., when it's operating in non-polling
+ * mode. Creates small packet batches and sends them through the offload
+ * receive handler. Batches need to be of modest size as we do prefetches
+ * on the packets in each.
+ */
+static int ofld_poll(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct sge_rspq *q = &qs->rspq;
+ struct adapter *adapter = qs->adap;
+ int work_done = 0;
+
+ while (work_done < budget) {
+ struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff_head queue;
+ int ngathered;
+
+ spin_lock_irq(&q->lock);
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&q->rx_queue, &queue);
+ if (skb_queue_empty(&queue)) {
+ napi_complete_done(napi, work_done);
+ spin_unlock_irq(&q->lock);
+ return work_done;
+ }
+ spin_unlock_irq(&q->lock);
+
+ ngathered = 0;
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ if (work_done >= budget)
+ break;
+ work_done++;
+
+ __skb_unlink(skb, &queue);
+ prefetch(skb->data);
+ skbs[ngathered] = skb;
+ if (++ngathered == RX_BUNDLE_SIZE) {
+ q->offload_bundles++;
+ adapter->tdev.recv(&adapter->tdev, skbs,
+ ngathered);
+ ngathered = 0;
+ }
+ }
+ if (!skb_queue_empty(&queue)) {
+ /* splice remaining packets back onto Rx queue */
+ spin_lock_irq(&q->lock);
+ skb_queue_splice(&queue, &q->rx_queue);
+ spin_unlock_irq(&q->lock);
+ }
+ deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
+ }
+
+ return work_done;
+}
+
+/**
+ * rx_offload - process a received offload packet
+ * @tdev: the offload device receiving the packet
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @rx_gather: a gather list of packets if we are building a bundle
+ * @gather_idx: index of the next available slot in the bundle
+ *
+ * Process an ingress offload packet and add it to the offload ingress
+ * queue. Returns the index of the next available slot in the bundle.
+ */
+static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
+ struct sk_buff *skb, struct sk_buff *rx_gather[],
+ unsigned int gather_idx)
+{
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ if (rq->polling) {
+ rx_gather[gather_idx++] = skb;
+ if (gather_idx == RX_BUNDLE_SIZE) {
+ tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
+ gather_idx = 0;
+ rq->offload_bundles++;
+ }
+ } else
+ offload_enqueue(rq, skb);
+
+ return gather_idx;
+}
+
+/**
+ * restart_tx - check whether to restart suspended Tx queues
+ * @qs: the queue set to resume
+ *
+ * Restarts suspended Tx queues of an SGE queue set if they have enough
+ * free resources to resume operation.
+ */
+static void restart_tx(struct sge_qset *qs)
+{
+ if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_ETH]) &&
+ test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+ qs->txq[TXQ_ETH].restarts++;
+ if (netif_running(qs->netdev))
+ netif_tx_wake_queue(qs->tx_q);
+ }
+
+ if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_OFLD]) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
+ qs->txq[TXQ_OFLD].restarts++;
+
+ /* The work can be quite lengthy so we use driver's own queue */
+ queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
+ }
+ if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_CTRL]) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
+ qs->txq[TXQ_CTRL].restarts++;
+
+ /* The work can be quite lengthy so we use driver's own queue */
+ queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
+ }
+}
+
+/**
+ * cxgb3_arp_process - process an ARP request probing a private IP address
+ * @pi: the port info
+ * @skb: the skbuff containing the ARP request
+ *
+ * Check if the ARP request is probing the private IP address
+ * dedicated to iSCSI, and generate an ARP reply if so.
+ */
+static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ unsigned char *sha;
+ __be32 sip, tip;
+
+ if (!dev)
+ return;
+
+ skb_reset_network_header(skb);
+ arp = arp_hdr(skb);
+
+ if (arp->ar_op != htons(ARPOP_REQUEST))
+ return;
+
+ arp_ptr = (unsigned char *)(arp + 1);
+ sha = arp_ptr;
+ arp_ptr += dev->addr_len;
+ memcpy(&sip, arp_ptr, sizeof(sip));
+ arp_ptr += sizeof(sip);
+ arp_ptr += dev->addr_len;
+ memcpy(&tip, arp_ptr, sizeof(tip));
+
+ if (tip != pi->iscsi_ipv4addr)
+ return;
+
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+ pi->iscsic.mac_addr, sha);
+}
+
+static inline int is_arp(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_ARP);
+}
+
+static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
+ struct sk_buff *skb)
+{
+ if (is_arp(skb)) {
+ cxgb3_arp_process(pi, skb);
+ return;
+ }
+
+ if (pi->iscsic.recv)
+ pi->iscsic.recv(pi, skb);
+}
+
+/**
+ * rx_eth - process an ingress ethernet packet
+ * @adap: the adapter
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @pad: padding
+ * @lro: large receive offload
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ * The padding is 2 if the packet was delivered in an Rx buffer and 0
+ * if it was immediate data in a response.
+ */
+static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+ struct sk_buff *skb, int pad, int lro)
+{
+ struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+ struct sge_qset *qs = rspq_to_qset(rq);
+ struct port_info *pi;
+
+ skb_pull(skb, sizeof(*p) + pad);
+ skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
+ pi = netdev_priv(skb->dev);
+ if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
+ p->csum == htons(0xffff) && !p->fragment) {
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(skb);
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+ if (p->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
+ }
+ if (rq->polling) {
+ if (lro)
+ napi_gro_receive(&qs->napi, skb);
+ else {
+ if (unlikely(pi->iscsic.flags))
+ cxgb3_process_iscsi_prov_pack(pi, skb);
+ netif_receive_skb(skb);
+ }
+ } else
+ netif_rx(skb);
+}
+
+static inline int is_eth_tcp(u32 rss)
+{
+ return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ * lro_add_page - add a page chunk to an LRO session
+ * @adap: the adapter
+ * @qs: the associated queue set
+ * @fl: the free list containing the page chunk to add
+ * @len: packet length
+ * @complete: indicates whether this is the last fragment of a frame
+ *
+ * Add a received packet contained in a page chunk to an existing LRO
+ * session.
+ */
+static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+ struct sge_fl *fl, int len, int complete)
+{
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct sk_buff *skb = NULL;
+ struct cpl_rx_pkt *cpl;
+ skb_frag_t *rx_frag;
+ int nr_frags;
+ int offset = 0;
+
+ if (!qs->nomem) {
+ skb = napi_get_frags(&qs->napi);
+ qs->nomem = !skb;
+ }
+
+ fl->credits--;
+
+ dma_sync_single_for_cpu(&adap->pdev->dev,
+ dma_unmap_addr(sd, dma_addr),
+ fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
+
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+ dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
+ fl->alloc_size, DMA_FROM_DEVICE);
+
+ if (!skb) {
+ put_page(sd->pg_chunk.page);
+ if (complete)
+ qs->nomem = 0;
+ return;
+ }
+
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (!nr_frags) {
+ offset = 2 + sizeof(struct cpl_rx_pkt);
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
+
+ if ((qs->netdev->features & NETIF_F_RXCSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
+
+ len -= offset;
+
+ rx_frag += nr_frags;
+ skb_frag_fill_page_desc(rx_frag, sd->pg_chunk.page,
+ sd->pg_chunk.offset + offset, len);
+
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+ skb_shinfo(skb)->nr_frags++;
+
+ if (!complete)
+ return;
+
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+ if (cpl->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
+ }
+ napi_gro_frags(&qs->napi);
+}
+
+/**
+ * handle_rsp_cntrl_info - handles control information in a response
+ * @qs: the queue set corresponding to the response
+ * @flags: the response control flags
+ *
+ * Handles the control information of an SGE response, such as GTS
+ * indications and completion credits for the queue set's Tx queues.
+ * HW coalesces credits; we don't do any extra SW coalescing.
+ */
+static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
+{
+ unsigned int credits;
+
+#if USE_GTS
+ if (flags & F_RSPD_TXQ0_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
+#endif
+
+ credits = G_RSPD_TXQ0_CR(flags);
+ if (credits)
+ qs->txq[TXQ_ETH].processed += credits;
+
+ credits = G_RSPD_TXQ2_CR(flags);
+ if (credits)
+ qs->txq[TXQ_CTRL].processed += credits;
+
+# if USE_GTS
+ if (flags & F_RSPD_TXQ1_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
+# endif
+ credits = G_RSPD_TXQ1_CR(flags);
+ if (credits)
+ qs->txq[TXQ_OFLD].processed += credits;
+}
+
+/**
+ * check_ring_db - check if we need to ring any doorbells
+ * @adap: the adapter
+ * @qs: the queue set whose Tx queues are to be examined
+ * @sleeping: indicates which Tx queue sent GTS
+ *
+ * Checks if some of a queue set's Tx queues need to ring their doorbells
+ * to resume transmission after idling while they still have unprocessed
+ * descriptors.
+ */
+static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
+ unsigned int sleeping)
+{
+ if (sleeping & F_RSPD_TXQ0_GTS) {
+ struct sge_txq *txq = &qs->txq[TXQ_ETH];
+
+ if (txq->cleaned + txq->in_use != txq->processed &&
+ !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+ set_bit(TXQ_RUNNING, &txq->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+ V_EGRCNTX(txq->cntxt_id));
+ }
+ }
+
+ if (sleeping & F_RSPD_TXQ1_GTS) {
+ struct sge_txq *txq = &qs->txq[TXQ_OFLD];
+
+ if (txq->cleaned + txq->in_use != txq->processed &&
+ !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+ set_bit(TXQ_RUNNING, &txq->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+ V_EGRCNTX(txq->cntxt_id));
+ }
+ }
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
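+ *
+ * The HW writes each response with the current generation bit and the
+ * SW flips q->gen whenever the consumer index wraps, so a descriptor
+ * whose F_RSPD_GEN2 bit matches q->gen was written in the current
+ * pass over the ring.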
+ */
+static inline int is_new_response(const struct rsp_desc *r,
+ const struct sge_rspq *q)
+{
+ return (r->intr_gen & F_RSPD_GEN2) == q->gen;
+}
+
+static inline void clear_rspq_bufstate(struct sge_rspq * const q)
+{
+ q->pg_skb = NULL;
+ q->rx_recycle_buf = 0;
+}
+
+#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
+#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
+ V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
+ V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
+ V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
+
+/*
+ * How long to delay the next interrupt in case of memory shortage,
+ * in units of 0.1 us (2500 = 250 us).
+ */
+#define NOMEM_INTR_DELAY 2500
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @adap: the adapter
+ * @qs: the queue set to which the response queue belongs
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as credits and other events
+ * for the queues that belong to the response queue's queue set.
+ * A negative budget is effectively unlimited.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct adapter *adap, struct sge_qset *qs,
+ int budget)
+{
+ struct sge_rspq *q = &qs->rspq;
+ struct rsp_desc *r = &q->desc[q->cidx];
+ int budget_left = budget;
+ unsigned int sleeping = 0;
+ struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
+ int ngathered = 0;
+
+ q->next_holdoff = q->holdoff_tmr;
+
+ while (likely(budget_left && is_new_response(r, q))) {
+ int packet_complete, eth, ethpad = 2;
+ int lro = !!(qs->netdev->features & NETIF_F_GRO);
+ struct sk_buff *skb = NULL;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+
+ dma_rmb();
+ eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
+
+ if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
+ skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ goto no_mem;
+
+ __skb_put_data(skb, r, AN_PKT_SIZE);
+ skb->data[0] = CPL_ASYNC_NOTIF;
+ rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
+ q->async_notif++;
+ } else if (flags & F_RSPD_IMM_DATA_VALID) {
+ skb = get_imm_packet(r);
+ if (unlikely(!skb)) {
+no_mem:
+ q->next_holdoff = NOMEM_INTR_DELAY;
+ q->nomem++;
+ /* consume one credit since we tried */
+ budget_left--;
+ break;
+ }
+ q->imm_data++;
+ ethpad = 0;
+ } else if ((len = ntohl(r->len_cq)) != 0) {
+ struct sge_fl *fl;
+
+ lro &= eth && is_eth_tcp(rss_hi);
+
+ fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+ if (fl->use_pages) {
+ void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+
+ net_prefetch(addr);
+ __refill_fl(adap, fl);
+ if (lro > 0) {
+ lro_add_page(adap, qs, fl,
+ G_RSPD_LEN(len),
+ flags & F_RSPD_EOP);
+ goto next_fl;
+ }
+
+ skb = get_packet_pg(adap, fl, q,
+ G_RSPD_LEN(len),
+ eth ?
+ SGE_RX_DROP_THRES : 0);
+ q->pg_skb = skb;
+ } else
+ skb = get_packet(adap, fl, G_RSPD_LEN(len),
+ eth ? SGE_RX_DROP_THRES : 0);
+ if (unlikely(!skb)) {
+ if (!eth)
+ goto no_mem;
+ q->rx_drops++;
+ } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+ __skb_pull(skb, 2);
+next_fl:
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ } else
+ q->pure_rsps++;
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
+ r++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ r = q->desc;
+ }
+ prefetch(r);
+
+ if (++q->credits >= (q->size / 4)) {
+ refill_rspq(adap, q, q->credits);
+ q->credits = 0;
+ }
+
+ packet_complete = flags &
+ (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+ F_RSPD_ASYNC_NOTIF);
+
+ if (skb != NULL && packet_complete) {
+ if (eth)
+ rx_eth(adap, q, skb, ethpad, lro);
+ else {
+ q->offload_pkts++;
+ /* Preserve the RSS info in csum & priority */
+ skb->csum = rss_hi;
+ skb->priority = rss_lo;
+ ngathered = rx_offload(&adap->tdev, q, skb,
+ offload_skbs,
+ ngathered);
+ }
+
+ if (flags & F_RSPD_EOP)
+ clear_rspq_bufstate(q);
+ }
+ --budget_left;
+ }
+
+ deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue .processed updates */
+ if (unlikely(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ budget -= budget_left;
+ return budget;
+}
+
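+/*
+ * A pure response carries no data: it has neither the async-notification
+ * nor the immediate-data flag set and a zero length/CQ field.
+ */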
+static inline int is_pure_response(const struct rsp_desc *r)
+{
+ __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+
+ return (n | r->len_cq) == 0;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for Rx processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI.
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct adapter *adap = qs->adap;
+ int work_done = process_responses(adap, qs, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete_done(napi, work_done);
+
+ /*
+ * Because we don't atomically flush the following
+ * write it is possible that in very rare cases it can
+ * reach the device in a way that races with a new
+ * response being written plus an error interrupt
+ * causing the NAPI interrupt handler below to return
+ * unhandled status to the OS. Protecting against
+ * this would require flushing the write and doing
+ * both the write and the flush with interrupts off;
+ * that is way too expensive and unjustifiable given
+ * the rarity of the race.
+ *
+ * The race cannot happen at all with MSI-X.
+ */
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+ V_NEWTIMER(qs->rspq.next_holdoff) |
+ V_NEWINDEX(qs->rspq.cidx));
+ }
+ return work_done;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct napi_struct *napi)
+{
+ return test_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
+/**
+ * process_pure_responses - process pure responses from a response queue
+ * @adap: the adapter
+ * @qs: the queue set owning the response queue
+ * @r: the first pure response to process
+ *
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses. Such responses are too lightweight to
+ * justify calling a softirq under NAPI, so we handle them specially in
+ * the interrupt handler. The function is called with a pointer to a
+ * response, which the caller must ensure is a valid pure response.
+ *
+ * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
+ struct rsp_desc *r)
+{
+ struct sge_rspq *q = &qs->rspq;
+ unsigned int sleeping = 0;
+
+ do {
+ u32 flags = ntohl(r->flags);
+
+ r++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ r = q->desc;
+ }
+ prefetch(r);
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
+ q->pure_rsps++;
+ if (++q->credits >= (q->size / 4)) {
+ refill_rspq(adap, q, q->credits);
+ q->credits = 0;
+ }
+ if (!is_new_response(r, q))
+ break;
+ dma_rmb();
+ } while (is_pure_response(r));
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue .processed updates */
+ if (unlikely(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ return is_new_response(r, q);
+}
+
+/**
+ * handle_responses - decide what to do with new responses in NAPI mode
+ * @adap: the adapter
+ * @q: the response queue
+ *
+ * This is used by the NAPI interrupt handlers to decide what to do with
+ * new SGE responses. If there are no new responses it returns -1. If
+ * there are new responses and they are pure (i.e., non-data carrying)
+ * it handles them straight in hard interrupt context as they are very
+ * cheap and don't deliver any packets. Finally, if there are any data
+ * signaling responses it schedules the NAPI handler. Returns 1 if it
+ * schedules NAPI, 0 if all new responses were pure.
+ *
+ * The caller must ascertain NAPI is not already running.
+ */
+static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+{
+ struct sge_qset *qs = rspq_to_qset(q);
+ struct rsp_desc *r = &q->desc[q->cidx];
+
+ if (!is_new_response(r, q))
+ return -1;
+ dma_rmb();
+ if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
+ return 0;
+ }
+ napi_schedule(&qs->napi);
+ return 1;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
+ * (i.e., response queue serviced in hard interrupt).
+ */
+static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_qset *qs = cookie;
+ struct adapter *adap = qs->adap;
+ struct sge_rspq *q = &qs->rspq;
+
+ spin_lock(&q->lock);
+ if (process_responses(adap, qs, -1) == 0)
+ q->unhandled_irqs++;
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+{
+ struct sge_qset *qs = cookie;
+ struct sge_rspq *q = &qs->rspq;
+
+ spin_lock(&q->lock);
+
+ if (handle_responses(qs->adap, q) < 0)
+ q->unhandled_irqs++;
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The non-NAPI MSI interrupt handler. This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same MSI vector. We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi(int irq, void *cookie)
+{
+ int new_packets = 0;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+ spin_lock(&q->lock);
+
+ if (process_responses(adap, &adap->sge.qs[0], -1)) {
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+ new_packets = 1;
+ }
+
+ if (adap->params.nports == 2 &&
+ process_responses(adap, &adap->sge.qs[1], -1)) {
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
+ V_NEWTIMER(q1->next_holdoff) |
+ V_NEWINDEX(q1->cidx));
+ new_packets = 1;
+ }
+
+ if (!new_packets && t3_slow_intr_handler(adap) == 0)
+ q->unhandled_irqs++;
+
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+static int rspq_check_napi(struct sge_qset *qs)
+{
+ struct sge_rspq *q = &qs->rspq;
+
+ if (!napi_is_scheduled(&qs->napi) &&
+ is_new_response(&q->desc[q->cidx], q)) {
+ napi_schedule(&qs->napi);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
+ * by NAPI polling). Handles data events from SGE response queues as well as
+ * error and other async events as they all use the same MSI vector. We use
+ * one SGE response queue per port in this mode and protect all response
+ * queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+{
+ int new_packets;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+ spin_lock(&q->lock);
+
+ new_packets = rspq_check_napi(&adap->sge.qs[0]);
+ if (adap->params.nports == 2)
+ new_packets += rspq_check_napi(&adap->sge.qs[1]);
+ if (!new_packets && t3_slow_intr_handler(adap) == 0)
+ q->unhandled_irqs++;
+
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * A helper function that processes responses and issues GTS.
+ */
+static inline int process_responses_gts(struct adapter *adap,
+ struct sge_rspq *rq)
+{
+ int work;
+
+ work = process_responses(adap, rspq_to_qset(rq), -1);
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
+ V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
+ return work;
+}
+
+/*
+ * The legacy INTx interrupt handler. This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same interrupt pin. We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr(int irq, void *cookie)
+{
+ int work_done, w0, w1;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+ spin_lock(&q0->lock);
+
+ w0 = is_new_response(&q0->desc[q0->cidx], q0);
+ w1 = adap->params.nports == 2 &&
+ is_new_response(&q1->desc[q1->cidx], q1);
+
+ if (likely(w0 | w1)) {
+ t3_write_reg(adap, A_PL_CLI, 0);
+ t3_read_reg(adap, A_PL_CLI); /* flush */
+
+ if (likely(w0))
+ process_responses_gts(adap, q0);
+
+ if (w1)
+ process_responses_gts(adap, q1);
+
+ work_done = w0 | w1;
+ } else
+ work_done = t3_slow_intr_handler(adap);
+
+ spin_unlock(&q0->lock);
+ return IRQ_RETVAL(work_done != 0);
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin. We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr(int irq, void *cookie)
+{
+ u32 map;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+
+ t3_write_reg(adap, A_PL_CLI, 0);
+ map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+ if (unlikely(!map)) /* shared interrupt, most likely */
+ return IRQ_NONE;
+
+ spin_lock(&q0->lock);
+
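+	/*
+	 * Bits 0 and 1 of A_SG_DATA_INTR flag new responses on queue
+	 * sets 0 and 1 respectively; F_ERRINTR flags slow-path events.
+	 */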
+ if (unlikely(map & F_ERRINTR))
+ t3_slow_intr_handler(adap);
+
+ if (likely(map & 1))
+ process_responses_gts(adap, q0);
+
+ if (map & 2)
+ process_responses_gts(adap, &adap->sge.qs[1].rspq);
+
+ spin_unlock(&q0->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin. We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr_napi(int irq, void *cookie)
+{
+ u32 map;
+ struct adapter *adap = cookie;
+ struct sge_qset *qs0 = &adap->sge.qs[0];
+ struct sge_rspq *q0 = &qs0->rspq;
+
+ t3_write_reg(adap, A_PL_CLI, 0);
+ map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+ if (unlikely(!map)) /* shared interrupt, most likely */
+ return IRQ_NONE;
+
+ spin_lock(&q0->lock);
+
+ if (unlikely(map & F_ERRINTR))
+ t3_slow_intr_handler(adap);
+
+ if (likely(map & 1))
+ napi_schedule(&qs0->napi);
+
+ if (map & 2)
+ napi_schedule(&adap->sge.qs[1].napi);
+
+ spin_unlock(&q0->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * t3_intr_handler - select the top-level interrupt handler
+ * @adap: the adapter
+ * @polling: whether using NAPI to service response queues
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
+ * response queues.
+ */
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
+{
+ if (adap->flags & USING_MSIX)
+ return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
+ if (adap->flags & USING_MSI)
+ return polling ? t3_intr_msi_napi : t3_intr_msi;
+ if (adap->params.rev > 0)
+ return polling ? t3b_intr_napi : t3b_intr;
+ return t3_intr;
+}
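+
+/*
+ * Illustrative sketch (a plausible caller, not code from the driver):
+ * for the single-vector INTx/MSI case the handler returned above can be
+ * passed straight to request_irq(); the IRQF_SHARED flag, the name
+ * string and the rspq_polling variable here are assumptions of the
+ * example.  MSI-X instead requests one vector per queue set, passing
+ * the sge_qset as the cookie.
+ *
+ *	irq_handler_t handler = t3_intr_handler(adap, rspq_polling);
+ *	int err = request_irq(adap->pdev->irq, handler, IRQF_SHARED,
+ *			      "cxgb3", adap);
+ */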
+
+#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR)
+#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
+#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
+ F_RSPQDISABLED)
+
+/**
+ * t3_sge_err_intr_handler - SGE async event interrupt handler
+ * @adapter: the adapter
+ *
+ * Interrupt handler for SGE asynchronous (non-data) events.
+ */
+void t3_sge_err_intr_handler(struct adapter *adapter)
+{
+ unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
+ ~F_FLEMPTY;
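+	/*
+	 * F_FLEMPTY is masked out: an empty free list is expected under
+	 * memory pressure and is recovered by sge_timer_rx() rather than
+	 * treated as an SGE error.
+	 */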
+
+ if (status & SGE_PARERR)
+ CH_ALERT(adapter, "SGE parity error (0x%x)\n",
+ status & SGE_PARERR);
+ if (status & SGE_FRAMINGERR)
+ CH_ALERT(adapter, "SGE framing error (0x%x)\n",
+ status & SGE_FRAMINGERR);
+
+ if (status & F_RSPQCREDITOVERFOW)
+ CH_ALERT(adapter, "SGE response queue credit overflow\n");
+
+ if (status & F_RSPQDISABLED) {
+ v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
+
+ CH_ALERT(adapter,
+ "packet delivered to disabled response queue "
+ "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
+ }
+
+ if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
+ queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+ if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+ queue_work(cxgb3_wq, &adapter->db_full_task);
+
+ if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+ queue_work(cxgb3_wq, &adapter->db_empty_task);
+
+ t3_write_reg(adapter, A_SG_INT_CAUSE, status);
+ if (status & SGE_FATALERR)
+ t3_fatal_err(adapter);
+}
+
+/**
+ * sge_timer_tx - perform periodic maintenance of an SGE qset
+ * @t: a timer list containing the SGE queue set to maintain
+ *
+ * Runs periodically from a timer to perform maintenance of an SGE queue
+ * set: cleaning up any completed Tx descriptors that may still be
+ * pending.
+ * Normal descriptor cleanup happens when new packets are added to a Tx
+ * queue so this timer is relatively infrequent and does any cleanup only
+ * if the Tx queue has not seen any new packets in a while. We make a
+ * best effort attempt to reclaim descriptors, in that we don't wait
+ * around if we cannot get a queue's lock (which most likely is because
+ * someone else is queueing new packets and so will also handle the clean
+ * up). Since control queues use immediate data exclusively we don't
+ * bother cleaning them up here.
+ */
+static void sge_timer_tx(struct timer_list *t)
+{
+ struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+ unsigned long next_period;
+
+ if (__netif_tx_trylock(qs->tx_q)) {
+ tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_TIMER_CHUNK);
+ __netif_tx_unlock(qs->tx_q);
+ }
+
+ if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
+ tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+ TX_RECLAIM_TIMER_CHUNK);
+ spin_unlock(&qs->txq[TXQ_OFLD].lock);
+ }
+
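+	/*
+	 * Back off adaptively: each full TX_RECLAIM_TIMER_CHUNK of
+	 * descriptors reclaimed above halves the period until the next
+	 * run, so queues that still had completed work are revisited
+	 * sooner.
+	 */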
+ next_period = TX_RECLAIM_PERIOD >>
+ (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+ TX_RECLAIM_TIMER_CHUNK);
+ mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
+
+/**
+ * sge_timer_rx - perform periodic maintenance of an SGE qset
+ * @t: the timer list containing the SGE queue set to maintain
+ *
+ * a) Replenishes Rx queues that have run out due to memory shortage.
+ * Normally new Rx buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty. We try to add only a few
+ * buffers here, the queue will be replenished fully as these new buffers
+ * are used up if memory shortage has subsided.
+ *
+ * b) Returns coalesced response queue credits in case a response queue
+ * is starved.
+ */
+static void sge_timer_rx(struct timer_list *t)
+{
+ spinlock_t *lock;
+ struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ u32 status;
+
+ lock = adap->params.rev > 0 ?
+ &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+ if (!spin_trylock_irq(lock))
+ goto out;
+
+ if (napi_is_scheduled(&qs->napi))
+ goto unlock;
+
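+	/*
+	 * On pre-rev-4 chips A_SG_RSPQ_FL_STATUS flags response queues
+	 * that have run out of credits; return one coalesced credit to
+	 * restart a starved queue and clear its status bit.
+	 */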
+ if (adap->params.rev < 4) {
+ status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+ if (status & (1 << qs->rspq.cntxt_id)) {
+ qs->rspq.starved++;
+ if (qs->rspq.credits) {
+ qs->rspq.credits--;
+ refill_rspq(adap, &qs->rspq, 1);
+ qs->rspq.restarted++;
+ t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+ 1 << qs->rspq.cntxt_id);
+ }
+ }
+ }
+
+ if (qs->fl[0].credits < qs->fl[0].size)
+ __refill_fl(adap, &qs->fl[0]);
+ if (qs->fl[1].credits < qs->fl[1].size)
+ __refill_fl(adap, &qs->fl[1]);
+
+unlock:
+ spin_unlock_irq(lock);
+out:
+ mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+}
+
+/**
+ * t3_update_qset_coalesce - update coalescing settings for a queue set
+ * @qs: the SGE queue set
+ * @p: new queue set parameters
+ *
+ * Update the coalescing settings for an SGE queue set. Nothing is done
+ * if the queue set is not initialized yet.
+ */
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+{
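+	/*
+	 * holdoff_tmr is in SGE timer ticks: t3_sge_init() programs
+	 * A_SG_TIMER_TICK for a 100ns tick, so multiplying microseconds
+	 * by 10 converts to ticks (clamped to at least one tick).
+	 */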
+ qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
+ qs->rspq.polling = p->polling;
+ qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
+}
+
+/**
+ * t3_sge_alloc_qset - initialize an SGE queue set
+ * @adapter: the adapter
+ * @id: the queue set id
+ * @nports: how many Ethernet ports will be using this queue set
+ * @irq_vec_idx: the IRQ vector index for response queue interrupts
+ * @p: configuration parameters for this queue set
+ * @ntxq: number of Tx queues for the queue set
+ * @dev: net device associated with this queue set
+ * @netdevq: net device TX queue associated with this queue set
+ *
+ * Allocate resources and initialize an SGE queue set. A queue set
+ * comprises a response queue, two Rx free-buffer queues, and up to 3
+ * Tx queues. The Tx queues are assigned roles in the order Ethernet
+ * queue, offload queue, and control queue.
+ */
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ int irq_vec_idx, const struct qset_params *p,
+ int ntxq, struct net_device *dev,
+ struct netdev_queue *netdevq)
+{
+ int i, avail, ret = -ENOMEM;
+ struct sge_qset *q = &adapter->sge.qs[id];
+
+ init_qset_cntxt(q, id);
+ timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
+ timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
+
+ q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
+ sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc),
+ &q->fl[0].phys_addr, &q->fl[0].sdesc);
+ if (!q->fl[0].desc)
+ goto err;
+
+ q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
+ sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc),
+ &q->fl[1].phys_addr, &q->fl[1].sdesc);
+ if (!q->fl[1].desc)
+ goto err;
+
+ q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
+ sizeof(struct rsp_desc), 0,
+ &q->rspq.phys_addr, NULL);
+ if (!q->rspq.desc)
+ goto err;
+
+ for (i = 0; i < ntxq; ++i) {
+ /*
+ * The control queue always uses immediate data so does not
+ * need to keep track of any sk_buffs.
+ */
+ size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
+
+ q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
+ sizeof(struct tx_desc), sz,
+ &q->txq[i].phys_addr,
+ &q->txq[i].sdesc);
+ if (!q->txq[i].desc)
+ goto err;
+
+ q->txq[i].gen = 1;
+ q->txq[i].size = p->txq_size[i];
+ spin_lock_init(&q->txq[i].lock);
+ skb_queue_head_init(&q->txq[i].sendq);
+ }
+
+ INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
+ INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
+
+ q->fl[0].gen = q->fl[1].gen = 1;
+ q->fl[0].size = p->fl_size;
+ q->fl[1].size = p->jumbo_size;
+
+ q->rspq.gen = 1;
+ q->rspq.size = p->rspq_size;
+ spin_lock_init(&q->rspq.lock);
+ skb_queue_head_init(&q->rspq.rx_queue);
+
+ q->txq[TXQ_ETH].stop_thres = nports *
+ flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
+
+#if FL0_PG_CHUNK_SIZE > 0
+ q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
+#else
+ q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
+#endif
+#if FL1_PG_CHUNK_SIZE > 0
+ q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
+#else
+ q->fl[1].buf_size = is_offload(adapter) ?
+ (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
+#endif
+
+ q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+ q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
+ q->fl[0].order = FL0_PG_ORDER;
+ q->fl[1].order = FL1_PG_ORDER;
+ q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+ q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+
+ /* FL threshold comparison uses < */
+ ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
+ q->rspq.phys_addr, q->rspq.size,
+ q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
+ if (ret)
+ goto err_unlock;
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
+ ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
+ q->fl[i].phys_addr, q->fl[i].size,
+ q->fl[i].buf_size - SGE_PG_RSVD,
+ p->cong_thres, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
+ SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
+ q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
+ 1, 0);
+ if (ret)
+ goto err_unlock;
+
+ if (ntxq > 1) {
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
+ USE_GTS, SGE_CNTXT_OFLD, id,
+ q->txq[TXQ_OFLD].phys_addr,
+ q->txq[TXQ_OFLD].size, 0, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ if (ntxq > 2) {
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
+ SGE_CNTXT_CTRL, id,
+ q->txq[TXQ_CTRL].phys_addr,
+ q->txq[TXQ_CTRL].size,
+ q->txq[TXQ_CTRL].token, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ spin_unlock_irq(&adapter->sge.reg_lock);
+
+ q->adap = adapter;
+ q->netdev = dev;
+ q->tx_q = netdevq;
+ t3_update_qset_coalesce(q, p);
+
+ avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+ GFP_KERNEL | __GFP_COMP);
+ if (!avail) {
+ CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ if (avail < q->fl[0].size)
+ CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+ avail);
+
+ avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
+ GFP_KERNEL | __GFP_COMP);
+ if (avail < q->fl[1].size)
+ CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+ avail);
+ refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
+
+ t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
+ V_NEWTIMER(q->rspq.holdoff_tmr));
+
+ return 0;
+
+err_unlock:
+ spin_unlock_irq(&adapter->sge.reg_lock);
+err:
+ t3_free_qset(adapter, q);
+ return ret;
+}
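+
+/*
+ * An illustrative call (a sketch under assumptions, not the driver's
+ * actual setup code): one queue set per port Tx queue, with parameters
+ * taken from the defaults filled in by t3_sge_prep() below.  "dev" and
+ * "qidx" are assumptions of the example, and the IRQ vector index is
+ * simply reused from qidx:
+ *
+ *	err = t3_sge_alloc_qset(adap, qidx, 1, qidx,
+ *				&adap->params.sge.qset[qidx],
+ *				SGE_TXQ_PER_SET, dev,
+ *				netdev_get_tx_queue(dev, qidx));
+ */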
+
+/**
+ * t3_start_sge_timers - start SGE timer callbacks
+ * @adap: the adapter
+ *
+ * Starts each SGE queue set's timer callbacks.
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ mod_timer(&q->tx_reclaim_timer,
+ jiffies + TX_RECLAIM_PERIOD);
+
+ if (q->rx_reclaim_timer.function)
+ mod_timer(&q->rx_reclaim_timer,
+ jiffies + RX_RECLAIM_PERIOD);
+ }
+}
+
+/**
+ * t3_stop_sge_timers - stop SGE timer callbacks
+ * @adap: the adapter
+ *
+ * Stops each SGE queue set's timer callbacks.
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ del_timer_sync(&q->tx_reclaim_timer);
+ if (q->rx_reclaim_timer.function)
+ del_timer_sync(&q->rx_reclaim_timer);
+ }
+}
+
+/**
+ * t3_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t3_free_sge_resources(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ t3_free_qset(adap, &adap->sge.qs[i]);
+}
+
+/**
+ * t3_sge_start - enable SGE
+ * @adap: the adapter
+ *
+ * Enables the SGE for DMAs. This is the last step in starting packet
+ * transfers.
+ */
+void t3_sge_start(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
+}
+
+/**
+ * t3_sge_stop_dma - Disable SGE DMA engine operation
+ * @adap: the adapter
+ *
+ * Can be invoked from interrupt context, e.g. from the error handler.
+ *
+ * Note that this function cannot cancel the queue-restart work items,
+ * as it cannot wait if called from interrupt context; however, the
+ * work items will have no effect since the doorbells are disabled. The
+ * driver will call t3_sge_stop() later from process context, at which
+ * time the work items will be cancelled if they are still running.
+ */
+void t3_sge_stop_dma(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
+}
+
+/**
+ * t3_sge_stop - disable SGE operation completely
+ * @adap: the adapter
+ *
+ * Called from process context. Disables the DMA engine and any
+ * pending queue restart works.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+ int i;
+
+ t3_sge_stop_dma(adap);
+
+ /* workqueues aren't initialized otherwise */
+ if (!(adap->flags & FULL_INIT_DONE))
+ return;
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
+ cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
+ }
+}
+
+/**
+ * t3_sge_init - initialize SGE
+ * @adap: the adapter
+ * @p: the SGE parameters
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+void t3_sge_init(struct adapter *adap, struct sge_params *p)
+{
+ unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
+
+ ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
+ F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
+ V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+ V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
+#if SGE_NUM_GENBITS == 1
+ ctrl |= F_EGRGENCTRL;
+#endif
+ if (adap->params.rev > 0) {
+ if (!(adap->flags & (USING_MSIX | USING_MSI)))
+ ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
+ }
+ t3_write_reg(adap, A_SG_CONTROL, ctrl);
+ t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
+ V_LORCQDRBTHRSH(512));
+ t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
+ t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
+ V_TIMEOUT(200 * core_ticks_per_usec(adap)));
+ t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
+ adap->params.rev < T3_REV_C ? 1000 : 500);
+ t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
+ t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
+ t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
+}
+
+/**
+ * t3_sge_prep - one-time SGE initialization
+ * @adap: the associated adapter
+ * @p: SGE parameters
+ *
+ * Performs one-time initialization of SGE SW state, including determining
+ * defaults for the assorted SGE parameters, which admins can change until
+ * they are used to initialize the SGE.
+ */
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
+{
+ int i;
+
+ p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct qset_params *q = p->qset + i;
+
+ q->polling = adap->params.rev > 0;
+ q->coalesce_usecs = 5;
+ q->rspq_size = 1024;
+ q->fl_size = 1024;
+ q->jumbo_size = 512;
+ q->txq_size[TXQ_ETH] = 1024;
+ q->txq_size[TXQ_OFLD] = 1024;
+ q->txq_size[TXQ_CTRL] = 256;
+ q->cong_thres = 0;
+ }
+
+ spin_lock_init(&adap->sge.reg_lock);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
new file mode 100644
index 0000000000..c31ce8dc95
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is automatically generated --- any changes will be lost.
+ */
+
+#ifndef _SGE_DEFS_H
+#define _SGE_DEFS_H
+
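+/*
+ * Naming convention used by the field accessors below: for a field FOO,
+ * S_FOO is its bit offset, M_FOO the unshifted mask, V_FOO(x) shifts a
+ * value into place, G_FOO(x) extracts it, and F_FOO is the mask of a
+ * single-bit field.  For example:
+ *
+ *	u32 ctx = V_EC_INDEX(pidx) | F_EC_GTS | V_EC_CREDITS(credits);
+ *	unsigned int credits = G_EC_CREDITS(ctx);
+ */
+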
+#define S_EC_CREDITS 0
+#define M_EC_CREDITS 0x7FFF
+#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
+#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
+
+#define S_EC_GTS 15
+#define V_EC_GTS(x) ((x) << S_EC_GTS)
+#define F_EC_GTS V_EC_GTS(1U)
+
+#define S_EC_INDEX 16
+#define M_EC_INDEX 0xFFFF
+#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
+#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
+
+#define S_EC_SIZE 0
+#define M_EC_SIZE 0xFFFF
+#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
+#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
+
+#define S_EC_BASE_LO 16
+#define M_EC_BASE_LO 0xFFFF
+#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
+#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
+
+#define S_EC_BASE_HI 0
+#define M_EC_BASE_HI 0xF
+#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
+#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
+
+#define S_EC_RESPQ 4
+#define M_EC_RESPQ 0x7
+#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
+#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
+
+#define S_EC_TYPE 7
+#define M_EC_TYPE 0x7
+#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
+#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
+
+#define S_EC_GEN 10
+#define V_EC_GEN(x) ((x) << S_EC_GEN)
+#define F_EC_GEN V_EC_GEN(1U)
+
+#define S_EC_UP_TOKEN 11
+#define M_EC_UP_TOKEN 0xFFFFF
+#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
+#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
+
+#define S_EC_VALID 31
+#define V_EC_VALID(x) ((x) << S_EC_VALID)
+#define F_EC_VALID V_EC_VALID(1U)
+
+#define S_RQ_MSI_VEC 20
+#define M_RQ_MSI_VEC 0x3F
+#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
+#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
+
+#define S_RQ_INTR_EN 26
+#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
+#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
+
+#define S_RQ_GEN 28
+#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
+#define F_RQ_GEN V_RQ_GEN(1U)
+
+#define S_CQ_INDEX 0
+#define M_CQ_INDEX 0xFFFF
+#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
+#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
+
+#define S_CQ_SIZE 16
+#define M_CQ_SIZE 0xFFFF
+#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
+#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
+
+#define S_CQ_BASE_HI 0
+#define M_CQ_BASE_HI 0xFFFFF
+#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
+#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
+
+#define S_CQ_RSPQ 20
+#define M_CQ_RSPQ 0x3F
+#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
+#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
+
+#define S_CQ_ASYNC_NOTIF 26
+#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
+#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
+
+#define S_CQ_ARMED 27
+#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
+#define F_CQ_ARMED V_CQ_ARMED(1U)
+
+#define S_CQ_ASYNC_NOTIF_SOL 28
+#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
+#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
+
+#define S_CQ_GEN 29
+#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
+#define F_CQ_GEN V_CQ_GEN(1U)
+
+#define S_CQ_ERR 30
+#define V_CQ_ERR(x) ((x) << S_CQ_ERR)
+#define F_CQ_ERR V_CQ_ERR(1U)
+
+#define S_CQ_OVERFLOW_MODE 31
+#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
+#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
+
+#define S_CQ_CREDITS 0
+#define M_CQ_CREDITS 0xFFFF
+#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
+#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
+
+#define S_CQ_CREDIT_THRES 16
+#define M_CQ_CREDIT_THRES 0x1FFF
+#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
+#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
+
+#define S_FL_BASE_HI 0
+#define M_FL_BASE_HI 0xFFFFF
+#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
+#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
+
+#define S_FL_INDEX_LO 20
+#define M_FL_INDEX_LO 0xFFF
+#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
+#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
+
+#define S_FL_INDEX_HI 0
+#define M_FL_INDEX_HI 0xF
+#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
+#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
+
+#define S_FL_SIZE 4
+#define M_FL_SIZE 0xFFFF
+#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
+#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
+
+#define S_FL_GEN 20
+#define V_FL_GEN(x) ((x) << S_FL_GEN)
+#define F_FL_GEN V_FL_GEN(1U)
+
+#define S_FL_ENTRY_SIZE_LO 21
+#define M_FL_ENTRY_SIZE_LO 0x7FF
+#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
+#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
+
+#define S_FL_ENTRY_SIZE_HI 0
+#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
+#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
+#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
+
+#define S_FL_CONG_THRES 21
+#define M_FL_CONG_THRES 0x3FF
+#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
+#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
+
+#define S_FL_GTS 31
+#define V_FL_GTS(x) ((x) << S_FL_GTS)
+#define F_FL_GTS V_FL_GTS(1U)
+
+#define S_FLD_GEN1 31
+#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
+#define F_FLD_GEN1 V_FLD_GEN1(1U)
+
+#define S_FLD_GEN2 0
+#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
+#define F_FLD_GEN2 V_FLD_GEN2(1U)
+
+#define S_RSPD_TXQ1_CR 0
+#define M_RSPD_TXQ1_CR 0x7F
+#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
+#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
+
+#define S_RSPD_TXQ1_GTS 7
+#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
+#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
+
+#define S_RSPD_TXQ2_CR 8
+#define M_RSPD_TXQ2_CR 0x7F
+#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
+#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
+
+#define S_RSPD_TXQ2_GTS 15
+#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
+#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
+
+#define S_RSPD_TXQ0_CR 16
+#define M_RSPD_TXQ0_CR 0x7F
+#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
+#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
+
+#define S_RSPD_TXQ0_GTS 23
+#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
+#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
+
+#define S_RSPD_EOP 24
+#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
+#define F_RSPD_EOP V_RSPD_EOP(1U)
+
+#define S_RSPD_SOP 25
+#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
+#define F_RSPD_SOP V_RSPD_SOP(1U)
+
+#define S_RSPD_ASYNC_NOTIF 26
+#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
+#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
+
+#define S_RSPD_FL0_GTS 27
+#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
+#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
+
+#define S_RSPD_FL1_GTS 28
+#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
+#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
+
+#define S_RSPD_IMM_DATA_VALID 29
+#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
+#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
+
+#define S_RSPD_OFFLOAD 30
+#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
+#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
+
+#define S_RSPD_GEN1 31
+#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
+#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
+
+#define S_RSPD_LEN 0
+#define M_RSPD_LEN 0x7FFFFFFF
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_FLQ 31
+#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
+#define F_RSPD_FLQ V_RSPD_FLQ(1U)
+
+#define S_RSPD_GEN2 0
+#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
+#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
+
+#define S_RSPD_INR_VEC 1
+#define M_RSPD_INR_VEC 0x7F
+#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
+#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
+
+#endif /* _SGE_DEFS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
new file mode 100644
index 0000000000..68bb5f39f3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
@@ -0,0 +1,1495 @@
+/*
+ * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef T3_CPL_H
+#define T3_CPL_H
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+# include <asm/byteorder.h>
+#endif
+
+enum CPL_opcode {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_ACCEPT_RPL = 0x2,
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB = 0x4,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_GET_TCB = 0x6,
+ CPL_PCMD = 0x7,
+ CPL_CLOSE_CON_REQ = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
+ CPL_RX_DATA_ACK = 0xD,
+ CPL_TX_PKT = 0xE,
+ CPL_RTE_DELETE_REQ = 0xF,
+ CPL_RTE_WRITE_REQ = 0x10,
+ CPL_RTE_READ_REQ = 0x11,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_L2T_READ_REQ = 0x13,
+ CPL_SMT_WRITE_REQ = 0x14,
+ CPL_SMT_READ_REQ = 0x15,
+ CPL_TX_PKT_LSO = 0x16,
+ CPL_PCMD_READ = 0x17,
+ CPL_BARRIER = 0x18,
+ CPL_TID_RELEASE = 0x1A,
+
+ CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_ERROR = 0x21,
+ CPL_GET_TCB_RPL = 0x22,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PCMD_READ_RPL = 0x24,
+ CPL_PCMD_RPL = 0x25,
+ CPL_PEER_CLOSE = 0x26,
+ CPL_RTE_DELETE_RPL = 0x27,
+ CPL_RTE_WRITE_RPL = 0x28,
+ CPL_RX_DDP_COMPLETE = 0x29,
+ CPL_RX_PHYS_ADDR = 0x2A,
+ CPL_RX_PKT = 0x2B,
+ CPL_RX_URG_NOTIFY = 0x2C,
+ CPL_SET_TCB_RPL = 0x2D,
+ CPL_SMT_WRITE_RPL = 0x2E,
+ CPL_TX_DATA_ACK = 0x2F,
+
+ CPL_ABORT_REQ_RSS = 0x30,
+ CPL_ABORT_RPL_RSS = 0x31,
+ CPL_CLOSE_CON_RPL = 0x32,
+ CPL_ISCSI_HDR = 0x33,
+ CPL_L2T_READ_RPL = 0x34,
+ CPL_RDMA_CQE = 0x35,
+ CPL_RDMA_CQE_READ_RSP = 0x36,
+ CPL_RDMA_CQE_ERR = 0x37,
+ CPL_RTE_READ_RPL = 0x38,
+ CPL_RX_DATA = 0x39,
+
+ CPL_ACT_OPEN_RPL = 0x40,
+ CPL_PASS_OPEN_RPL = 0x41,
+ CPL_RX_DATA_DDP = 0x42,
+ CPL_SMT_READ_RPL = 0x43,
+
+ CPL_ACT_ESTABLISH = 0x50,
+ CPL_PASS_ESTABLISH = 0x51,
+
+ CPL_PASS_ACCEPT_REQ = 0x70,
+
+ CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
+
+ CPL_TX_DMA_ACK = 0xA0,
+ CPL_RDMA_READ_REQ = 0xA1,
+ CPL_RDMA_TERMINATE = 0xA2,
+ CPL_TRACE_PKT = 0xA3,
+ CPL_RDMA_EC_STATUS = 0xA5,
+
+ NUM_CPL_CMDS /* must be last and previous entries must be sorted */
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_RTX_NEG_ADVICE = 35,
+ CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_GENERAL = 99
+};
+
+enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5
+};
+
+enum {
+ ULP_CRC_HEADER = 1 << 0,
+ ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+ CPL_PASS_OPEN_ACCEPT,
+ CPL_PASS_OPEN_REJECT
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+ CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum { /* TX_PKT_LSO ethernet types */
+ CPL_ETH_II,
+ CPL_ETH_II_VLAN,
+ CPL_ETH_802_3,
+ CPL_ETH_802_3_VLAN
+};
+
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+enum { /* RSS hash type */
+ RSS_HASH_NONE = 0,
+ RSS_HASH_2_TUPLE = 1,
+ RSS_HASH_4_TUPLE = 2,
+ RSS_HASH_TCPV6 = 3
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ __u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+#define S_QNUM 0
+#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
+
+#define S_HASHTYPE 22
+#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
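+
+/*
+ * Typical use, as a sketch ("req" and "rpl" stand for any of the CPL
+ * structs below that begin with an opcode_tid member):
+ *
+ *	req->ot.opcode_tid = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
+ *	...
+ *	unsigned int tid = GET_TID(rpl);
+ */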
+
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:5;
+ __u8 ecn:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8 ecn:1;
+ __u8:5;
+#endif
+};
+
+struct rss_header {
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 cpu_idx:6;
+ __u8 hash_type:2;
+#else
+ __u8 hash_type:2;
+ __u8 cpu_idx:6;
+#endif
+ __be16 cq_idx;
+ __be32 rss_hash_val;
+};
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+};
+
+/* wr_hi fields */
+#define S_WR_SGE_CREDITS 0
+#define M_WR_SGE_CREDITS 0xFF
+#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
+#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
+
+#define S_WR_SGLSFLT 8
+#define M_WR_SGLSFLT 0xFF
+#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
+#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
+
+#define S_WR_BCNTLFLT 16
+#define M_WR_BCNTLFLT 0xF
+#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
+#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
+
+#define S_WR_DATATYPE 20
+#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
+#define F_WR_DATATYPE V_WR_DATATYPE(1U)
+
+#define S_WR_COMPL 21
+#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
+#define F_WR_COMPL V_WR_COMPL(1U)
+
+#define S_WR_EOP 22
+#define V_WR_EOP(x) ((x) << S_WR_EOP)
+#define F_WR_EOP V_WR_EOP(1U)
+
+#define S_WR_SOP 23
+#define V_WR_SOP(x) ((x) << S_WR_SOP)
+#define F_WR_SOP V_WR_SOP(1U)
+
+#define S_WR_OP 24
+#define M_WR_OP 0xFF
+#define V_WR_OP(x) ((x) << S_WR_OP)
+#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
+
+/* wr_lo fields */
+#define S_WR_LEN 0
+#define M_WR_LEN 0xFF
+#define V_WR_LEN(x) ((x) << S_WR_LEN)
+#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
+
+#define S_WR_TID 8
+#define M_WR_TID 0xFFFFF
+#define V_WR_TID(x) ((x) << S_WR_TID)
+#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
+
+#define S_WR_CR_FLUSH 30
+#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
+#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
+
+#define S_WR_GEN 31
+#define V_WR_GEN(x) ((x) << S_WR_GEN)
+#define F_WR_GEN V_WR_GEN(1U)
+
+# define WR_HDR struct work_request_hdr wr
+# define RSS_HDR
+#else
+# define WR_HDR
+# define RSS_HDR struct rss_header rss_hdr;
+#endif
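+
+/*
+ * Thus, without CHELSIO_FW (the host driver build) outbound CPLs are
+ * declared with a leading work request header and inbound CPLs with no
+ * rss_header member; a firmware build inverts both choices.
+ */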
+
+/* option 0 lower-half fields */
+#define S_CPL_STATUS 0
+#define M_CPL_STATUS 0xFF
+#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
+#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
+
+#define S_INJECT_TIMER 6
+#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
+#define F_INJECT_TIMER V_INJECT_TIMER(1U)
+
+#define S_NO_OFFLOAD 7
+#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
+#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define M_ULP_MODE 0xF
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
+
+#define S_RCV_BUFSIZ 12
+#define M_RCV_BUFSIZ 0x3FFF
+#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
+#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
+
+#define S_TOS 26
+#define M_TOS 0x3F
+#define V_TOS(x) ((x) << S_TOS)
+#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
+
+/* option 0 upper-half fields */
+#define S_DELACK 0
+#define V_DELACK(x) ((x) << S_DELACK)
+#define F_DELACK V_DELACK(1U)
+
+#define S_NO_CONG 1
+#define V_NO_CONG(x) ((x) << S_NO_CONG)
+#define F_NO_CONG V_NO_CONG(1U)
+
+#define S_SRC_MAC_SEL 2
+#define M_SRC_MAC_SEL 0x3
+#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
+#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
+
+#define S_L2T_IDX 4
+#define M_L2T_IDX 0x7FF
+#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
+#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
+
+#define S_TX_CHANNEL 15
+#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
+#define F_TX_CHANNEL V_TX_CHANNEL(1U)
+
+#define S_TCAM_BYPASS 16
+#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
+
+#define S_NAGLE 17
+#define V_NAGLE(x) ((x) << S_NAGLE)
+#define F_NAGLE V_NAGLE(1U)
+
+#define S_WND_SCALE 18
+#define M_WND_SCALE 0xF
+#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
+#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
+
+#define S_KEEP_ALIVE 22
+#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
+#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
+
+#define S_MAX_RETRANS 23
+#define M_MAX_RETRANS 0xF
+#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
+#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
+
+#define S_MAX_RETRANS_OVERRIDE 27
+#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
+#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
+
+#define S_MSS_IDX 28
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 1 fields */
+#define S_RSS_ENABLE 0
+#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
+#define F_RSS_ENABLE V_RSS_ENABLE(1U)
+
+#define S_RSS_MASK_LEN 1
+#define M_RSS_MASK_LEN 0x7
+#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
+#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
+
+#define S_CPU_IDX 4
+#define M_CPU_IDX 0x3F
+#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
+#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
+
+#define S_MAC_MATCH_VALID 18
+#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
+#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
+
+#define S_CONN_POLICY 19
+#define M_CONN_POLICY 0x3
+#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
+#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+
+#define S_SYN_DEFENSE 21
+#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
+#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
+
+#define S_VLAN_PRI 22
+#define M_VLAN_PRI 0x3
+#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
+#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
+
+#define S_VLAN_PRI_VALID 24
+#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
+#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
+
+#define S_PKT_TYPE 25
+#define M_PKT_TYPE 0x3
+#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
+#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
+
+#define S_MAC_MATCH 27
+#define M_MAC_MATCH 0x1F
+#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
+#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
+
+/* option 2 fields */
+#define S_CPU_INDEX 0
+#define M_CPU_INDEX 0x7F
+#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
+#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
+
+#define S_CPU_INDEX_VALID 7
+#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
+#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
+
+#define S_RX_COALESCE 8
+#define M_RX_COALESCE 0x3
+#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
+#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
+
+#define S_RX_COALESCE_VALID 10
+#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
+#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
+
+#define S_CONG_CONTROL_FLAVOR 11
+#define M_CONG_CONTROL_FLAVOR 0x3
+#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
+#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
+
+#define S_PACING_FLAVOR 13
+#define M_PACING_FLAVOR 0x3
+#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
+#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
+
+#define S_FLAVORS_VALID 15
+#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
+#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
+
+#define S_RX_FC_DISABLE 16
+#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
+#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
+
+#define S_RX_FC_VALID 17
+#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
+#define F_RX_FC_VALID V_RX_FC_VALID(1U)
+
+struct cpl_pass_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l;
+ __be32 peer_netmask;
+ __be32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __u8 resvd[7];
+ __u8 status;
+};
+
+struct cpl_pass_establish {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ __be16 l2t_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_tid fields */
+#define S_PASS_OPEN_TID 0
+#define M_PASS_OPEN_TID 0xFFFFFF
+#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
+#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
+
+#define S_PASS_OPEN_TOS 24
+#define M_PASS_OPEN_TOS 0xFF
+#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
+#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
+
+/* cpl_pass_establish.l2t_idx fields */
+#define S_L2T_IDX16 5
+#define M_L2T_IDX16 0x7FF
+#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
+#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
+
+/* cpl_pass_establish.tcp_opt fields (also apply to cpl_act_establish) */
+#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
+#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
+#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
+#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
+
+struct cpl_pass_accept_req {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ struct tcp_options tcp_options;
+ __u8 dst_mac[6];
+ __be16 vlan_tag;
+ __u8 src_mac[6];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:3;
+ __u8 addr_idx:3;
+ __u8 port_idx:1;
+ __u8 exact_match:1;
+#else
+ __u8 exact_match:1;
+ __u8 port_idx:1;
+ __u8 addr_idx:3;
+ __u8:3;
+#endif
+ __u8 rsvd;
+ __be32 rcv_isn;
+ __be32 rsvd2;
+};
+
+struct cpl_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+ __be32 rsvd;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l_status;
+};
+
+struct cpl_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l;
+ __be32 params;
+ __be32 opt2;
+};
+
+/* cpl_act_open_req.params fields */
+#define S_AOPEN_VLAN_PRI 9
+#define M_AOPEN_VLAN_PRI 0x3
+#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
+#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
+
+#define S_AOPEN_VLAN_PRI_VALID 11
+#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
+#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
+
+#define S_AOPEN_PKT_TYPE 12
+#define M_AOPEN_PKT_TYPE 0x3
+#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
+#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
+
+#define S_AOPEN_MAC_MATCH 14
+#define M_AOPEN_MAC_MATCH 0x1F
+#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
+#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
+
+#define S_AOPEN_MAC_MATCH_VALID 19
+#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
+#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
+
+#define S_AOPEN_IFF_VLAN 20
+#define M_AOPEN_IFF_VLAN 0xFFF
+#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
+#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
+
+struct cpl_act_open_rpl {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 atid;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_act_establish {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ __be16 l2t_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 cpuno;
+ __be16 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd;
+ __u8 status;
+ __be16 len;
+};
+
+struct cpl_set_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 reply;
+ __u8 cpu_idx;
+ __be16 len;
+};
+
+/* cpl_set_tcb.reply fields */
+#define S_NO_REPLY 7
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY V_NO_REPLY(1U)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 reply;
+ __u8 cpu_idx;
+ __be16 word;
+ __be64 mask;
+ __be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_pcmd {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd[3];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 src:1;
+ __u8 bundle:1;
+ __u8 channel:1;
+ __u8:5;
+#else
+ __u8:5;
+ __u8 channel:1;
+ __u8 bundle:1;
+ __u8 src:1;
+#endif
+ __be32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_reply {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd;
+ __be16 len;
+};
+
+struct cpl_close_con_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+ __u8 cpu_idx;
+ __be16 rsvd1;
+};
+
+struct cpl_close_listserv_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_req_rss {
+ RSS_HDR union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ RSS_HDR union opcode_tid ot;
+ __be32 rcv_nxt;
+};
+
+struct tx_data_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __be32 len;
+ __be32 flags;
+ __be32 sndseq;
+ __be32 param;
+};
+
+/* tx_data_wr.flags fields */
+#define S_TX_ACK_PAGES 21
+#define M_TX_ACK_PAGES 0x7
+#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
+#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
+
+/* tx_data_wr.param fields */
+#define S_TX_PORT 0
+#define M_TX_PORT 0x7
+#define V_TX_PORT(x) ((x) << S_TX_PORT)
+#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
+
+#define S_TX_MSS 4
+#define M_TX_MSS 0xF
+#define V_TX_MSS(x) ((x) << S_TX_MSS)
+#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
+
+#define S_TX_QOS 8
+#define M_TX_QOS 0xFF
+#define V_TX_QOS(x) ((x) << S_TX_QOS)
+#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
+
+#define S_TX_SNDBUF 16
+#define M_TX_SNDBUF 0xFFFF
+#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
+#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be16 urg;
+ __be16 flags;
+};
+
+/* cpl_tx_data.flags fields */
+#define S_TX_ULP_SUBMODE 6
+#define M_TX_ULP_SUBMODE 0xF
+#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
+#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
+
+#define S_TX_ULP_MODE 10
+#define M_TX_ULP_MODE 0xF
+#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
+#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+
+#define S_TX_SHOVE 14
+#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
+#define F_TX_SHOVE V_TX_SHOVE(1U)
+
+#define S_TX_MORE 15
+#define V_TX_MORE(x) ((x) << S_TX_MORE)
+#define F_TX_MORE V_TX_MORE(1U)
+
+/* additional tx_data_wr.flags fields */
+#define S_TX_CPU_IDX 0
+#define M_TX_CPU_IDX 0x3F
+#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
+#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
+
+#define S_TX_URG 16
+#define V_TX_URG(x) ((x) << S_TX_URG)
+#define F_TX_URG V_TX_URG(1U)
+
+#define S_TX_CLOSE 17
+#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
+#define F_TX_CLOSE V_TX_CLOSE(1U)
+
+#define S_TX_INIT 18
+#define V_TX_INIT(x) ((x) << S_TX_INIT)
+#define F_TX_INIT V_TX_INIT(1U)
+
+#define S_TX_IMM_ACK 19
+#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
+#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
+
+#define S_TX_IMM_DMA 20
+#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
+#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
+
+struct cpl_tx_data_ack {
+ RSS_HDR union opcode_tid ot;
+ __be32 ack_seq;
+};
+
+struct cpl_wr_ack {
+ RSS_HDR union opcode_tid ot;
+ __be16 credits;
+ __be16 rsvd;
+ __be32 snd_nxt;
+ __be32 snd_una;
+};
+
+struct cpl_rdma_ec_status {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct mngt_pktsched_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __u8 mngt_opcode;
+ __u8 rsvd[7];
+ __u8 sched;
+ __u8 idx;
+ __u8 min;
+ __u8 max;
+ __u8 binding;
+ __u8 rsvd1[3];
+};
+
+struct cpl_iscsi_hdr {
+ RSS_HDR union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define S_ISCSI_PDU_LEN 0
+#define M_ISCSI_PDU_LEN 0x7FFF
+#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
+#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
+
+#define S_ISCSI_DDP 15
+#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
+#define F_ISCSI_DDP V_ISCSI_DDP(1U)
+
+struct cpl_rx_data {
+ RSS_HDR union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 dack_mode:2;
+ __u8 psh:1;
+ __u8 heartbeat:1;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 heartbeat:1;
+ __u8 psh:1;
+ __u8 dack_mode:2;
+#endif
+ __u8 status;
+};
+
+struct cpl_rx_data_ack {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.credit_dack fields */
+#define S_RX_CREDITS 0
+#define M_RX_CREDITS 0x7FFFFFF
+#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
+#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
+
+#define S_RX_MODULATE 27
+#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
+#define F_RX_MODULATE V_RX_MODULATE(1U)
+
+#define S_RX_FORCE_ACK 28
+#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
+#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
+
+#define S_RX_DACK_MODE 29
+#define M_RX_DACK_MODE 0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE 31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+
+struct cpl_rx_urg_notify {
+ RSS_HDR union opcode_tid ot;
+ __be32 seq;
+};
+
+struct cpl_rx_ddp_complete {
+ RSS_HDR union opcode_tid ot;
+ __be32 ddp_report;
+};
+
+struct cpl_rx_data_ddp {
+ RSS_HDR union opcode_tid ot;
+ __be16 urg;
+ __be16 len;
+ __be32 seq;
+ union {
+ __be32 nxt_seq;
+ __be32 ddp_report;
+ };
+ __be32 ulp_crc;
+ __be32 ddpvld_status;
+};
+
+/* cpl_rx_data_ddp.ddpvld_status fields */
+#define S_DDP_STATUS 0
+#define M_DDP_STATUS 0xFF
+#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
+#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
+
+#define S_DDP_VALID 15
+#define M_DDP_VALID 0x1FFFF
+#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
+#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
+
+#define S_DDP_PPOD_MISMATCH 15
+#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
+#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
+
+#define S_DDP_PDU 16
+#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
+#define F_DDP_PDU V_DDP_PDU(1U)
+
+#define S_DDP_LLIMIT_ERR 17
+#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
+#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
+
+#define S_DDP_PPOD_PARITY_ERR 18
+#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
+#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
+
+#define S_DDP_PADDING_ERR 19
+#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
+#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
+
+#define S_DDP_HDRCRC_ERR 20
+#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
+#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
+
+#define S_DDP_DATACRC_ERR 21
+#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
+#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
+
+#define S_DDP_INVALID_TAG 22
+#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
+#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
+
+#define S_DDP_ULIMIT_ERR 23
+#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
+#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
+
+#define S_DDP_OFFSET_ERR 24
+#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
+#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
+
+#define S_DDP_COLOR_ERR 25
+#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
+#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
+
+#define S_DDP_TID_MISMATCH 26
+#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
+#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
+
+#define S_DDP_INVALID_PPOD 27
+#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
+#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
+
+#define S_DDP_ULP_MODE 28
+#define M_DDP_ULP_MODE 0xF
+#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
+#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
+
+/* cpl_rx_data_ddp.ddp_report fields */
+#define S_DDP_OFFSET 0
+#define M_DDP_OFFSET 0x3FFFFF
+#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
+#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+
+#define S_DDP_URG 24
+#define V_DDP_URG(x) ((x) << S_DDP_URG)
+#define F_DDP_URG V_DDP_URG(1U)
+
+#define S_DDP_PSH 25
+#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
+#define F_DDP_PSH V_DDP_PSH(1U)
+
+#define S_DDP_BUF_COMPLETE 26
+#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
+#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
+
+#define S_DDP_BUF_TIMED_OUT 27
+#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
+#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
+
+#define S_DDP_BUF_IDX 28
+#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
+#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
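+
+/*
+ * Sketch (illustrative): a ULP consumer decodes the report after
+ * byte-swapping, e.g.
+ *
+ *    u32 report = ntohl(ddp->ddp_report);
+ *    unsigned int offset = G_DDP_OFFSET(report);
+ *    int buf_idx = !!(report & F_DDP_BUF_IDX);
+ */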
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ __be32 cntrl;
+ __be32 len;
+};
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ __be32 cntrl;
+ __be32 len;
+
+ __be32 rsvd;
+ __be32 lso_info;
+};
+
+/* cpl_tx_pkt*.cntrl fields */
+#define S_TXPKT_VLAN 0
+#define M_TXPKT_VLAN 0xFFFF
+#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_INTF 16
+#define M_TXPKT_INTF 0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_IPCSUM_DIS 20
+#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
+
+#define S_TXPKT_L4CSUM_DIS 21
+#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
+
+#define S_TXPKT_VLAN_VLD 22
+#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
+
+#define S_TXPKT_LOOPBACK 23
+#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
+#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
+
+#define S_TXPKT_OPCODE 24
+#define M_TXPKT_OPCODE 0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_lso.lso_info fields */
+#define S_LSO_MSS 0
+#define M_LSO_MSS 0x3FFF
+#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
+#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
+
+#define S_LSO_ETH_TYPE 14
+#define M_LSO_ETH_TYPE 0x3
+#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
+#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
+
+#define S_LSO_TCPHDR_WORDS 16
+#define M_LSO_TCPHDR_WORDS 0xF
+#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
+#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
+
+#define S_LSO_IPHDR_WORDS 20
+#define M_LSO_IPHDR_WORDS 0xF
+#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
+#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
+
+#define S_LSO_IPV6 24
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6 V_LSO_IPV6(1U)
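+
+/*
+ * Example (illustrative, assuming the CPL_TX_PKT_LSO opcode defined
+ * earlier in this header): an egress path builds the control words for an
+ * untagged TSO packet on interface "iff" along these lines, storing both
+ * big-endian with htonl() before DMA:
+ *
+ *    cntrl = V_TXPKT_OPCODE(CPL_TX_PKT_LSO) | V_TXPKT_INTF(iff);
+ *    lso_info = V_LSO_MSS(mss) | V_LSO_TCPHDR_WORDS(tcp_hdrlen / 4) |
+ *               V_LSO_IPHDR_WORDS(ip_hdrlen / 4);
+ */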
+
+struct cpl_trace_pkt {
+#ifdef CHELSIO_FW
+ __u8 rss_opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 err:1;
+ __u8:7;
+#else
+ __u8:7;
+ __u8 err:1;
+#endif
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 qid:4;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 qid:4;
+#endif
+ __be32 tstamp;
+#endif /* CHELSIO_FW */
+
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 iff:4;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 iff:4;
+#endif
+ __u8 rsvd[4];
+ __be16 len;
+};
+
+struct cpl_rx_pkt {
+ RSS_HDR __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 iff:4;
+ __u8 csum_valid:1;
+ __u8 ipmi_pkt:1;
+ __u8 vlan_valid:1;
+ __u8 fragment:1;
+#else
+ __u8 fragment:1;
+ __u8 vlan_valid:1;
+ __u8 ipmi_pkt:1;
+ __u8 csum_valid:1;
+ __u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+};
+
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __u8 rsvd[2];
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_IDX 0
+#define M_L2T_W_IDX 0x7FF
+#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
+#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
+
+#define S_L2T_W_VLAN 11
+#define M_L2T_W_VLAN 0xFFF
+#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
+#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
+
+#define S_L2T_W_IFF 23
+#define M_L2T_W_IFF 0xF
+#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
+#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
+
+#define S_L2T_W_PRIO 27
+#define M_L2T_W_PRIO 0x7
+#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
+#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
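+
+/*
+ * Example (illustrative): the L2T code fills the params word of a write
+ * request in essentially this shape:
+ *
+ *    params = htonl(V_L2T_W_IDX(idx) | V_L2T_W_IFF(iff) |
+ *                   V_L2T_W_VLAN(vlan) | V_L2T_W_PRIO(prio));
+ */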
+
+struct cpl_l2t_write_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+ RSS_HDR union opcode_tid ot;
+ __be32 params;
+ __u8 rsvd[2];
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_read_rpl.params fields */
+#define S_L2T_R_PRIO 0
+#define M_L2T_R_PRIO 0x7
+#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
+#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
+
+#define S_L2T_R_VLAN 8
+#define M_L2T_R_VLAN 0xFFF
+#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
+#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
+
+#define S_L2T_R_IFF 20
+#define M_L2T_R_IFF 0xF
+#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
+#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
+
+#define S_L2T_STATUS 24
+#define M_L2T_STATUS 0xFF
+#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
+#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
+
+struct cpl_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mtu_idx:4;
+ __u8 iff:4;
+#else
+ __u8 iff:4;
+ __u8 mtu_idx:4;
+#endif
+ __be16 rsvd2;
+ __be16 rsvd3;
+ __u8 src_mac1[6];
+ __be16 rsvd4;
+ __u8 src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:4;
+ __u8 iff:4;
+#else
+ __u8 iff:4;
+ __u8:4;
+#endif
+ __be16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mtu_idx:4;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 mtu_idx:4;
+#endif
+ __be16 rsvd2;
+ __be16 rsvd3;
+ __u8 src_mac1[6];
+ __be16 rsvd4;
+ __u8 src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
+#define S_RTE_REQ_LUT_IX 8
+#define M_RTE_REQ_LUT_IX 0x7FF
+#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
+#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
+
+#define S_RTE_REQ_LUT_BASE 19
+#define M_RTE_REQ_LUT_BASE 0x7FF
+#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
+#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
+
+#define S_RTE_READ_REQ_SELECT 31
+#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
+#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
+
+struct cpl_rte_delete_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:6;
+ __u8 write_tcam:1;
+ __u8 write_l2t_lut:1;
+#else
+ __u8 write_l2t_lut:1;
+ __u8 write_tcam:1;
+ __u8:6;
+#endif
+ __u8 rsvd[3];
+ __be32 lut_params;
+ __be16 rsvd2;
+ __be16 l2t_idx;
+ __be32 netmask;
+ __be32 faddr;
+};
+
+/* cpl_rte_write_req.lut_params fields */
+#define S_RTE_WRITE_REQ_LUT_IX 10
+#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
+#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
+#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
+
+#define S_RTE_WRITE_REQ_LUT_BASE 21
+#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
+#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
+#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
+
+struct cpl_rte_write_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+struct cpl_rte_read_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd0;
+ __be16 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:7;
+ __u8 select:1;
+#else
+ __u8 select:1;
+ __u8:7;
+#endif
+ __u8 rsvd2[3];
+ __be32 addr;
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_barrier {
+ WR_HDR;
+ __u8 opcode;
+ __u8 rsvd[7];
+};
+
+struct cpl_rdma_read_req {
+ __u8 opcode;
+ __u8 rsvd[15];
+};
+
+struct cpl_rdma_terminate {
+#ifdef CHELSIO_FW
+ __u8 opcode;
+ __u8 rsvd[2];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 rspq:3;
+ __u8:5;
+#else
+ __u8:5;
+ __u8 rspq:3;
+#endif
+ __be32 tid_len;
+#endif
+ __be32 msn;
+ __be32 mo;
+ __u8 data[];
+};
+
+/* cpl_rdma_terminate.tid_len fields */
+#define S_FLIT_CNT 0
+#define M_FLIT_CNT 0xFF
+#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
+#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
+
+#define S_TERM_TID 8
+#define M_TERM_TID 0xFFFFF
+#define V_TERM_TID(x) ((x) << S_TERM_TID)
+#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
+
+/* ULP_TX opcodes */
+enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
+
+#define S_ULPTX_CMD 28
+#define M_ULPTX_CMD 0xF
+#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define S_ULPTX_NFLITS 0
+#define M_ULPTX_NFLITS 0xFF
+#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
+
+struct ulp_mem_io {
+ WR_HDR;
+ __be32 cmd_lock_addr;
+ __be32 len;
+};
+
+/* ulp_mem_io.cmd_lock_addr fields */
+#define S_ULP_MEMIO_ADDR 0
+#define M_ULP_MEMIO_ADDR 0x7FFFFFF
+#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
+#define S_ULP_MEMIO_LOCK 27
+#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
+#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
+
+/* ulp_mem_io.len fields */
+#define S_ULP_MEMIO_DATA_LEN 28
+#define M_ULP_MEMIO_DATA_LEN 0xF
+#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
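+
+/*
+ * Sketch (illustrative; field usage beyond these macros is up to the
+ * ULP): a memory write request at flit address "addr" would start as
+ *
+ *    cmd_lock_addr = htonl(V_ULPTX_CMD(ULP_MEM_WRITE) |
+ *                          V_ULP_MEMIO_ADDR(addr));
+ */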
+
+#endif /* T3_CPL_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
new file mode 100644
index 0000000000..a06003bfa0
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -0,0 +1,3748 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/etherdevice.h>
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "firmware_exports.h"
+
+static void t3_port_intr_clear(struct adapter *adapter, int idx);
+
+/**
+ * t3_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t3_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
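+
+/*
+ * Most callers go through the t3_wait_op_done() wrapper in common.h,
+ * which is this function with a NULL @valp, e.g.
+ *
+ *    t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+ */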
+
+/**
+ * t3_write_regs - write a bunch of registers
+ * @adapter: the adapter to program
+ * @p: an array of register address/register value pairs
+ * @n: the number of address/value pairs
+ * @offset: register address offset
+ *
+ * Takes an array of register address/register value pairs and writes each
+ * value to the corresponding register. Register addresses are adjusted
+ * by the supplied offset.
+ */
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+ int n, unsigned int offset)
+{
+ while (n--) {
+ t3_write_reg(adapter, p->reg_addr + offset, p->val);
+ p++;
+ }
+}
+
+/**
+ * t3_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t3_read_reg(adapter, addr) & ~mask;
+
+ t3_write_reg(adapter, addr, v | val);
+ t3_read_reg(adapter, addr); /* flush */
+}
+
+/**
+ * t3_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t3_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t3_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/**
+ * t3_mc7_bd_read - read from MC7 through backdoor accesses
+ * @mc7: identifies MC7 to read from
+ * @start: index of first 64-bit word to read
+ * @n: number of 64-bit words to read
+ * @buf: where to store the read result
+ *
+ * Read n 64-bit words from MC7 starting at word start, using backdoor
+ * accesses.
+ */
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+ u64 *buf)
+{
+ static const int shift[] = { 0, 0, 16, 24 };
+ static const int step[] = { 0, 32, 16, 8 };
+
+ unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
+ struct adapter *adap = mc7->adapter;
+
+ if (start >= size64 || start + n > size64)
+ return -EINVAL;
+
+ start *= (8 << mc7->width);
+ while (n--) {
+ int i;
+ u64 val64 = 0;
+
+ for (i = (1 << mc7->width) - 1; i >= 0; --i) {
+ int attempts = 10;
+ u32 val;
+
+ t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
+ t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
+ val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
+ while ((val & F_BUSY) && attempts--)
+ val = t3_read_reg(adap,
+ mc7->offset + A_MC7_BD_OP);
+ if (val & F_BUSY)
+ return -EIO;
+
+ val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
+ if (mc7->width == 0) {
+ val64 = t3_read_reg(adap,
+ mc7->offset +
+ A_MC7_BD_DATA0);
+ val64 |= (u64) val << 32;
+ } else {
+ if (mc7->width > 1)
+ val >>= shift[mc7->width];
+ val64 |= (u64) val << (step[mc7->width] * i);
+ }
+ start += 8;
+ }
+ *buf++ = val64;
+ }
+ return 0;
+}
+
+/*
+ * Initialize MI1.
+ */
+static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
+{
+ u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
+ u32 val = F_PREEN | V_CLKDIV(clkdiv);
+
+ t3_write_reg(adap, A_MI1_CFG, val);
+}
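+
+/*
+ * For example, a 125 MHz core clock (cclk = 125000 kHz) and a 2.5 MHz
+ * MDIO clock (mdc = 2500 kHz) yield clkdiv = 125000 / (2 * 2500) - 1 = 24.
+ * (Illustrative values; the real ones come from the VPD.)
+ */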
+
+#define MDIO_ATTEMPTS 20
+
+/*
+ * MI1 read/write operations for clause 22 PHYs.
+ */
+static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+ u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+ mutex_lock(&adapter->mdio_lock);
+ t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+ if (!ret)
+ ret = t3_read_reg(adapter, A_MI1_DATA);
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+ u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+ mutex_lock(&adapter->mdio_lock);
+ t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_DATA, val);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ops = {
+ .read = t3_mi1_read,
+ .write = t3_mi1_write,
+ .mode_support = MDIO_SUPPORTS_C22
+};
+
+/*
+ * Performs the address cycle for clause 45 PHYs.
+ * Must be called with the MDIO_LOCK held.
+ */
+static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
+ int reg_addr)
+{
+ u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+
+ t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+ return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 10);
+}
+
+/*
+ * MI1 read/write operations for indirect-addressed PHYs.
+ */
+static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ mutex_lock(&adapter->mdio_lock);
+ ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+ if (!ret) {
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 10);
+ if (!ret)
+ ret = t3_read_reg(adapter, A_MI1_DATA);
+ }
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ mutex_lock(&adapter->mdio_lock);
+ ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+ if (!ret) {
+ t3_write_reg(adapter, A_MI1_DATA, val);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 10);
+ }
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ext_ops = {
+ .read = mi1_ext_read,
+ .write = mi1_ext_write,
+ .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
+};
+
+/**
+ * t3_mdio_change_bits - modify the value of a PHY register
+ * @phy: the PHY to operate on
+ * @mmd: the device address
+ * @reg: the register address
+ * @clear: what part of the register value to mask off
+ * @set: what part of the register value to set
+ *
+ * Changes the value of a PHY register by applying a mask to its current
+ * value and ORing the result with a new value.
+ */
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+ unsigned int set)
+{
+ int ret;
+ unsigned int val;
+
+ ret = t3_mdio_read(phy, mmd, reg, &val);
+ if (!ret) {
+ val &= ~clear;
+ ret = t3_mdio_write(phy, mmd, reg, val | set);
+ }
+ return ret;
+}
+
+/**
+ * t3_phy_reset - reset a PHY block
+ * @phy: the PHY to operate on
+ * @mmd: the device address of the PHY block to reset
+ * @wait: how long to wait for the reset to complete in 1ms increments
+ *
+ * Resets a PHY block and optionally waits for the reset to complete.
+ * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
+ * for 10G PHYs.
+ */
+int t3_phy_reset(struct cphy *phy, int mmd, int wait)
+{
+ int err;
+ unsigned int ctl;
+
+ err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
+ MDIO_CTRL1_RESET);
+ if (err || !wait)
+ return err;
+
+ do {
+ err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
+ if (err)
+ return err;
+ ctl &= MDIO_CTRL1_RESET;
+ if (ctl)
+ msleep(1);
+ } while (ctl && --wait);
+
+ return ctl ? -1 : 0;
+}
+
+/**
+ * t3_phy_advertise - set the PHY advertisement registers for autoneg
+ * @phy: the PHY to operate on
+ * @advert: bitmap of capabilities the PHY should advertise
+ *
+ * Sets a 10/100/1000 PHY's advertisement registers to advertise the
+ * requested capabilities.
+ */
+int t3_phy_advertise(struct cphy *phy, unsigned int advert)
+{
+ int err;
+ unsigned int val = 0;
+
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
+ if (err)
+ return err;
+
+ val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (advert & ADVERTISED_1000baseT_Half)
+ val |= ADVERTISE_1000HALF;
+ if (advert & ADVERTISED_1000baseT_Full)
+ val |= ADVERTISE_1000FULL;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
+ if (err)
+ return err;
+
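+ /* start from the IEEE 802.3 selector field (CSMA/CD, i.e. 1) */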
+ val = 1;
+ if (advert & ADVERTISED_10baseT_Half)
+ val |= ADVERTISE_10HALF;
+ if (advert & ADVERTISED_10baseT_Full)
+ val |= ADVERTISE_10FULL;
+ if (advert & ADVERTISED_100baseT_Half)
+ val |= ADVERTISE_100HALF;
+ if (advert & ADVERTISED_100baseT_Full)
+ val |= ADVERTISE_100FULL;
+ if (advert & ADVERTISED_Pause)
+ val |= ADVERTISE_PAUSE_CAP;
+ if (advert & ADVERTISED_Asym_Pause)
+ val |= ADVERTISE_PAUSE_ASYM;
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
+}
+
+/**
+ * t3_phy_advertise_fiber - set fiber PHY advertisement register
+ * @phy: the PHY to operate on
+ * @advert: bitmap of capabilities the PHY should advertise
+ *
+ * Sets a fiber PHY's advertisement register to advertise the
+ * requested capabilities.
+ */
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
+{
+ unsigned int val = 0;
+
+ if (advert & ADVERTISED_1000baseT_Half)
+ val |= ADVERTISE_1000XHALF;
+ if (advert & ADVERTISED_1000baseT_Full)
+ val |= ADVERTISE_1000XFULL;
+ if (advert & ADVERTISED_Pause)
+ val |= ADVERTISE_1000XPAUSE;
+ if (advert & ADVERTISED_Asym_Pause)
+ val |= ADVERTISE_1000XPSE_ASYM;
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
+}
+
+/**
+ * t3_set_phy_speed_duplex - force PHY speed and duplex
+ * @phy: the PHY to operate on
+ * @speed: requested PHY speed
+ * @duplex: requested PHY duplex
+ *
+ * Force a 10/100/1000 PHY's speed and duplex. This also disables
+ * auto-negotiation except for GigE, where auto-negotiation is mandatory.
+ */
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err;
+ unsigned int ctl;
+
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
+ if (err)
+ return err;
+
+ if (speed >= 0) {
+ ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ ctl |= BMCR_SPEED1000;
+ }
+ if (duplex >= 0) {
+ ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ }
+ if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
+ ctl |= BMCR_ANENABLE;
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
+}
+
+int t3_phy_lasi_intr_enable(struct cphy *phy)
+{
+ return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ MDIO_PMA_LASI_LSALARM);
+}
+
+int t3_phy_lasi_intr_disable(struct cphy *phy)
+{
+ return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+int t3_phy_lasi_intr_clear(struct cphy *phy)
+{
+ u32 val;
+
+ return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+}
+
+int t3_phy_lasi_intr_handler(struct cphy *phy)
+{
+ unsigned int status;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
+ &status);
+
+ if (err)
+ return err;
+ return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
+}
+
+static const struct adapter_info t3_adap_info[] = {
+ {1, 1, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+ &mi1_mdio_ops, "Chelsio PE9000"},
+ {1, 1, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+ &mi1_mdio_ops, "Chelsio T302"},
+ {1, 0, 0,
+ F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
+ F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T310"},
+ {1, 1, 0,
+ F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
+ F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
+ F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T320"},
+ {},
+ {},
+ {1, 0, 0,
+ F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+ F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T310" },
+ {1, 0, 0,
+ F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+ F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
+ { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
+};
+
+/*
+ * Return the adapter_info structure with a given index. Out-of-range indices
+ * return NULL.
+ */
+const struct adapter_info *t3_get_adapter_info(unsigned int id)
+{
+ return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
+}
+
+struct port_type_info {
+ int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *ops);
+};
+
+static const struct port_type_info port_types[] = {
+ { NULL },
+ { t3_ael1002_phy_prep },
+ { t3_vsc8211_phy_prep },
+ { NULL },
+ { t3_xaui_direct_phy_prep },
+ { t3_ael2005_phy_prep },
+ { t3_qt2045_phy_prep },
+ { t3_ael1006_phy_prep },
+ { NULL },
+ { t3_aq100x_phy_prep },
+ { t3_ael2020_phy_prep },
+};
+
+#define VPD_ENTRY(name, len) \
+ u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
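+
+/*
+ * For example, VPD_ENTRY(sn, SERNUM_LEN) expands to
+ *
+ *    u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
+ *
+ * i.e. a VPD-R keyword, its length byte, and the payload.
+ */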
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t3_vpd {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[16];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+ VPD_ENTRY(pn, 16); /* part number */
+ VPD_ENTRY(ec, 16); /* EC level */
+ VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
+ VPD_ENTRY(na, 12); /* MAC address base */
+ VPD_ENTRY(cclk, 6); /* core clock */
+ VPD_ENTRY(mclk, 6); /* mem clock */
+ VPD_ENTRY(uclk, 6); /* uP clk */
+ VPD_ENTRY(mdc, 6); /* MDIO clk */
+ VPD_ENTRY(mt, 2); /* mem timing */
+ VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
+ VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
+ VPD_ENTRY(port0, 2); /* PHY0 complex */
+ VPD_ENTRY(port1, 2); /* PHY1 complex */
+ VPD_ENTRY(port2, 2); /* PHY2 complex */
+ VPD_ENTRY(port3, 2); /* PHY3 complex */
+ VPD_ENTRY(rv, 1); /* csum */
+ u32 pad; /* for multiple-of-4 sizing and alignment */
+};
+
+#define EEPROM_STAT_ADDR 0x4000
+#define VPD_BASE 0xc00
+
+/**
+ * t3_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: 1 to enable write protection, 0 to disable it
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t3_seeprom_wp(struct adapter *adapter, int enable)
+{
+ u32 data = enable ? 0xc : 0;
+ int ret;
+
+ /* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */
+ ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32),
+ &data);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
+{
+ char tok[256];
+
+ memcpy(tok, s, len);
+ tok[len] = 0;
+ return kstrtouint(strim(tok), base, val);
+}
+
+static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
+{
+ char tok[256];
+
+ memcpy(tok, s, len);
+ tok[len] = 0;
+ return kstrtou16(strim(tok), base, val);
+}
+
+/**
+ * get_vpd_params - read VPD parameters from VPD EEPROM
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ struct t3_vpd vpd;
+ u8 base_val = 0;
+ int addr, ret;
+
+ /*
+ * Card information is normally at VPD_BASE but some early cards had
+ * it at 0.
+ */
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
+ if (ret < 0)
+ return ret;
+ addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;
+
+ ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd);
+ if (ret < 0)
+ return ret;
+
+ ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
+ if (ret)
+ return ret;
+ ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
+ if (ret)
+ return ret;
+ memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
+
+ /* Old eeproms didn't have port information */
+ if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
+ p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
+ p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
+ } else {
+ p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
+ p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
+ ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
+ &p->xauicfg[0]);
+ if (ret)
+ return ret;
+ ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
+ &p->xauicfg[1]);
+ if (ret)
+ return ret;
+ }
+
+ ret = hex2bin(p->eth_base, vpd.na_data, 6);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 5, /* max retries for SF1 operations */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
+ FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
+ FW_MIN_SIZE = 8 /* at least version and csum */
+};
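+
+/*
+ * The opcodes above are the common JEDEC SPI NOR command set (write
+ * enable, read status, page program, fast read, 64KB sector erase), so
+ * the routines below should work with any flash part implementing it.
+ */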
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+ if (!ret)
+ *valp = t3_read_reg(adapter, A_SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t3_write_reg(adapter, A_SF_DATA, val);
+ t3_write_reg(adapter, A_SF_OP,
+ V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+ return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+}
+
+/**
+ * flash_wait_op - wait for a flash operation to complete
+ * @adapter: the adapter
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+ int ret;
+ u32 status;
+
+ while (1) {
+ if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
+ (ret = sf1_read(adapter, 1, 0, &status)) != 0)
+ return ret;
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/**
+ * t3_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+static int t3_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+ return -EINVAL;
+
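+ /*
+ * Build the fast-read command: swab32() moves the 24-bit flash
+ * address into the upper three bytes, most-significant byte first,
+ * leaving the low byte free for the opcode.
+ */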
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
+ (ret = sf1_read(adapter, 1, 1, data)) != 0)
+ return ret;
+
+ for (; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, data);
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/**
+ * t3_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address.
+ */
+static int t3_write_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int n, const u8 *data)
+{
+ int ret;
+ u32 buf[64];
+ unsigned int i, c, left, val, offset = addr & 0xff;
+
+ if (addr + n > SF_SIZE || offset + n > 256)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 1, val)) != 0)
+ return ret;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = sf1_write(adapter, c, c != left, val);
+ if (ret)
+ return ret;
+ }
+ if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
+ return ret;
+
+ /* Read the page to verify the write succeeded */
+ ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (u8 *) buf + offset, n))
+ return -EIO;
+ return 0;
+}
+
+/**
+ * t3_get_tp_version - read the tp sram version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the protocol sram version from sram.
+ */
+int t3_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ int ret;
+
+ /* Get version loaded in SRAM */
+ t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
+ ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
+ 1, 1, 5, 1);
+ if (ret)
+ return ret;
+
+ *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
+
+ return 0;
+}
+
+/**
+ * t3_check_tpsram_version - check the protocol SRAM version
+ * @adapter: the adapter
+ *
+ * Reads the protocol SRAM version from the adapter and checks it against
+ * the version the driver was compiled for.
+ */
+int t3_check_tpsram_version(struct adapter *adapter)
+{
+ int ret;
+ u32 vers;
+ unsigned int major, minor;
+
+ if (adapter->params.rev == T3_REV_A)
+ return 0;
+
+ ret = t3_get_tp_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ major = G_TP_VERSION_MAJOR(vers);
+ minor = G_TP_VERSION_MINOR(vers);
+
+ if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+ return 0;
+
+ CH_ERR(adapter, "found wrong TP version (%u.%u), "
+ "driver compiled for version %d.%d\n", major, minor,
+ TP_VERSION_MAJOR, TP_VERSION_MINOR);
+ return -EINVAL;
+}
+
+/**
+ * t3_check_tpsram - check if a protocol SRAM image is compatible
+ * @adapter: the adapter
+ * @tp_sram: the protocol SRAM image to check
+ * @size: image size
+ *
+ * Checks if an adapter's tp sram is compatible with the driver.
+ * Returns 0 if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
+ unsigned int size)
+{
+ u32 csum;
+ unsigned int i;
+ const __be32 *p = (const __be32 *)tp_sram;
+
+ /* Verify checksum */
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
+ csum);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum fw_version_type {
+ FW_VERSION_N3,
+ FW_VERSION_T3
+};
+
+/**
+ * t3_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+int t3_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
+}
+
+/**
+ * t3_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_fw_version(struct adapter *adapter)
+{
+ int ret;
+ u32 vers;
+ unsigned int type, major, minor;
+
+ ret = t3_get_fw_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ type = G_FW_VERSION_TYPE(vers);
+ major = G_FW_VERSION_MAJOR(vers);
+ minor = G_FW_VERSION_MINOR(vers);
+
+ if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+ minor == FW_VERSION_MINOR)
+ return 0;
+ else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
+ CH_WARN(adapter, "found old FW minor version(%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ else {
+ CH_WARN(adapter, "found newer FW version(%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * t3_flash_erase_sectors - erase a range of flash sectors
+ * @adapter: the adapter
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given range.
+ */
+static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+ while (start <= end) {
+ int ret;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 0,
+ SF_ERASE_SECTOR | (start << 8))) != 0 ||
+ (ret = flash_wait_op(adapter, 5, 500)) != 0)
+ return ret;
+ start++;
+ }
+ return 0;
+}
+
+/**
+ * t3_load_fw - download firmware
+ * @adapter: the adapter
+ * @fw_data: the firmware image to write
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ * The FW image has the following sections: @size - 8 bytes of code and
+ * data, followed by 4 bytes of FW version, followed by the 32-bit
+ * 1's complement checksum of the whole image.
+ */
+int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
+{
+ u32 csum;
+ unsigned int i;
+ const __be32 *p = (const __be32 *)fw_data;
+ int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
+
+ if ((size & 3) || size < FW_MIN_SIZE)
+ return -EINVAL;
+ if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
+ return -EFBIG;
+
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+ csum);
+ return -EINVAL;
+ }
+
+ ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
+ if (ret)
+ goto out;
+
+ size -= 8; /* trim off version and checksum */
+ for (addr = FW_FLASH_BOOT_ADDR; size;) {
+ unsigned int chunk_size = min(size, 256U);
+
+ ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
+ if (ret)
+ goto out;
+
+ addr += chunk_size;
+ fw_data += chunk_size;
+ size -= chunk_size;
+ }
+
+ ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
+out:
+ if (ret)
+ CH_ERR(adapter, "firmware download failed, error %d\n", ret);
+ return ret;
+}
+
+#define CIM_CTL_BASE 0x2000
+
+/**
+ * t3_cim_ctl_blk_read - read a block from CIM control region
+ * @adap: the adapter
+ * @addr: the start address within the CIM control region
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM control region.
+ */
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
+ ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ 0, 5, 2);
+ if (!ret)
+ *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
+ }
+ return ret;
+}
+
+static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
+ u32 *rx_hash_high, u32 *rx_hash_low)
+{
+ /* stop Rx unicast traffic */
+ t3_mac_disable_exact_filters(mac);
+
+ /* stop broadcast, multicast, promiscuous mode traffic */
+ *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
+ t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+ F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+ F_DISBCAST);
+
+ *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
+
+ *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
+
+ /* Leave time to drain max RX fifo */
+ msleep(1);
+}
+
+static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
+ u32 rx_hash_high, u32 rx_hash_low)
+{
+ t3_mac_enable_exact_filters(mac);
+ t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+ F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+ rx_cfg);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
+}
+
+/**
+ * t3_link_changed - handle interface link changes
+ * @adapter: the adapter
+ * @port_id: the port index that changed link state
+ *
+ * Called when a port's link settings change to propagate the new values
+ * to the associated PHY and MAC. After performing the common tasks it
+ * invokes an OS-specific handler.
+ */
+void t3_link_changed(struct adapter *adapter, int port_id)
+{
+ int link_ok, speed, duplex, fc;
+ struct port_info *pi = adap2pinfo(adapter, port_id);
+ struct cphy *phy = &pi->phy;
+ struct cmac *mac = &pi->mac;
+ struct link_config *lc = &pi->link_config;
+
+ phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+ if (!lc->link_ok && link_ok) {
+ u32 rx_cfg, rx_hash_high, rx_hash_low;
+ u32 status;
+
+ t3_xgm_intr_enable(adapter, port_id);
+ t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+ t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+ t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+ status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
+ if (status & F_LINKFAULTCHANGE) {
+ mac->stats.link_faults++;
+ pi->link_fault = 1;
+ }
+ t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+ }
+
+ if (lc->requested_fc & PAUSE_AUTONEG)
+ fc &= lc->requested_fc;
+ else
+ fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ if (link_ok == lc->link_ok && speed == lc->speed &&
+ duplex == lc->duplex && fc == lc->fc)
+ return; /* nothing changed */
+
+ if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
+ uses_xaui(adapter)) {
+ if (link_ok)
+ t3b_pcs_reset(mac);
+ t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+ link_ok ? F_TXACTENABLE | F_RXEN : 0);
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed < 0 ? SPEED_INVALID : speed;
+ lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+
+ if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+ /* Set MAC speed, duplex, and flow control to match PHY. */
+ t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
+ lc->fc = fc;
+ }
+
+ t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
+ speed, duplex, fc);
+}
+
+void t3_link_fault(struct adapter *adapter, int port_id)
+{
+ struct port_info *pi = adap2pinfo(adapter, port_id);
+ struct cmac *mac = &pi->mac;
+ struct cphy *phy = &pi->phy;
+ struct link_config *lc = &pi->link_config;
+ int link_ok, speed, duplex, fc, link_fault;
+ u32 rx_cfg, rx_hash_high, rx_hash_low;
+
+ t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+
+ if (adapter->params.rev > 0 && uses_xaui(adapter))
+ t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
+
+ t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+ t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+ t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+
+ link_fault = t3_read_reg(adapter,
+ A_XGM_INT_STATUS + mac->offset);
+ link_fault &= F_LINKFAULTCHANGE;
+
+ link_ok = lc->link_ok;
+ speed = lc->speed;
+ duplex = lc->duplex;
+ fc = lc->fc;
+
+ phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+ if (link_fault) {
+ lc->link_ok = 0;
+ lc->speed = SPEED_INVALID;
+ lc->duplex = DUPLEX_INVALID;
+
+ t3_os_link_fault(adapter, port_id, 0);
+
+ /* Account link faults only when the phy reports a link up */
+ if (link_ok)
+ mac->stats.link_faults++;
+ } else {
+ if (link_ok)
+ t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+ F_TXACTENABLE | F_RXEN);
+
+ pi->link_fault = 0;
+ lc->link_ok = (unsigned char)link_ok;
+ lc->speed = speed < 0 ? SPEED_INVALID : speed;
+ lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+ t3_os_link_fault(adapter, port_id, link_ok);
+ }
+}
+
+/**
+ * t3_link_start - apply link configuration to MAC/PHY
+ * @phy: the PHY to setup
+ * @mac: the MAC to setup
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+ unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ lc->link_ok = 0;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
+ if (fc) {
+ lc->advertising |= ADVERTISED_Asym_Pause;
+ if (fc & PAUSE_RX)
+ lc->advertising |= ADVERTISED_Pause;
+ }
+ phy->ops->advertise(phy, lc->advertising);
+
+ if (lc->autoneg == AUTONEG_DISABLE) {
+ lc->speed = lc->requested_speed;
+ lc->duplex = lc->requested_duplex;
+ lc->fc = (unsigned char)fc;
+ t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
+ fc);
+ /* Also disables autoneg */
+ phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+ } else
+ phy->ops->autoneg_enable(phy);
+ } else {
+ t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
+ lc->fc = (unsigned char)fc;
+ phy->ops->reset(phy, 0);
+ }
+ return 0;
+}
+
+/**
+ * t3_set_vlan_accel - control HW VLAN extraction
+ * @adapter: the adapter
+ * @ports: bitmap of adapter ports to operate on
+ * @on: enable (1) or disable (0) HW VLAN extraction
+ *
+ * Enables or disables HW extraction of VLAN tags for the given ports.
+ */
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
+{
+ t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
+ ports << S_VLANEXTRACTIONENABLE,
+ on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
+}
+
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/**
+ * t3_handle_intr_status - table driven interrupt handler
+ * @adapter: the adapter that generated the interrupt
+ * @reg: the interrupt status register to process
+ * @mask: a mask to apply to the interrupt status
+ * @acts: table of interrupt actions
+ * @stats: statistics counters tracking interrupt occurrences
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally printing a warning or alert message, and optionally
+ * incrementing a stat counter. The table is terminated by an entry
+ * specifying mask 0. Returns the number of fatal interrupt conditions.
+ */
+static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
+ unsigned int mask,
+ const struct intr_info *acts,
+ unsigned long *stats)
+{
+ int fatal = 0;
+ unsigned int status = t3_read_reg(adapter, reg) & mask;
+
+ for (; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ CH_ALERT(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ status &= ~acts->mask;
+ } else if (acts->msg)
+ CH_WARN(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ if (acts->stat_idx >= 0)
+ stats[acts->stat_idx]++;
+ }
+ if (status) /* clear processed interrupts */
+ t3_write_reg(adapter, reg, status);
+ return fatal;
+}
+
+#define SGE_INTR_MASK (F_RSPQDISABLED | \
+ F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
+ F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+ F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+ F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+ F_LOPIODRBDROPERR)
+#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
+ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
+ F_NFASRCHFAIL)
+#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
+#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
+ F_TXFIFO_UNDERRUN)
+#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
+ F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
+ F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
+ F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
+ V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
+ V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
+#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
+ F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
+ /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
+ F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
+ F_TXPARERR | V_BISTERR(M_BISTERR))
+#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
+ F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
+ F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
+#define ULPTX_INTR_MASK 0xfc
+#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
+ F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
+ F_ZERO_SWITCH_ERROR)
+#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
+ F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
+ F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
+ F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
+ F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
+ F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
+ F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
+ F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
+#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
+ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
+ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
+#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
+ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
+ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
+#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
+ V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
+ V_RXTPPARERRENB(M_RXTPPARERRENB) | \
+ V_MCAPARERRENB(M_MCAPARERRENB))
+#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
+#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
+ F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
+ F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
+ F_MPS0 | F_CPL_SWITCH)
+/*
+ * Interrupt handler for the PCIX1 module.
+ */
+static void pci_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pcix1_intr_info[] = {
+ {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
+ {F_SIGTARABT, "PCI signaled target abort", -1, 1},
+ {F_RCVTARABT, "PCI received target abort", -1, 1},
+ {F_RCVMSTABT, "PCI received master abort", -1, 1},
+ {F_SIGSYSERR, "PCI signaled system error", -1, 1},
+ {F_DETPARERR, "PCI detected parity error", -1, 1},
+ {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
+ {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
+ {F_RCVSPLCMPERR, "PCI received split completion error", -1,
+ 1},
+ {F_DETCORECCERR, "PCI correctable ECC error",
+ STAT_PCI_CORR_ECC, 0},
+ {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
+ {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+ {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
+ 1},
+ {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
+ 1},
+ {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
+ 1},
+ {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
+ "error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
+ pcix1_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pcie_intr_info[] = {
+ {F_PEXERR, "PCI PEX error", -1, 1},
+ {F_UNXSPLCPLERRR,
+ "PCI unexpected split completion DMA read error", -1, 1},
+ {F_UNXSPLCPLERRC,
+ "PCI unexpected split completion DMA command error", -1, 1},
+ {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+ {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
+ {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
+ {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
+ {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
+ "PCI MSI-X table/PBA parity error", -1, 1},
+ {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
+ {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
+ {F_RXPARERR, "PCI Rx parity error", -1, 1},
+ {F_TXPARERR, "PCI Tx parity error", -1, 1},
+ {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
+ {0}
+ };
+
+ if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
+ CH_ALERT(adapter, "PEX error code 0x%x\n",
+ t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
+ if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
+ pcie_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info tp_intr_info[] = {
+ {0xffffff, "TP parity error", -1, 1},
+ {0x1000000, "TP out of Rx pages", -1, 1},
+ {0x2000000, "TP out of Tx pages", -1, 1},
+ {0}
+ };
+
+ static const struct intr_info tp_intr_info_t3c[] = {
+ {0x1fffffff, "TP parity error", -1, 1},
+ {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
+ {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
+ adapter->params.rev < T3_REV_C ?
+ tp_intr_info : tp_intr_info_t3c, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info cim_intr_info[] = {
+ {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
+ {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
+ {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
+ {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
+ {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
+ {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
+ {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
+ {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
+ {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
+ {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
+ {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
+ {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
+ {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
+ {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
+ {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
+ {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
+ {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
+ {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
+ {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
+ {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
+ {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
+ {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
+ {F_ITAGPARERR, "CIM itag parity error", -1, 1},
+ {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
+ cim_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info ulprx_intr_info[] = {
+ {F_PARERRDATA, "ULP RX data parity error", -1, 1},
+ {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
+ {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
+ {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
+ {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
+ {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
+ {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
+ {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
+ ulprx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info ulptx_intr_info[] = {
+ {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
+ STAT_ULP_CH0_PBL_OOB, 0},
+ {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
+ STAT_ULP_CH1_PBL_OOB, 0},
+ {0xfc, "ULP TX parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
+ ulptx_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
+ F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
+ F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
+ F_ICSPI1_TX_FRAMING_ERROR)
+#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
+ F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
+ F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
+ F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pmtx_intr_info[] = {
+ {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
+ {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
+ {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
+ {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
+ "PMTX ispi parity error", -1, 1},
+ {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
+ "PMTX ospi parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
+ pmtx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
+ F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
+ F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
+ F_IESPI1_TX_FRAMING_ERROR)
+#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
+ F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
+ F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
+ F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pmrx_intr_info[] = {
+ {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
+ {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
+ {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
+ {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
+ "PMRX ispi parity error", -1, 1},
+ {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
+ "PMRX ospi parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
+ pmrx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info cplsw_intr_info[] = {
+ {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
+ {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
+ {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
+ {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
+ {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
+ {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
+ cplsw_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info mps_intr_info[] = {
+ {0x1ff, "MPS parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
+ mps_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
+
+/*
+ * MC7 interrupt handler.
+ */
+static void mc7_intr_handler(struct mc7 *mc7)
+{
+ struct adapter *adapter = mc7->adapter;
+ u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
+
+ if (cause & F_CE) {
+ mc7->stats.corr_err++;
+ CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
+ "data 0x%x 0x%x 0x%x\n", mc7->name,
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
+ }
+
+ if (cause & F_UE) {
+ mc7->stats.uncorr_err++;
+ CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
+ "data 0x%x 0x%x 0x%x\n", mc7->name,
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
+ }
+
+ if (G_PE(cause)) {
+ mc7->stats.parity_err++;
+ CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
+ mc7->name, G_PE(cause));
+ }
+
+ if (cause & F_AE) {
+ u32 addr = 0;
+
+ if (adapter->params.rev > 0)
+ addr = t3_read_reg(adapter,
+ mc7->offset + A_MC7_ERR_ADDR);
+ mc7->stats.addr_err++;
+ CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
+ mc7->name, addr);
+ }
+
+ if (cause & MC7_INTR_FATAL)
+ t3_fatal_err(adapter);
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
+}
+
+#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
+/*
+ * XGMAC interrupt handler.
+ */
+static int mac_intr_handler(struct adapter *adap, unsigned int idx)
+{
+ struct cmac *mac = &adap2pinfo(adap, idx)->mac;
+ /*
+ * We mask out interrupt causes for which we're not taking interrupts.
+ * This allows us to use polling logic to monitor some of the other
+ * conditions when taking interrupts would impose too much load on the
+ * system.
+ */
+ u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
+ ~F_RXFIFO_OVERFLOW;
+
+ if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
+ mac->stats.tx_fifo_parity_err++;
+ CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
+ }
+ if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
+ mac->stats.rx_fifo_parity_err++;
+ CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
+ }
+ if (cause & F_TXFIFO_UNDERRUN)
+ mac->stats.tx_fifo_urun++;
+ if (cause & F_RXFIFO_OVERFLOW)
+ mac->stats.rx_fifo_ovfl++;
+ if (cause & V_SERDES_LOS(M_SERDES_LOS))
+ mac->stats.serdes_signal_loss++;
+ if (cause & F_XAUIPCSCTCERR)
+ mac->stats.xaui_pcs_ctc_err++;
+ if (cause & F_XAUIPCSALIGNCHANGE)
+ mac->stats.xaui_pcs_align_change++;
+ if (cause & F_XGM_INT) {
+ t3_set_reg_field(adap,
+ A_XGM_INT_ENABLE + mac->offset,
+ F_XGM_INT, 0);
+ mac->stats.link_faults++;
+
+ t3_os_link_fault_handler(adap, idx);
+ }
+
+ if (cause & XGM_INTR_FATAL)
+ t3_fatal_err(adap);
+
+ t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
+ return cause != 0;
+}
+
+/*
+ * Interrupt handler for PHY events.
+ */
+int t3_phy_intr_handler(struct adapter *adapter)
+{
+ u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ if (!(p->phy.caps & SUPPORTED_IRQ))
+ continue;
+
+ if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
+ int phy_cause = p->phy.ops->intr_handler(&p->phy);
+
+ if (phy_cause & cphy_cause_link_change)
+ t3_link_changed(adapter, i);
+ if (phy_cause & cphy_cause_fifo_error)
+ p->phy.fifo_errors++;
+ if (phy_cause & cphy_cause_module_change)
+ t3_os_phymod_changed(adapter, i);
+ }
+ }
+
+ t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
+ return 0;
+}
+
+/*
+ * T3 slow path (non-data) interrupt handler.
+ */
+int t3_slow_intr_handler(struct adapter *adapter)
+{
+ u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
+
+ cause &= adapter->slow_intr_mask;
+ if (!cause)
+ return 0;
+ if (cause & F_PCIM0) {
+ if (is_pcie(adapter))
+ pcie_intr_handler(adapter);
+ else
+ pci_intr_handler(adapter);
+ }
+ if (cause & F_SGE3)
+ t3_sge_err_intr_handler(adapter);
+ if (cause & F_MC7_PMRX)
+ mc7_intr_handler(&adapter->pmrx);
+ if (cause & F_MC7_PMTX)
+ mc7_intr_handler(&adapter->pmtx);
+ if (cause & F_MC7_CM)
+ mc7_intr_handler(&adapter->cm);
+ if (cause & F_CIM)
+ cim_intr_handler(adapter);
+ if (cause & F_TP1)
+ tp_intr_handler(adapter);
+ if (cause & F_ULP2_RX)
+ ulprx_intr_handler(adapter);
+ if (cause & F_ULP2_TX)
+ ulptx_intr_handler(adapter);
+ if (cause & F_PM1_RX)
+ pmrx_intr_handler(adapter);
+ if (cause & F_PM1_TX)
+ pmtx_intr_handler(adapter);
+ if (cause & F_CPL_SWITCH)
+ cplsw_intr_handler(adapter);
+ if (cause & F_MPS0)
+ mps_intr_handler(adapter);
+ if (cause & F_MC5A)
+ t3_mc5_intr_handler(&adapter->mc5);
+ if (cause & F_XGMAC0_0)
+ mac_intr_handler(adapter, 0);
+ if (cause & F_XGMAC0_1)
+ mac_intr_handler(adapter, 1);
+ if (cause & F_T3DBG)
+ t3_os_ext_intr_handler(adapter);
+
+ /* Clear the interrupts just processed. */
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
+ t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+ return 1;
+}
+
+static unsigned int calc_gpio_intr(struct adapter *adap)
+{
+ unsigned int i, gpi_intr = 0;
+
+ for_each_port(adap, i)
+ if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
+ adapter_info(adap)->gpio_intr[i])
+ gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
+ return gpi_intr;
+}
+
+/**
+ * t3_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable interrupts by setting the interrupt enable registers of the
+ * various HW modules and then enabling the top-level interrupt
+ * concentrator.
+ */
+void t3_intr_enable(struct adapter *adapter)
+{
+ static const struct addr_val_pair intr_en_avp[] = {
+ {A_SG_INT_ENABLE, SGE_INTR_MASK},
+ {A_MC7_INT_ENABLE, MC7_INTR_MASK},
+ {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+ MC7_INTR_MASK},
+ {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+ MC7_INTR_MASK},
+ {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
+ {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
+ {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
+ {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
+ {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
+ {A_MPS_INT_ENABLE, MPS_INTR_MASK},
+ };
+
+ adapter->slow_intr_mask = PL_INTR_MASK;
+
+ t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
+ t3_write_reg(adapter, A_TP_INT_ENABLE,
+ adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
+
+ if (adapter->params.rev > 0) {
+ t3_write_reg(adapter, A_CPL_INTR_ENABLE,
+ CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
+ t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
+ ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
+ F_PBL_BOUND_ERR_CH1);
+ } else {
+ t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
+ t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
+ }
+
+ t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
+
+ if (is_pcie(adapter))
+ t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
+ else
+ t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
+ t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
+ t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+}
+
+/**
+ * t3_intr_disable - disable a card's interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrator; the per-module interrupt enables are left unchanged.
+ */
+void t3_intr_disable(struct adapter *adapter)
+{
+ t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
+ t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+ adapter->slow_intr_mask = 0;
+}
+
+/**
+ * t3_intr_clear - clear all interrupts
+ * @adapter: the adapter whose interrupts should be cleared
+ *
+ * Clears all interrupts.
+ */
+void t3_intr_clear(struct adapter *adapter)
+{
+ static const unsigned int cause_reg_addr[] = {
+ A_SG_INT_CAUSE,
+ A_SG_RSPQ_FL_STATUS,
+ A_PCIX_INT_CAUSE,
+ A_MC7_INT_CAUSE,
+ A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+ A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+ A_CIM_HOST_INT_CAUSE,
+ A_TP_INT_CAUSE,
+ A_MC5_DB_INT_CAUSE,
+ A_ULPRX_INT_CAUSE,
+ A_ULPTX_INT_CAUSE,
+ A_CPL_INTR_CAUSE,
+ A_PM1_TX_INT_CAUSE,
+ A_PM1_RX_INT_CAUSE,
+ A_MPS_INT_CAUSE,
+ A_T3DBG_INT_CAUSE,
+ };
+ unsigned int i;
+
+ /* Clear PHY and MAC interrupts for each port. */
+ for_each_port(adapter, i)
+ t3_port_intr_clear(adapter, i);
+
+ for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
+ t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
+
+ if (is_pcie(adapter))
+ t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
+ t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+}
+
+void t3_xgm_intr_enable(struct adapter *adapter, int idx)
+{
+ struct port_info *pi = adap2pinfo(adapter, idx);
+
+ t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
+ XGM_EXTRA_INTR_MASK);
+}
+
+void t3_xgm_intr_disable(struct adapter *adapter, int idx)
+{
+ struct port_info *pi = adap2pinfo(adapter, idx);
+
+ t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
+ 0x7ff);
+}
+
+/**
+ * t3_port_intr_enable - enable port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts should be enabled
+ *
+ * Enable port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+void t3_port_intr_enable(struct adapter *adapter, int idx)
+{
+ struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
+ t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+ phy->ops->intr_enable(phy);
+}
+
+/**
+ * t3_port_intr_disable - disable port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts should be disabled
+ *
+ * Disable port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+void t3_port_intr_disable(struct adapter *adapter, int idx)
+{
+ struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
+ t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+ phy->ops->intr_disable(phy);
+}
+
+/**
+ * t3_port_intr_clear - clear port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts to clear
+ *
+ * Clear port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+static void t3_port_intr_clear(struct adapter *adapter, int idx)
+{
+ struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
+ t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
+ phy->ops->intr_clear(phy);
+}
+
+#define SG_CONTEXT_CMD_ATTEMPTS 100
+
+/**
+ * t3_sge_write_context - write an SGE context
+ * @adapter: the adapter
+ * @id: the context id
+ * @type: the context type
+ *
+ * Program an SGE context with the values already loaded in the
+ * CONTEXT_DATA? registers.
+ */
+static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
+ unsigned int type)
+{
+ if (type == F_RESPONSEQ) {
+ /*
+ * Can't write the Response Queue Context bits for
+ * Interrupt Armed or the Reserve bits after the chip
+ * has been initialized out of reset. Writing to these
+ * bits can confuse the hardware.
+ */
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
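+ /* i.e. bits 27 and 29-31 of word 2 are left unwritten */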
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ } else {
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ }
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * clear_sge_ctxt - completely clear an SGE context
+ * @adap: the adapter
+ * @id: the context id
+ * @type: the context type
+ *
+ * Completely clear an SGE context. Used predominantly at post-reset
+ * initialization. Note in particular that we don't skip writing to any
+ * "sensitive bits" in the contexts the way that t3_sge_write_context()
+ * does ...
+ */
+static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
+ unsigned int type)
+{
+ t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+ return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_init_ecntxt - initialize an SGE egress context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @gts_enable: whether to enable GTS for the context
+ * @type: the egress context type
+ * @respq: associated response queue
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @token: uP token
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE egress context and make it ready for use. If the
+ * platform allows concurrent context operations, the caller is
+ * responsible for appropriate locking.
+ */
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+ enum sge_context_type type, int respq, u64 base_addr,
+ unsigned int size, unsigned int token, int gen,
+ unsigned int cidx)
+{
+ unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
+
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
+ V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
+ V_EC_BASE_LO(base_addr & 0xffff));
+ base_addr >>= 16;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+ V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
+ V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
+ F_EC_VALID);
+ return t3_sge_write_context(adapter, id, F_EGRESS);
+}
+
+/**
+ * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @gts_enable: whether to enable GTS for the context
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @bsize: size of each buffer for this queue
+ * @cong_thres: threshold to signal congestion to upstream producers
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE free list context and make it ready for use. The
+ * caller is responsible for ensuring only one context operation occurs
+ * at a time.
+ */
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+ int gts_enable, u64 base_addr, unsigned int size,
+ unsigned int bsize, unsigned int cong_thres, int gen,
+ unsigned int cidx)
+{
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
+ V_FL_BASE_HI((u32) base_addr) |
+ V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
+ V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
+ V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+ V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
+ V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
+ return t3_sge_write_context(adapter, id, F_FREELIST);
+}
+
+/**
+ * t3_sge_init_rspcntxt - initialize an SGE response queue context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @fl_thres: threshold for selecting the normal or jumbo free list
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE response queue context and make it ready for use.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+ int irq_vec_idx, u64 base_addr, unsigned int size,
+ unsigned int fl_thres, int gen, unsigned int cidx)
+{
+ unsigned int intr = 0;
+
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
+ V_CQ_INDEX(cidx));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+ base_addr >>= 32;
+ if (irq_vec_idx >= 0)
+ intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+ V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
+ return t3_sge_write_context(adapter, id, F_RESPONSEQ);
+}
+
+/**
+ * t3_sge_init_cqcntxt - initialize an SGE completion queue context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @rspq: response queue for async notifications
+ * @ovfl_mode: CQ overflow mode
+ * @credits: completion queue credits
+ * @credit_thres: the credit threshold
+ *
+ * Initialize an SGE completion queue context and make it ready for use.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+ unsigned int size, int rspq, int ovfl_mode,
+ unsigned int credits, unsigned int credit_thres)
+{
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+ V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
+ V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
+ V_CQ_ERR(ovfl_mode));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
+ V_CQ_CREDIT_THRES(credit_thres));
+ return t3_sge_write_context(adapter, id, F_CQ);
+}
+
+/**
+ * t3_sge_enable_ecntxt - enable/disable an SGE egress context
+ * @adapter: the adapter
+ * @id: the egress context id
+ * @enable: enable (1) or disable (0) the context
+ *
+ * Enable or disable an SGE egress context. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_disable_fl - disable an SGE free-buffer list
+ * @adapter: the adapter
+ * @id: the free list context id
+ *
+ * Disable an SGE free-buffer list. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_disable_rspcntxt - disable an SGE response queue
+ * @adapter: the adapter
+ * @id: the response queue context id
+ *
+ * Disable an SGE response queue. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_disable_cqcntxt - disable an SGE completion queue
+ * @adapter: the adapter
+ * @id: the completion queue context id
+ *
+ * Disable an SGE completion queue. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_cqcntxt_op - perform an operation on a completion queue context
+ * @adapter: the adapter
+ * @id: the context id
+ * @op: the operation to perform
+ * @credits: credit value to write
+ *
+ * Perform the selected operation on an SGE completion queue context.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+ unsigned int credits)
+{
+ u32 val;
+
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
+ V_CONTEXT(id) | F_CQ);
+ if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
+ return -EIO;
+
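+ /*
+ * Command opcodes 2-6 return the current CQ index: rev > 0 parts
+ * report it directly in the readback value, while rev 0 needs an
+ * explicit context read (opcode 0) to fetch it from DATA0.
+ */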
+ if (op >= 2 && op < 7) {
+ if (adapter->params.rev > 0)
+ return G_CQ_INDEX(val);
+
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
+ if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
+ F_CONTEXT_CMD_BUSY, 0,
+ SG_CONTEXT_CMD_ATTEMPTS, 1))
+ return -EIO;
+ return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
+ }
+ return 0;
+}
+
+/**
+ * t3_config_rss - configure Rx packet steering
+ * @adapter: the adapter
+ * @rss_config: RSS settings (written to TP_RSS_CONFIG)
+ * @cpus: values for the CPU lookup table (0xff terminated)
+ * @rspq: values for the response queue lookup table (0xffff terminated)
+ *
+ * Programs the receive packet steering logic. @cpus and @rspq provide
+ * the values for the CPU and response queue lookup tables. If they
+ * provide fewer values than the size of the tables the supplied values
+ * are used repeatedly until the tables are fully populated.
+ */
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+ const u8 * cpus, const u16 *rspq)
+{
+ int i, j, cpu_idx = 0, q_idx = 0;
+
+ if (cpus)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ u32 val = i << 16;
+
+ for (j = 0; j < 2; ++j) {
+ val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
+ if (cpus[cpu_idx] == 0xff)
+ cpu_idx = 0;
+ }
+ t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
+ }
+
+ if (rspq)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
+ (i << 16) | rspq[q_idx++]);
+ if (rspq[q_idx] == 0xffff)
+ q_idx = 0;
+ }
+
+ t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
+}
+
+/**
+ * t3_tp_set_offload_mode - put TP in NIC/offload mode
+ * @adap: the adapter
+ * @enable: 1 to select offload mode, 0 for regular NIC
+ *
+ * Switches TP to NIC/offload mode.
+ */
+void t3_tp_set_offload_mode(struct adapter *adap, int enable)
+{
+ if (is_offload(adap) || !enable)
+ t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
+ V_NICMODE(!enable));
+}
+
+/**
+ * pm_num_pages - calculate the number of pages of the payload memory
+ * @mem_size: the size of the payload memory
+ * @pg_size: the size of each payload memory page
+ *
+ * Calculate the number of pages, each of the given size, that fit in a
+ * memory of the specified size, respecting the HW requirement that the
+ * number of pages must be a multiple of 24.
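+ * For example, 64 MiB of memory with 64 KiB pages gives n = 1024,
+ * which is trimmed to 1008, the largest multiple of 24 that fits.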
+ */
+static inline unsigned int pm_num_pages(unsigned int mem_size,
+ unsigned int pg_size)
+{
+ unsigned int n = mem_size / pg_size;
+
+ return n - n % 24;
+}
+
+#define mem_region(adap, start, size, reg) \
+ t3_write_reg((adap), A_ ## reg, (start)); \
+ start += size
+
+/**
+ * partition_mem - partition memory and configure TP memory settings
+ * @adap: the adapter
+ * @p: the TP parameters
+ *
+ * Partitions context and payload memory and configures TP's memory
+ * registers.
+ */
+static void partition_mem(struct adapter *adap, const struct tp_params *p)
+{
+ unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
+ unsigned int timers = 0, timers_shift = 22;
+
+ if (adap->params.rev > 0) {
+ if (tids <= 16 * 1024) {
+ timers = 1;
+ timers_shift = 16;
+ } else if (tids <= 64 * 1024) {
+ timers = 2;
+ timers_shift = 18;
+ } else if (tids <= 256 * 1024) {
+ timers = 3;
+ timers_shift = 20;
+ }
+ }
+
+ t3_write_reg(adap, A_TP_PMM_SIZE,
+ p->chan_rx_size | (p->chan_tx_size >> 16));
+
+ t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
+ t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
+ t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
+ V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
+
+ t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
+ t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
+ t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
+
+ pstructs = p->rx_num_pgs + p->tx_num_pgs;
+ /* Add a bit of headroom and make multiple of 24 */
+ pstructs += 48;
+ pstructs -= pstructs % 24;
+ t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
+
+ m = tids * TCB_SIZE;
+ mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
+ mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
+ t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
+ m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
+ mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
+ mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
+ mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
+ mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
+
+ m = (m + 4095) & ~0xfff;
+ t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
+ t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
+
+ tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
+ m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+ adap->params.mc5.nfilters - adap->params.mc5.nroutes;
+ if (tids < m)
+ adap->params.mc5.nservers += m - tids;
+}
+
+static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
+ u32 val)
+{
+ t3_write_reg(adap, A_TP_PIO_ADDR, addr);
+ t3_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+static void tp_config(struct adapter *adap, const struct tp_params *p)
+{
+ t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
+ F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
+ F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
+ t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
+ F_MTUENABLE | V_WINDOWSCALEMODE(1) |
+ V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+ t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
+ V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
+ V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
+ F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
+ t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
+ F_IPV6ENABLE | F_NICMODE);
+ t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
+ t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
+ t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
+ adap->params.rev > 0 ? F_ENABLEESND :
+ F_T3A_ENABLEESND);
+
+ t3_set_reg_field(adap, A_TP_PC_CONFIG,
+ F_ENABLEEPCMDAFULL,
+ F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
+ F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
+ F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
+ F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
+ t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
+ t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
+
+ if (adap->params.rev > 0) {
+ tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
+ F_TXPACEAUTO);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
+ } else
+ t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
+
+ if (adap->params.rev == T3_REV_C)
+ t3_set_reg_field(adap, A_TP_PC_CONFIG,
+ V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
+ V_TABLELATENCYDELTA(4));
+
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
+ t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
+ t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
+}
+
+/* Desired TP timer resolution in usec */
+#define TP_TMR_RES 50
+
+/* TCP timer values in ms */
+#define TP_DACK_TIMER 50
+#define TP_RTO_MIN 250
+
+/**
+ * tp_set_timers - set TP timing parameters
+ * @adap: the adapter to set
+ * @core_clk: the core clock frequency in Hz
+ *
+ * Set TP's timing parameters, such as the various timer resolutions and
+ * the TCP timer values.
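+ * For example, a 200 MHz core clock gives tre = fls(200000000 / 20000)
+ * - 1 = 13, i.e. one timer tick every 2^13 core cycles (~41 us, near
+ * the 50 us target), so tps = core_clk >> tre is about 24414 ticks/sec.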
+ */
+static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
+{
+ unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
+ unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
+ unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
+ unsigned int tps = core_clk >> tre;
+
+ t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
+ V_DELAYEDACKRESOLUTION(dack_re) |
+ V_TIMESTAMPRESOLUTION(tstamp_re));
+ t3_write_reg(adap, A_TP_DACK_TIMER,
+ (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
+ t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
+ V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
+ V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
+ V_KEEPALIVEMAX(9));
+
+#define SECONDS * tps
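+/* "n SECONDS" expands to n * tps timer ticks at the resolution chosen
+ * above; e.g. with tps = 24414 (200 MHz core clock, tre = 13),
+ * 75 SECONDS is about 1.83M ticks. */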
+
+ t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
+ t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
+ t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
+ t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
+ t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
+ t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
+ t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
+ t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
+ t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
+
+#undef SECONDS
+}
+
+/**
+ * t3_tp_set_coalescing_size - set receive coalescing size
+ * @adap: the adapter
+ * @size: the receive coalescing size
+ * @psh: whether a set PSH bit should deliver coalesced data
+ *
+ * Set the receive coalescing size and PSH bit handling.
+ */
+static int t3_tp_set_coalescing_size(struct adapter *adap,
+ unsigned int size, int psh)
+{
+ u32 val;
+
+ if (size > MAX_RX_COALESCING_LEN)
+ return -EINVAL;
+
+ val = t3_read_reg(adap, A_TP_PARA_REG3);
+ val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
+
+ if (size) {
+ val |= F_RXCOALESCEENABLE;
+ if (psh)
+ val |= F_RXCOALESCEPSHEN;
+ size = min(MAX_RX_COALESCING_LEN, size);
+ t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
+ V_MAXRXDATA(MAX_RX_COALESCING_LEN));
+ }
+ t3_write_reg(adap, A_TP_PARA_REG3, val);
+ return 0;
+}
+
+/**
+ * t3_tp_set_max_rxsize - set the max receive size
+ * @adap: the adapter
+ * @size: the max receive size
+ *
+ * Set TP's max receive size. This is the limit that applies when
+ * receive coalescing is disabled.
+ */
+static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
+{
+ t3_write_reg(adap, A_TP_PARA_REG7,
+ V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
+}
+
+static void init_mtus(unsigned short mtus[])
+{
+ /*
+ * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
+ * it can accommodate max size TCP/IP headers when SACK and timestamps
+ * are enabled and still have at least 8 bytes of payload.
+ */
+ mtus[0] = 88;
+ mtus[1] = 88;
+ mtus[2] = 256;
+ mtus[3] = 512;
+ mtus[4] = 576;
+ mtus[5] = 1024;
+ mtus[6] = 1280;
+ mtus[7] = 1492;
+ mtus[8] = 1500;
+ mtus[9] = 2002;
+ mtus[10] = 2048;
+ mtus[11] = 4096;
+ mtus[12] = 4352;
+ mtus[13] = 8192;
+ mtus[14] = 9000;
+ mtus[15] = 9600;
+}
+
+/*
+ * Initial congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+ b[9] = b[10] = 1;
+ b[11] = b[12] = 2;
+ b[13] = b[14] = b[15] = b[16] = 3;
+ b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+ b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+ b[28] = b[29] = 6;
+ b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t3_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the unrestricted values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ * @mtu_cap: the maximum permitted effective MTU
+ *
+ * Write the MTU table with the supplied MTUs capping each at @mtu_cap.
+ * Update the high-speed congestion control table with the supplied alpha,
+ * beta, and MTUs.
+ */
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+ unsigned short alpha[NCCTRL_WIN],
+ unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = min(mtus[i], mtu_cap);
+ unsigned int log2 = fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
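+ /*
+ * e.g. mtu = 1500: fls() = 11 but bit 9 (512) is clear, so log2
+ * drops to 10; values of 1536 and up would keep 11. This rounds
+ * log2(mtu) to the nearest integer with the cutoff at 1.5 * 2^n.
+ */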
+ t3_write_reg(adap, A_TP_MTU_TABLE,
+ (i << 24) | (log2 << 16) | mtu);
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
+
+/**
+ * t3_tp_get_mib_stats - read TP's MIB counters
+ * @adap: the adapter
+ * @tps: holds the returned counter values
+ *
+ * Returns the values of TP's MIB counters.
+ */
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
+{
+ t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
+ sizeof(*tps) / sizeof(u32), 0);
+}
+
+#define ulp_region(adap, name, start, len) \
+ t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
+ t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
+ (start) + (len) - 1); \
+ start += len
+
+#define ulptx_region(adap, name, start, len) \
+ t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
+ t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
+ (start) + (len) - 1)
+
+static void ulp_config(struct adapter *adap, const struct tp_params *p)
+{
+ unsigned int m = p->chan_rx_size;
+
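+ /*
+ * ulp_region() advances m past each region; ulptx_region() does not,
+ * so each ULP TX region shares its address range with the ULP RX
+ * region programmed on the next line (TPT with STAG, PBL with PBL).
+ */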
+ ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
+ ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
+ ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
+ ulp_region(adap, STAG, m, p->chan_rx_size / 4);
+ ulp_region(adap, RQ, m, p->chan_rx_size / 4);
+ ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
+ ulp_region(adap, PBL, m, p->chan_rx_size / 4);
+ t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
+}
+
+/**
+ * t3_set_proto_sram - set the contents of the protocol sram
+ * @adap: the adapter
+ * @data: the protocol image
+ *
+ * Write the contents of the protocol SRAM.
+ */
+int t3_set_proto_sram(struct adapter *adap, const u8 *data)
+{
+ int i;
+ const __be32 *buf = (const __be32 *)data;
+
+ for (i = 0; i < PROTO_SRAM_LINES; i++) {
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
+
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
+ if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
+ return -EIO;
+ }
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
+
+ return 0;
+}
+
+void t3_config_trace_filter(struct adapter *adapter,
+ const struct trace_params *tp, int filter_index,
+ int invert, int enable)
+{
+ u32 addr, key[4], mask[4];
+
+ key[0] = tp->sport | (tp->sip << 16);
+ key[1] = (tp->sip >> 16) | (tp->dport << 16);
+ key[2] = tp->dip;
+ key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
+
+ mask[0] = tp->sport_mask | (tp->sip_mask << 16);
+ mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
+ mask[2] = tp->dip_mask;
+ mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
+
+ if (invert)
+ key[3] |= (1 << 29);
+ if (enable)
+ key[3] |= (1 << 28);
+
+ addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
+ tp_wr_indirect(adapter, addr++, key[0]);
+ tp_wr_indirect(adapter, addr++, mask[0]);
+ tp_wr_indirect(adapter, addr++, key[1]);
+ tp_wr_indirect(adapter, addr++, mask[1]);
+ tp_wr_indirect(adapter, addr++, key[2]);
+ tp_wr_indirect(adapter, addr++, mask[2]);
+ tp_wr_indirect(adapter, addr++, key[3]);
+ tp_wr_indirect(adapter, addr, mask[3]);
+ t3_read_reg(adapter, A_TP_PIO_DATA);
+}
+
+/**
+ * t3_config_sched - configure a HW traffic scheduler
+ * @adap: the adapter
+ * @kbps: target rate in Kbps
+ * @sched: the scheduler index
+ *
+ * Configure a HW scheduler for the target rate.
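+ * The rate is realized as a whole number of bytes (bpt) sent every
+ * cpt core-clock ticks. For example, with a 200 MHz core clock a
+ * 10 Mbps target (1250000 bytes/sec) is met exactly by cpt = 160,
+ * bpt = 1, since 200000000 / 160 = 1250000 ticks/sec.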
+ */
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
+{
+ unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
+ unsigned int clk = adap->params.vpd.cclk * 1000;
+ unsigned int selected_cpt = 0, selected_bpt = 0;
+
+ if (kbps > 0) {
+ kbps *= 125; /* -> bytes */
+ for (cpt = 1; cpt <= 255; cpt++) {
+ tps = clk / cpt;
+ bpt = (kbps + tps / 2) / tps;
+ if (bpt > 0 && bpt <= 255) {
+ v = bpt * tps;
+ delta = v >= kbps ? v - kbps : kbps - v;
+ if (delta <= mindelta) {
+ mindelta = delta;
+ selected_cpt = cpt;
+ selected_bpt = bpt;
+ }
+ } else if (selected_cpt)
+ break;
+ }
+ if (!selected_cpt)
+ return -EINVAL;
+ }
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR,
+ A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
+ else
+ v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
+ t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
+ return 0;
+}
+
+static int tp_init(struct adapter *adap, const struct tp_params *p)
+{
+ int busy = 0;
+
+ tp_config(adap, p);
+ t3_set_vlan_accel(adap, 3, 0);
+
+ if (is_offload(adap)) {
+ tp_set_timers(adap, adap->params.vpd.cclk * 1000);
+ t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
+ busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
+ 0, 1000, 5);
+ if (busy)
+ CH_ERR(adap, "TP initialization timed out\n");
+ }
+
+ if (!busy)
+ t3_write_reg(adap, A_TP_RESET, F_TPRESET);
+ return busy;
+}
+
+/*
+ * Perform the bits of HW initialization that are dependent on the Tx
+ * channels being used.
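+ * (chan_map is a bitmap of active Tx channels: 1 = channel 0 only,
+ * 2 = channel 1 only, 3 = both.)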
+ */
+static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
+{
+ int i;
+
+ if (chan_map != 3) { /* one channel */
+ t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
+ t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
+ t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
+ (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
+ F_TPTXPORT1EN | F_PORT1ACTIVE));
+ t3_write_reg(adap, A_PM1_TX_CFG,
+ chan_map == 1 ? 0xffffffff : 0);
+ } else { /* two channels */
+ t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
+ t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
+ t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
+ V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
+ t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
+ F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
+ F_ENFORCEPKT);
+ t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(0xaa));
+ for (i = 0; i < 16; i++)
+ t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
+ (i << 16) | 0x1010);
+ }
+}
+
+static int calibrate_xgm(struct adapter *adapter)
+{
+ if (uses_xaui(adapter)) {
+ unsigned int v, i;
+
+ for (i = 0; i < 5; ++i) {
+ t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
+ t3_read_reg(adapter, A_XGM_XAUI_IMP);
+ msleep(1);
+ v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
+ if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
+ t3_write_reg(adapter, A_XGM_XAUI_IMP,
+ V_XAUIIMP(G_CALIMP(v) >> 2));
+ return 0;
+ }
+ }
+ CH_ERR(adapter, "MAC calibration failed\n");
+ return -1;
+ } else {
+ t3_write_reg(adapter, A_XGM_RGMII_IMP,
+ V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+ F_XGM_IMPSETUPDATE);
+ }
+ return 0;
+}
+
+static void calibrate_xgm_t3b(struct adapter *adapter)
+{
+ if (!uses_xaui(adapter)) {
+ t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
+ F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
+ F_XGM_IMPSETUPDATE);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+ 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
+ }
+}
+
+struct mc7_timing_params {
+ unsigned char ActToPreDly;
+ unsigned char ActToRdWrDly;
+ unsigned char PreCyc;
+ unsigned char RefCyc[5];
+ unsigned char BkCyc;
+ unsigned char WrToRdDly;
+ unsigned char RdToWrDly;
+};
+
+/*
+ * Write a value to a register and check that the write completed. These
+ * writes normally complete in a cycle or two, so one read should suffice.
+ * The very first read exists to flush the posted write to the device.
+ */
+static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
+{
+ t3_write_reg(adapter, addr, val);
+ t3_read_reg(adapter, addr); /* flush */
+ if (!(t3_read_reg(adapter, addr) & F_BUSY))
+ return 0;
+ CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
+ return -EIO;
+}
+
+static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
+{
+ static const unsigned int mc7_mode[] = {
+ 0x632, 0x642, 0x652, 0x432, 0x442
+ };
+ static const struct mc7_timing_params mc7_timings[] = {
+ {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
+ {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
+ {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
+ {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
+ {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
+ };
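+ /* one mode/timing row per memory speed grade; mem_type is the
+ * VPD mem_timing value passed in by t3_init_hw() */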
+
+ u32 val;
+ unsigned int width, density, slow, attempts;
+ struct adapter *adapter = mc7->adapter;
+ const struct mc7_timing_params *p = &mc7_timings[mem_type];
+
+ if (!mc7->size)
+ return 0;
+
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+ slow = val & F_SLOW;
+ width = G_WIDTH(val);
+ density = G_DEN(val);
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+ msleep(1);
+
+ if (!slow) {
+ t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
+ t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
+ msleep(1);
+ if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
+ (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
+ CH_ERR(adapter, "%s MC7 calibration timed out\n",
+ mc7->name);
+ goto out_fail;
+ }
+ }
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
+ V_ACTTOPREDLY(p->ActToPreDly) |
+ V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
+ V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
+ V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
+ val | F_CLKEN | F_TERM150);
+ t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+
+ if (!slow)
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
+ F_DLLENB);
+ udelay(1);
+
+ val = slow ? 3 : 6;
+ if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+ goto out_fail;
+
+ if (!slow) {
+ t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
+ udelay(5);
+ }
+
+ if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
+ mc7_mode[mem_type]) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+ goto out_fail;
+
+ /* clock value is in KHz */
+ mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
+ mc7_clock /= 1000000; /* KHz->MHz, ns->us */
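+ /*
+ * i.e. the number of memory-clock cycles in one 7812.5 ns refresh
+ * interval (64 ms / 8192 rows); a 200 MHz part (mc7_clock = 200000)
+ * gives V_PREREFDIV(1562).
+ */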
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_REF,
+ F_PERREFEN | V_PREREFDIV(mc7_clock));
+ t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
+ (mc7->size << width) - 1);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
+ t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
+
+ attempts = 50;
+ do {
+ msleep(250);
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
+ } while ((val & F_BUSY) && --attempts);
+ if (val & F_BUSY) {
+ CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
+ goto out_fail;
+ }
+
+ /* Enable normal memory accesses. */
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
+ return 0;
+
+out_fail:
+ return -1;
+}
+
+static void config_pcie(struct adapter *adap)
+{
+ static const u16 ack_lat[4][6] = {
+ {237, 416, 559, 1071, 2095, 4143},
+ {128, 217, 289, 545, 1057, 2081},
+ {73, 118, 154, 282, 538, 1050},
+ {67, 107, 86, 150, 278, 534}
+ };
+ static const u16 rpl_tmr[4][6] = {
+ {711, 1248, 1677, 3213, 6285, 12429},
+ {384, 651, 867, 1635, 3171, 6243},
+ {219, 354, 462, 846, 1614, 3150},
+ {201, 321, 258, 450, 834, 1602}
+ };
+
+ u16 val, devid;
+ unsigned int log2_width, pldsize;
+ unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
+
+ pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
+ pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
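+ /* pldsize selects the payload-size column of the tables above:
+ * the max payload is 128 << pldsize bytes (128B .. 4KB) */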
+
+ pci_read_config_word(adap->pdev, 0x2, &devid);
+ if (devid == 0x37) {
+ pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
+ val & ~PCI_EXP_DEVCTL_READRQ &
+ ~PCI_EXP_DEVCTL_PAYLOAD);
+ pldsize = 0;
+ }
+
+ pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
+
+ fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
+ fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
+ G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
+ log2_width = fls(adap->params.pci.width) - 1;
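+ /* x1/x2/x4/x8 negotiated link widths map to table rows 0-3 */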
+ acklat = ack_lat[log2_width][pldsize];
+ if (val & PCI_EXP_LNKCTL_ASPM_L0S) /* check L0s enable */
+ acklat += fst_trn_tx * 4;
+ rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
+
+ if (adap->params.rev == 0)
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
+ V_T3A_ACKLAT(M_T3A_ACKLAT),
+ V_T3A_ACKLAT(acklat));
+ else
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
+ V_ACKLAT(acklat));
+
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
+ V_REPLAYLMT(rpllmt));
+
+ t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
+ t3_set_reg_field(adap, A_PCIE_CFG, 0,
+ F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
+ F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
+}
+
+/*
+ * Initialize and configure T3 HW modules. This performs the
+ * initialization steps that need to be done once after a card is reset.
+ * MAC and PHY initialization is handled separately whenever a port is enabled.
+ *
+ * fw_params are passed to FW and their value is platform dependent. Only the
+ * top 8 bits are available for use, the rest must be 0.
+ */
+int t3_init_hw(struct adapter *adapter, u32 fw_params)
+{
+ int err = -EIO, attempts, i;
+ const struct vpd_params *vpd = &adapter->params.vpd;
+
+ if (adapter->params.rev > 0)
+ calibrate_xgm_t3b(adapter);
+ else if (calibrate_xgm(adapter))
+ goto out_err;
+
+ if (vpd->mclk) {
+ partition_mem(adapter, &adapter->params.tp);
+
+ if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
+ mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
+ mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
+ t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
+ adapter->params.mc5.nfilters,
+ adapter->params.mc5.nroutes))
+ goto out_err;
+
+ for (i = 0; i < 32; i++)
+ if (clear_sge_ctxt(adapter, i, F_CQ))
+ goto out_err;
+ }
+
+ if (tp_init(adapter, &adapter->params.tp))
+ goto out_err;
+
+ t3_tp_set_coalescing_size(adapter,
+ min(adapter->params.sge.max_pkt_size,
+ MAX_RX_COALESCING_LEN), 1);
+ t3_tp_set_max_rxsize(adapter,
+ min(adapter->params.sge.max_pkt_size, 16384U));
+ ulp_config(adapter, &adapter->params.tp);
+
+ if (is_pcie(adapter))
+ config_pcie(adapter);
+ else
+ t3_set_reg_field(adapter, A_PCIX_CFG, 0,
+ F_DMASTOPEN | F_CLIDECEN);
+
+ if (adapter->params.rev == T3_REV_C)
+ t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
+ F_CFG_CQE_SOP_MASK);
+
+ t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
+ t3_write_reg(adapter, A_PM1_RX_MODE, 0);
+ t3_write_reg(adapter, A_PM1_TX_MODE, 0);
+ chan_init_hw(adapter, adapter->params.chan_map);
+ t3_sge_init(adapter, &adapter->params.sge);
+ t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
+
+ t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
+
+ t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
+ t3_write_reg(adapter, A_CIM_BOOT_CFG,
+ V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
+ t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
+
+ attempts = 100;
+ do { /* wait for uP to initialize */
+ msleep(20);
+ } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
+ if (!attempts) {
+ CH_ERR(adapter, "uP initialization timed out\n");
+ goto out_err;
+ }
+
+ err = 0;
+out_err:
+ return err;
+}
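+
+/*
+ * Usage sketch (not part of the driver): the main driver is expected to
+ * call t3_prep_adapter() once at probe time and then t3_init_hw() after
+ * each card reset, e.g. t3_init_hw(adapter, 0) when no extra FW
+ * parameters are needed.
+ */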
+
+/**
+ * get_pci_mode - determine a card's PCI mode
+ * @adapter: the adapter
+ * @p: where to store the PCI settings
+ *
+ * Determines a card's PCI mode and associated parameters, such as speed
+ * and width.
+ */
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
+{
+ static unsigned short speed_map[] = { 33, 66, 100, 133 };
+ u32 pci_mode;
+
+ if (pci_is_pcie(adapter->pdev)) {
+ u16 val;
+
+ p->variant = PCI_VARIANT_PCIE;
+ pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
+ p->width = (val >> 4) & 0x3f;
+ return;
+ }
+
+ pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
+ p->speed = speed_map[G_PCLKRANGE(pci_mode)];
+ p->width = (pci_mode & F_64BIT) ? 64 : 32;
+ pci_mode = G_PCIXINITPAT(pci_mode);
+ if (pci_mode == 0)
+ p->variant = PCI_VARIANT_PCI;
+ else if (pci_mode < 4)
+ p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
+ else if (pci_mode < 8)
+ p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
+ else
+ p->variant = PCI_VARIANT_PCIX_266_MODE2;
+}
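+
+/*
+ * Note: the "(val >> 4) & 0x3f" above extracts the negotiated link width
+ * field (PCI_EXP_LNKSTA_NLW, mask 0x03f0) from the PCIe Link Status
+ * register, so p->width ends up as the lane count (x1, x4, x8, ...).
+ */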
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: information about the current card
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/duplex/flow-control/autonegotiation
+ * settings.
+ */
+static void init_link_config(struct link_config *lc, unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = lc->speed = SPEED_INVALID;
+ lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising = lc->supported;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * mc7_calc_size - calculate MC7 memory size
+ * @cfg: the MC7 configuration
+ *
+ * Calculates the size of an MC7 memory in bytes from the value of its
+ * configuration register.
+ */
+static unsigned int mc7_calc_size(u32 cfg)
+{
+ unsigned int width = G_WIDTH(cfg);
+ unsigned int banks = !!(cfg & F_BKS) + 1;
+ unsigned int org = !!(cfg & F_ORG) + 1;
+ unsigned int density = G_DEN(cfg);
+ unsigned int MBs = ((256 << density) * banks) / (org << width);
+
+ return MBs << 20;
+}
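+
+/*
+ * Worked example for mc7_calc_size(): a hypothetical cfg with density
+ * code 1, both F_BKS and F_ORG set, and width code 1 gives
+ * ((256 << 1) * 2) / (2 << 1) = 256 MB, i.e. a returned size of 256 << 20.
+ */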
+
+static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+ unsigned int base_addr, const char *name)
+{
+ u32 cfg;
+
+ mc7->adapter = adapter;
+ mc7->name = name;
+ mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
+ cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+ mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
+ mc7->width = G_WIDTH(cfg);
+}
+
+static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
+{
+ u16 devid;
+
+ mac->adapter = adapter;
+ pci_read_config_word(adapter->pdev, 0x2, &devid);
+
+ if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
+ index = 0;
+ mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
+ mac->nucast = 1;
+
+ if (adapter->params.rev == 0 && uses_xaui(adapter)) {
+ t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
+ is_10G(adapter) ? 0x2901c04 : 0x2301c04);
+ t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
+ F_ENRGMII, 0);
+ }
+}
+
+static void early_hw_init(struct adapter *adapter,
+ const struct adapter_info *ai)
+{
+ u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
+
+ mi1_init(adapter, ai);
+ t3_write_reg(adapter, A_I2C_CFG, /* set for 80 kHz */
+ V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
+ t3_write_reg(adapter, A_T3DBG_GPIO_EN,
+ ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
+ t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
+ t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
+
+ if (adapter->params.rev == 0 || !uses_xaui(adapter))
+ val |= F_ENRGMII;
+
+ /* Enable MAC clocks so we can access the registers */
+ t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+ t3_read_reg(adapter, A_XGM_PORT_CFG);
+
+ val |= F_CLKDIVRESET_;
+ t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+ t3_read_reg(adapter, A_XGM_PORT_CFG);
+ t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
+ t3_read_reg(adapter, A_XGM_PORT_CFG);
+}
+
+/*
+ * Reset the adapter.
+ * Older PCIe cards lose their config space during reset; PCI-X
+ * ones don't.
+ */
+int t3_reset_adapter(struct adapter *adapter)
+{
+ int i, save_and_restore_pcie =
+ adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
+ uint16_t devid = 0;
+
+ if (save_and_restore_pcie)
+ pci_save_state(adapter->pdev);
+ t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
+
+ /*
+ * Give the device some time to reset fully, polling the PCI vendor
+ * ID (0x1425, Chelsio) until config space responds again.
+ * XXX The delay time should be tuned.
+ */
+ for (i = 0; i < 10; i++) {
+ msleep(50);
+ pci_read_config_word(adapter->pdev, 0x00, &devid);
+ if (devid == 0x1425)
+ break;
+ }
+
+ if (devid != 0x1425)
+ return -1;
+
+ if (save_and_restore_pcie)
+ pci_restore_state(adapter->pdev);
+ return 0;
+}
+
+static int init_parity(struct adapter *adap)
+{
+ int i, err, addr;
+
+ if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ for (err = i = 0; !err && i < 16; i++)
+ err = clear_sge_ctxt(adap, i, F_EGRESS);
+ for (i = 0xfff0; !err && i <= 0xffff; i++)
+ err = clear_sge_ctxt(adap, i, F_EGRESS);
+ for (i = 0; !err && i < SGE_QSETS; i++)
+ err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
+ if (err)
+ return err;
+
+ t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
+ for (i = 0; i < 4; i++)
+ for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
+ t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
+ F_IBQDBGWR | V_IBQDBGQID(i) |
+ V_IBQDBGADDR(addr));
+ err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
+ F_IBQDBGBUSY, 0, 2, 1);
+ if (err)
+ return err;
+ }
+ return 0;
+}
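+
+/*
+ * init_parity() above appears to exist to seed the parity-protected SGE
+ * context and CIM IBQ memories with known-good data, so that later reads
+ * don't raise spurious parity errors once F_FATALPERREN is enabled in
+ * t3_init_hw().
+ */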
+
+/*
+ * Initialize adapter SW state for the various HW modules, set initial values
+ * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
+ * interface.
+ */
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ int reset)
+{
+ int ret;
+ unsigned int i, j = -1;
+
+ get_pci_mode(adapter, &adapter->params.pci);
+
+ adapter->params.info = ai;
+ adapter->params.nports = ai->nports0 + ai->nports1;
+ adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
+ adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
+ /*
+ * We used to only run the "adapter check task" once a second if
+ * we had PHYs which didn't support interrupts (we would check
+ * their link status once a second). Now we check other conditions
+ * in that routine which could potentially impose a very high
+ * interrupt load on the system. As such, we now always scan the
+ * adapter state once a second ...
+ */
+ adapter->params.linkpoll_period = 10;
+ adapter->params.stats_update_period = is_10G(adapter) ?
+ MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
+ adapter->params.pci.vpd_cap_addr =
+ pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ if (!adapter->params.pci.vpd_cap_addr)
+ return -ENODEV;
+ ret = get_vpd_params(adapter, &adapter->params.vpd);
+ if (ret < 0)
+ return ret;
+
+ if (reset && t3_reset_adapter(adapter))
+ return -1;
+
+ t3_sge_prep(adapter, &adapter->params.sge);
+
+ if (adapter->params.vpd.mclk) {
+ struct tp_params *p = &adapter->params.tp;
+
+ mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
+ mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
+ mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
+
+ p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
+ p->pmrx_size = t3_mc7_size(&adapter->pmrx);
+ p->pmtx_size = t3_mc7_size(&adapter->pmtx);
+ p->cm_size = t3_mc7_size(&adapter->cm);
+ p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
+ p->chan_tx_size = p->pmtx_size / p->nchan;
+ p->rx_pg_size = 64 * 1024;
+ p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
+ p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
+ p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
+ p->ntimer_qs = p->cm_size >= (128 << 20) ||
+ adapter->params.rev > 0 ? 12 : 6;
+ }
+
+ adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
+ t3_mc7_size(&adapter->pmtx) &&
+ t3_mc7_size(&adapter->cm);
+
+ if (is_offload(adapter)) {
+ adapter->params.mc5.nservers = DEFAULT_NSERVERS;
+ adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
+ DEFAULT_NFILTERS : 0;
+ adapter->params.mc5.nroutes = 0;
+ t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
+
+ init_mtus(adapter->params.mtus);
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+ }
+
+ early_hw_init(adapter, ai);
+ ret = init_parity(adapter);
+ if (ret)
+ return ret;
+
+ for_each_port(adapter, i) {
+ u8 hw_addr[6];
+ const struct port_type_info *pti;
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ while (!adapter->params.vpd.port_type[++j])
+ ;
+
+ pti = &port_types[adapter->params.vpd.port_type[j]];
+ if (!pti->phy_prep) {
+ CH_ALERT(adapter, "Invalid port type index %d\n",
+ adapter->params.vpd.port_type[j]);
+ return -EINVAL;
+ }
+
+ p->phy.mdio.dev = adapter->port[i];
+ ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+ ai->mdio_ops);
+ if (ret)
+ return ret;
+ mac_prep(&p->mac, adapter, j);
+
+ /*
+ * The VPD EEPROM stores the base Ethernet address for the
+ * card. A port's address is derived from the base by adding
+ * the port's index to the base's low octet.
+ */
+ memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
+ hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
+
+ eth_hw_addr_set(adapter->port[i], hw_addr);
+ init_link_config(&p->link_config, p->phy.caps);
+ p->phy.ops->power_down(&p->phy, 1);
+
+ /*
+ * If the PHY doesn't support interrupts for link status
+ * changes, schedule a scan of the adapter links at least
+ * once a second.
+ */
+ if (!(p->phy.caps & SUPPORTED_IRQ) &&
+ adapter->params.linkpoll_period > 10)
+ adapter->params.linkpoll_period = 10;
+ }
+
+ return 0;
+}
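+
+/*
+ * Example of the per-port MAC derivation above (hypothetical base): with
+ * eth_base = 00:07:43:ab:cd:10 in the VPD, port 0 is assigned
+ * 00:07:43:ab:cd:10 and port 1 gets 00:07:43:ab:cd:11. The low-octet
+ * addition can wrap at 0xff, so bases are presumably allocated so that
+ * all ports of a card stay within one octet.
+ */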
+
+void t3_led_ready(struct adapter *adapter)
+{
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+ F_GPIO0_OUT_VAL);
+}
+
+int t3_replay_prep_adapter(struct adapter *adapter)
+{
+ const struct adapter_info *ai = adapter->params.info;
+ unsigned int i, j = -1;
+ int ret;
+
+ early_hw_init(adapter, ai);
+ ret = init_parity(adapter);
+ if (ret)
+ return ret;
+
+ for_each_port(adapter, i) {
+ const struct port_type_info *pti;
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ while (!adapter->params.vpd.port_type[++j])
+ ;
+
+ pti = &port_types[adapter->params.vpd.port_type[j]];
+ ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
+ if (ret)
+ return ret;
+ p->phy.ops->power_down(&p->phy, 1);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
new file mode 100644
index 0000000000..3c3e6cf6ac
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2006-2008 Chelsio Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _T3CDEV_H_
+#define _T3CDEV_H_
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <net/neighbour.h>
+
+#define T3CNAMSIZ 16
+
+struct cxgb3_client;
+
+enum t3ctype {
+ T3A = 0,
+ T3B,
+ T3C,
+};
+
+struct t3cdev {
+ char name[T3CNAMSIZ]; /* T3C device name */
+ enum t3ctype type;
+ struct list_head ofld_dev_list; /* for list linking */
+ struct net_device *lldev; /* LL dev associated with T3C messages */
+ struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
+ int (*send)(struct t3cdev *dev, struct sk_buff *skb);
+ int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
+ int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
+ void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
+ void *priv; /* driver private data */
+ void __rcu *l2opt; /* optional layer 2 data */
+ void *l3opt; /* optional layer 3 data */
+ void *l4opt; /* optional layer 4 data */
+ void *ulp; /* ulp stuff */
+ void *ulp_iscsi; /* ulp iscsi */
+};
+
+#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
new file mode 100644
index 0000000000..b4b2547efc
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
+#ifndef __CHELSIO_VERSION_H
+#define __CHELSIO_VERSION_H
+#define DRV_DESC "Chelsio T3 Network Driver"
+#define DRV_NAME "cxgb3"
+
+/* Firmware version */
+#define FW_VERSION_MAJOR 7
+#define FW_VERSION_MINOR 12
+#define FW_VERSION_MICRO 0
+#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
new file mode 100644
index 0000000000..8638ad42bf
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+
+/* VSC8211 PHY specific registers. */
+enum {
+ VSC8211_SIGDET_CTRL = 19,
+ VSC8211_EXT_CTRL = 23,
+ VSC8211_INTR_ENABLE = 25,
+ VSC8211_INTR_STATUS = 26,
+ VSC8211_LED_CTRL = 27,
+ VSC8211_AUX_CTRL_STAT = 28,
+ VSC8211_EXT_PAGE_AXS = 31,
+};
+
+enum {
+ VSC_INTR_RX_ERR = 1 << 0,
+ VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
+ VSC_INTR_CABLE = 1 << 2, /* cable impairment */
+ VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
+ VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
+ VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
+ VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
+ VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
+ VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
+ VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
+ VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
+ VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */
+ VSC_INTR_LINK_CHG = 1 << 13, /* link change */
+ VSC_INTR_SPD_CHG = 1 << 14, /* speed change */
+ VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
+};
+
+enum {
+ VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */
+ VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */
+};
+
+#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+ VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \
+ VSC_INTR_NEG_DONE)
+#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
+ VSC_INTR_ENABLE)
+
+/* PHY specific auxiliary control & status register fields */
+#define S_ACSR_ACTIPHY_TMR 0
+#define M_ACSR_ACTIPHY_TMR 0x3
+#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
+
+#define S_ACSR_SPEED 3
+#define M_ACSR_SPEED 0x3
+#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
+
+#define S_ACSR_DUPLEX 5
+#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
+
+#define S_ACSR_ACTIPHY 6
+#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
+
+/*
+ * Reset the PHY. This PHY completes reset immediately so we never wait.
+ */
+static int vsc8211_reset(struct cphy *cphy, int wait)
+{
+ return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0);
+}
+
+static int vsc8211_intr_enable(struct cphy *cphy)
+{
+ return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE,
+ INTR_MASK);
+}
+
+static int vsc8211_intr_disable(struct cphy *cphy)
+{
+ return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0);
+}
+
+static int vsc8211_intr_clear(struct cphy *cphy)
+{
+ u32 val;
+
+ /* Clear PHY interrupts by reading the register. */
+ return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val);
+}
+
+static int vsc8211_autoneg_enable(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+ BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int vsc8211_autoneg_restart(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+ BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANRESTART);
+}
+
+static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ unsigned int bmcr, status, lpa, adv;
+ int err, sp = -1, dplx = -1, pause = 0;
+
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
+ if (err)
+ return err;
+
+ if (link_ok) {
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!(status & BMSR_LSTATUS))
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+ &status);
+ if (err)
+ return err;
+ *link_ok = (status & BMSR_LSTATUS) != 0;
+ }
+ if (!(bmcr & BMCR_ANENABLE)) {
+ dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ sp = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ sp = SPEED_100;
+ else
+ sp = SPEED_10;
+ } else if (status & BMSR_ANEGCOMPLETE) {
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT,
+ &status);
+ if (err)
+ return err;
+
+ dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_ACSR_SPEED(status);
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+
+ if (fc && dplx == DUPLEX_FULL) {
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA,
+ &lpa);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE,
+ MII_ADVERTISE, &adv);
+ if (err)
+ return err;
+
+ if (lpa & adv & ADVERTISE_PAUSE_CAP)
+ pause = PAUSE_RX | PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_CAP) &&
+ (lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_ASYM))
+ pause = PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_CAP))
+ pause = PAUSE_RX;
+ }
+ }
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
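+
+/*
+ * The pause resolution above follows the usual IEEE 802.3 Annex 28B
+ * rules: symmetric pause when both sides advertise PAUSE_CAP, and
+ * asymmetric (TX-only or RX-only) pause when the ASYM bits resolve
+ * that way.
+ */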
+
+static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ unsigned int bmcr, status, lpa, adv;
+ int err, sp = -1, dplx = -1, pause = 0;
+
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
+ if (err)
+ return err;
+
+ if (link_ok) {
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!(status & BMSR_LSTATUS))
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+ &status);
+ if (err)
+ return err;
+ *link_ok = (status & BMSR_LSTATUS) != 0;
+ }
+ if (!(bmcr & BMCR_ANENABLE)) {
+ dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ sp = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ sp = SPEED_100;
+ else
+ sp = SPEED_10;
+ } else if (status & BMSR_ANEGCOMPLETE) {
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE,
+ &adv);
+ if (err)
+ return err;
+
+ if (adv & lpa & ADVERTISE_1000XFULL) {
+ dplx = DUPLEX_FULL;
+ sp = SPEED_1000;
+ } else if (adv & lpa & ADVERTISE_1000XHALF) {
+ dplx = DUPLEX_HALF;
+ sp = SPEED_1000;
+ }
+
+ if (fc && dplx == DUPLEX_FULL) {
+ if (lpa & adv & ADVERTISE_1000XPAUSE)
+ pause = PAUSE_RX | PAUSE_TX;
+ else if ((lpa & ADVERTISE_1000XPAUSE) &&
+ (adv & lpa & ADVERTISE_1000XPSE_ASYM))
+ pause = PAUSE_TX;
+ else if ((lpa & ADVERTISE_1000XPSE_ASYM) &&
+ (adv & ADVERTISE_1000XPAUSE))
+ pause = PAUSE_RX;
+ }
+ }
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+#ifdef UNUSED
+/*
+ * Enable/disable auto MDI/MDI-X in forced link speed mode.
+ */
+static int vsc8211_set_automdi(struct cphy *phy, int enable)
+{
+ int err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err;
+
+ err = t3_set_phy_speed_duplex(phy, speed, duplex);
+ if (!err)
+ err = vsc8211_set_automdi(phy, 1);
+ return err;
+}
+#endif /* UNUSED */
+
+static int vsc8211_power_down(struct cphy *cphy, int enable)
+{
+ return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR, BMCR_PDOWN,
+ enable ? BMCR_PDOWN : 0);
+}
+
+static int vsc8211_intr_handler(struct cphy *cphy)
+{
+ unsigned int cause;
+ int err, cphy_cause = 0;
+
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause);
+ if (err)
+ return err;
+
+ cause &= INTR_MASK;
+ if (cause & CFG_CHG_INTR_MASK)
+ cphy_cause |= cphy_cause_link_change;
+ if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
+ cphy_cause |= cphy_cause_fifo_error;
+ return cphy_cause;
+}
+
+static const struct cphy_ops vsc8211_ops = {
+ .reset = vsc8211_reset,
+ .intr_enable = vsc8211_intr_enable,
+ .intr_disable = vsc8211_intr_disable,
+ .intr_clear = vsc8211_intr_clear,
+ .intr_handler = vsc8211_intr_handler,
+ .autoneg_enable = vsc8211_autoneg_enable,
+ .autoneg_restart = vsc8211_autoneg_restart,
+ .advertise = t3_phy_advertise,
+ .set_speed_duplex = t3_set_phy_speed_duplex,
+ .get_link_status = vsc8211_get_link_status,
+ .power_down = vsc8211_power_down,
+};
+
+static const struct cphy_ops vsc8211_fiber_ops = {
+ .reset = vsc8211_reset,
+ .intr_enable = vsc8211_intr_enable,
+ .intr_disable = vsc8211_intr_disable,
+ .intr_clear = vsc8211_intr_clear,
+ .intr_handler = vsc8211_intr_handler,
+ .autoneg_enable = vsc8211_autoneg_enable,
+ .autoneg_restart = vsc8211_autoneg_restart,
+ .advertise = t3_phy_advertise_fiber,
+ .set_speed_duplex = t3_set_phy_speed_duplex,
+ .get_link_status = vsc8211_get_link_status_fiber,
+ .power_down = vsc8211_power_down,
+};
+
+int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ int err;
+ unsigned int val;
+
+ cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops,
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
+ SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
+ msleep(20); /* PHY needs ~10ms to start responding to MDIO */
+
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val);
+ if (err)
+ return err;
+ if (val & VSC_CTRL_MEDIA_MODE_HI) {
+ /* copper interface, just need to configure the LEDs */
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL,
+ 0x100);
+ }
+
+ phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ;
+ phy->desc = "1000BASE-X";
+ phy->ops = &vsc8211_fiber_ops;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL,
+ val | VSC_CTRL_CLAUSE37_VIEW);
+ if (err)
+ return err;
+
+ err = vsc8211_reset(phy, 0);
+ if (err)
+ return err;
+
+ udelay(5); /* delay after reset before next SMI */
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
new file mode 100644
index 0000000000..1bdc6cad1e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+/*
+ * # of exact address filters. The first one is used for the station address,
+ * the rest are available for multicast addresses.
+ */
+#define EXACT_ADDR_FILTERS 8
+
+static inline int macidx(const struct cmac *mac)
+{
+ return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
+}
+
+static void xaui_serdes_reset(struct cmac *mac)
+{
+ static const unsigned int clear[] = {
+ F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
+ F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
+ };
+
+ int i;
+ struct adapter *adap = mac->adapter;
+ u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
+
+ t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
+ F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
+ F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
+ F_RESETPLL23 | F_RESETPLL01);
+ t3_read_reg(adap, ctrl);
+ udelay(15);
+
+ for (i = 0; i < ARRAY_SIZE(clear); i++) {
+ t3_set_reg_field(adap, ctrl, clear[i], 0);
+ udelay(15);
+ }
+}
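+
+/*
+ * The clear[] order above stages the SERDES bring-up: power up lanes 0-1,
+ * release their PLL reset, then their lane resets, and then repeat the
+ * same sequence for lanes 2-3, with a settling delay between each step.
+ */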
+
+void t3b_pcs_reset(struct cmac *mac)
+{
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+ F_PCS_RESET_, 0);
+ udelay(20);
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
+ F_PCS_RESET_);
+}
+
+int t3_mac_reset(struct cmac *mac)
+{
+ static const struct addr_val_pair mac_reset_avp[] = {
+ {A_XGM_TX_CTRL, 0},
+ {A_XGM_RX_CTRL, 0},
+ {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+ F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
+ {A_XGM_RX_HASH_LOW, 0},
+ {A_XGM_RX_HASH_HIGH, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
+ {A_XGM_STAT_CTRL, F_CLRSTATS}
+ };
+ u32 val;
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
+ t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
+ F_RXSTRFRWRD | F_DISERRFRAMES,
+ uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);
+
+ if (uses_xaui(adap)) {
+ if (adap->params.rev == 0) {
+ t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+ F_RXENABLE | F_TXENABLE);
+ if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
+ F_CMULOCK, 1, 5, 2)) {
+ CH_ERR(adap,
+ "MAC %d XAUI SERDES CMU lock failed\n",
+ macidx(mac));
+ return -1;
+ }
+ t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+ F_SERDESRESET_);
+ } else
+ xaui_serdes_reset(mac);
+ }
+
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
+ V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
+ V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
+ val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
+
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ if ((val & F_PCS_RESET_) && adap->params.rev) {
+ msleep(1);
+ t3b_pcs_reset(mac);
+ }
+
+ memset(&mac->stats, 0, sizeof(mac->stats));
+ return 0;
+}
+
+static int t3b2_mac_reset(struct cmac *mac)
+{
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset, store;
+ int idx = macidx(mac);
+ u32 val;
+
+ if (!macidx(mac))
+ t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
+ else
+ t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
+
+ /* Stop NIC traffic to reduce the number of TXTOGGLES */
+ t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
+ /* Ensure TX drains */
+ t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ /* Store A_TP_TX_DROP_CFG_CH0 */
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ store = t3_read_reg(adap, A_TP_PIO_DATA);
+
+ msleep(10);
+
+ /* Change DROP_CFG to 0xc0000011 */
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
+
+ /* Check for xgm Rx fifo empty */
+ /* Increased loop count to 1000 from 5 to cover the 1G and 100 Mbps cases */
+ if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
+ 0x80000000, 1, 1000, 2)) {
+ CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
+ macidx(mac));
+ return -1;
+ }
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ val = F_MAC_RESET_;
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ if ((val & F_PCS_RESET_) && adap->params.rev) {
+ msleep(1);
+ t3b_pcs_reset(mac);
+ }
+ t3_write_reg(adap, A_XGM_RX_CFG + oft,
+ F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+ F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+
+ /* Restore the DROP_CFG */
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA, store);
+
+ if (!idx)
+ t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
+ else
+ t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
+
+ /* Re-enable NIC traffic */
+ t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
+
+ return 0;
+}
+
+/*
+ * Set the exact match register 'idx' to recognize the given Ethernet address.
+ */
+static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
+{
+ u32 addr_lo, addr_hi;
+ unsigned int oft = mac->offset + idx * 8;
+
+ addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ addr_hi = (addr[5] << 8) | addr[4];
+
+ t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
+ t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
+}
+
+/* Set one of the station's unicast MAC addresses. */
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6])
+{
+ if (idx >= mac->nucast)
+ return -EINVAL;
+ set_addr_filter(mac, idx, addr);
+ return 0;
+}
+
+/*
+ * Specify the number of exact address filters that should be reserved for
+ * unicast addresses. Caller should reload the unicast and multicast addresses
+ * after calling this.
+ */
+int t3_mac_set_num_ucast(struct cmac *mac, int n)
+{
+ if (n > EXACT_ADDR_FILTERS)
+ return -EINVAL;
+ mac->nucast = n;
+ return 0;
+}
+
+void t3_mac_disable_exact_filters(struct cmac *mac)
+{
+ unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
+
+ for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+ u32 v = t3_read_reg(mac->adapter, reg);
+ t3_write_reg(mac->adapter, reg, v);
+ }
+ t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
+}
+
+void t3_mac_enable_exact_filters(struct cmac *mac)
+{
+ unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
+
+ for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+ u32 v = t3_read_reg(mac->adapter, reg);
+ t3_write_reg(mac->adapter, reg, v);
+ }
+ t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
+}
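+
+/*
+ * The read-back-and-write in the two helpers above looks like a no-op but
+ * presumably isn't: writing the LOW half of an exact-match pair appears to
+ * disarm that filter entry until its HIGH half is written again, which is
+ * why disable rewrites the LOW registers and enable rewrites the HIGH ones.
+ */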
+
+/* Calculate the RX hash filter index of an Ethernet address */
+static int hash_hw_addr(const u8 *addr)
+{
+ int hash = 0, octet, bit, i = 0, c;
+
+ for (octet = 0; octet < 6; ++octet)
+ for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
+ hash ^= (c & 1) << i;
+ if (++i == 6)
+ i = 0;
+ }
+ return hash;
+}
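+
+/*
+ * hash_hw_addr() XOR-folds the 48 address bits into a 6-bit index (0-63);
+ * t3_mac_set_rx_mode() below then uses indices 0-31 to set a bit in
+ * A_XGM_RX_HASH_LOW and 32-63 for A_XGM_RX_HASH_HIGH. For example, the
+ * broadcast address ff:ff:ff:ff:ff:ff XORs an even number of ones into
+ * every bit position and so hashes to 0.
+ */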
+
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
+{
+ u32 val, hash_lo, hash_hi;
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
+ if (dev->flags & IFF_PROMISC)
+ val |= F_COPYALLFRAMES;
+ t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
+
+ if (dev->flags & IFF_ALLMULTI)
+ hash_lo = hash_hi = 0xffffffff;
+ else {
+ struct netdev_hw_addr *ha;
+ int exact_addr_idx = mac->nucast;
+
+ hash_lo = hash_hi = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ if (exact_addr_idx < EXACT_ADDR_FILTERS)
+ set_addr_filter(mac, exact_addr_idx++,
+ ha->addr);
+ else {
+ int hash = hash_hw_addr(ha->addr);
+
+ if (hash < 32)
+ hash_lo |= (1 << hash);
+ else
+ hash_hi |= (1 << (hash - 32));
+ }
+ }
+
+ t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
+ t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
+ return 0;
+}
+
+static int rx_fifo_hwm(int mtu)
+{
+ int hwm;
+
+ hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
+ return min(hwm, MAC_RXFIFO_SIZE - 8192);
+}
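+
+/*
+ * Example (assuming the usual 32 KB MAC_RXFIFO_SIZE): for a 1518-byte
+ * frame, max(32768 - 3 * 1518, 32768 * 38 / 100) = 28214, which is then
+ * clamped to 32768 - 8192 = 24576 bytes.
+ */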
+
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
+{
+ int hwm, lwm, divisor;
+ int ipg;
+ unsigned int thres, v, reg;
+ struct adapter *adap = mac->adapter;
+
+ /*
+ * MAX_FRAME_SIZE includes the header + FCS, mtu doesn't. The HW max
+ * packet size register includes the header, but not the FCS.
+ */
+ mtu += 14;
+ if (mtu > 1536)
+ mtu += 4;
+
+ if (mtu > MAX_FRAME_SIZE - 4)
+ return -EINVAL;
+ t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+
+ if (adap->params.rev >= T3_REV_B2 &&
+ (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
+ t3_mac_disable_exact_filters(mac);
+ v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
+ t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
+ F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
+
+ reg = adap->params.rev == T3_REV_B2 ?
+ A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;
+
+ /* drain RX FIFO */
+ if (t3_wait_op_done(adap, reg + mac->offset,
+ F_RXFIFO_EMPTY, 1, 20, 5)) {
+ t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
+ t3_mac_enable_exact_filters(mac);
+ return -EIO;
+ }
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+ V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+ V_RXMAXPKTSIZE(mtu));
+ t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
+ t3_mac_enable_exact_filters(mac);
+ } else
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+ V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+ V_RXMAXPKTSIZE(mtu));
+
+ /*
+ * Adjust the PAUSE frame watermarks. We always set the LWM, and the
+ * HWM only if flow-control is enabled.
+ */
+ hwm = rx_fifo_hwm(mtu);
+ lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
+ v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
+ v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
+ v |= V_RXFIFOPAUSELWM(lwm / 8);
+ if (G_RXFIFOPAUSEHWM(v))
+ v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
+ V_RXFIFOPAUSEHWM(hwm / 8);
+
+ t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
+
+ /* Adjust the TX FIFO threshold based on the MTU */
+ thres = (adap->params.vpd.cclk * 1000) / 15625;
+ thres = (thres * mtu) / 1000;
+ if (is_10G(adap))
+ thres /= 10;
+ thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
+ thres = max(thres, 8U); /* need at least 8 */
+ ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
+ V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
+ V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));
+
+ if (adap->params.rev > 0) {
+ divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
+ t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
+ (hwm - lwm) * 4 / divisor);
+ }
+ t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
+ MAC_RXFIFO_SIZE * 4 * 8 / 512);
+ return 0;
+}
+
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
+{
+ u32 val;
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ if (duplex >= 0 && duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (speed >= 0) {
+ if (speed == SPEED_10)
+ val = V_PORTSPEED(0);
+ else if (speed == SPEED_100)
+ val = V_PORTSPEED(1);
+ else if (speed == SPEED_1000)
+ val = V_PORTSPEED(2);
+ else if (speed == SPEED_10000)
+ val = V_PORTSPEED(3);
+ else
+ return -EINVAL;
+
+ t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
+ V_PORTSPEED(M_PORTSPEED), val);
+ }
+
+ val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
+ val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+ if (fc & PAUSE_TX) {
+ u32 rx_max_pkt_size =
+ G_RXMAXPKTSIZE(t3_read_reg(adap,
+ A_XGM_RX_MAX_PKT_SIZE + oft));
+ val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8);
+ }
+ t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
+
+ t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
+ (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
+ return 0;
+}
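+
+/*
+ * Flow-control mapping above: PAUSE_TX (we may *send* pause frames) sets
+ * the RX FIFO high watermark that triggers them, while PAUSE_RX (we honor
+ * *received* pause frames) sets F_TXPAUSEEN so the transmitter actually
+ * stalls when the link partner asks it to.
+ */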
+
+int t3_mac_enable(struct cmac *mac, int which)
+{
+ int idx = macidx(mac);
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+ struct mac_stats *s = &mac->stats;
+
+ if (which & MAC_DIRECTION_TX) {
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA,
+ adap->params.rev == T3_REV_C ?
+ 0xc4ffff01 : 0xc0ede401);
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
+ t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
+ adap->params.rev == T3_REV_C ? 0 : 1 << idx);
+
+ t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
+ mac->tx_mcnt = s->tx_frames;
+ mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+ A_TP_PIO_DATA)));
+ mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_TX_SPI4_SOP_EOP_CNT +
+ oft)));
+ mac->rx_mcnt = s->rx_frames;
+ mac->rx_pause = s->rx_pause;
+ mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_RX_SPI4_SOP_EOP_CNT +
+ oft)));
+ mac->rx_ocnt = s->rx_fifo_ovfl;
+ mac->txen = F_TXEN;
+ mac->toggle_cnt = 0;
+ }
+ if (which & MAC_DIRECTION_RX)
+ t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
+ return 0;
+}
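+
+/*
+ * The tx/rx counter snapshots taken in t3_mac_enable() above are the
+ * baseline that t3b2_mac_watchdog_task() below compares against to
+ * detect a stuck transmitter.
+ */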
+
+int t3_mac_disable(struct cmac *mac, int which)
+{
+ struct adapter *adap = mac->adapter;
+
+ if (which & MAC_DIRECTION_TX) {
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+ mac->txen = 0;
+ }
+ if (which & MAC_DIRECTION_RX) {
+ int val = F_MAC_RESET_;
+
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+ F_PCS_RESET_, 0);
+ msleep(100);
+ t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
+ }
+ return 0;
+}
+
+int t3b2_mac_watchdog_task(struct cmac *mac)
+{
+ struct adapter *adap = mac->adapter;
+ struct mac_stats *s = &mac->stats;
+ unsigned int tx_tcnt, tx_xcnt;
+ u64 tx_mcnt = s->tx_frames;
+ int status;
+
+ status = 0;
+ tx_xcnt = 1; /* by default assume tx_xcnt is making progress */
+ tx_tcnt = mac->tx_tcnt; /* if tx_mcnt is progressing, ignore tx_tcnt */
+ if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
+ tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_TX_SPI4_SOP_EOP_CNT +
+ mac->offset)));
+ if (tx_xcnt == 0) {
+ t3_write_reg(adap, A_TP_PIO_ADDR,
+ A_TP_TX_DROP_CNT_CH0 + macidx(mac));
+ tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+ A_TP_PIO_DATA)));
+ } else {
+ goto out;
+ }
+ } else {
+ mac->toggle_cnt = 0;
+ goto out;
+ }
+
+ if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
+ if (mac->toggle_cnt > 4) {
+ status = 2;
+ goto out;
+ } else {
+ status = 1;
+ goto out;
+ }
+ } else {
+ mac->toggle_cnt = 0;
+ goto out;
+ }
+
+out:
+ mac->tx_tcnt = tx_tcnt;
+ mac->tx_xcnt = tx_xcnt;
+ mac->tx_mcnt = s->tx_frames;
+ mac->rx_pause = s->rx_pause;
+ if (status == 1) {
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+ t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
+ t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
+ mac->toggle_cnt++;
+ } else if (status == 2) {
+ t3b2_mac_reset(mac);
+ mac->toggle_cnt = 0;
+ }
+ return status;
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics. Since the packet counters are only
+ * 32 bits wide they can overflow in ~286 secs at 10G, so the function should
+ * be called more frequently than that. The byte counters are 45 bits wide and
+ * would overflow in ~7.8 hours.
+ */
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
+{
+#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
+#define RMON_UPDATE(mac, name, reg) \
+ (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
+#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
+ (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
+ ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
+
+ u32 v, lo;
+
+ RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
+ RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
+ RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
+ RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
+ RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
+ RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
+ RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
+ RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
+ RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
+
+ RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
+
+ v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+ if (mac->adapter->params.rev == T3_REV_B2)
+ v &= 0x7fffffff;
+ mac->stats.rx_too_long += v;
+
+ RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
+
+ RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
+ RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
+ RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
+ RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
+ RMON_UPDATE(mac, tx_pause, TX_PAUSE);
+ /* This counts error frames in general (bad FCS, underrun, etc.). */
+ RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
+
+ RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
+
+ /* The next stat isn't clear-on-read. */
+ t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
+ v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
+ lo = (u32) mac->stats.rx_cong_drops;
+ mac->stats.rx_cong_drops += (u64) (v - lo);
+
+ return &mac->stats;
+}