Diffstat (limited to 'drivers/net/ethernet/emulex/benet')
-rw-r--r--  drivers/net/ethernet/emulex/benet/Kconfig          52
-rw-r--r--  drivers/net/ethernet/emulex/benet/Makefile           8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h             987
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c       5082
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h       2511
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c    1462
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h          371
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c       6146
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c        157
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h         73
10 files changed, 16849 insertions, 0 deletions
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
new file mode 100644
index 0000000000..f51dca1526
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config BE2NET
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
+ depends on PCI
+ help
+ This driver implements the NIC functionality for ServerEngines'
+ 10Gbps network adapter - BladeEngine.
+
+config BE2NET_HWMON
+ bool "HWMON support for be2net driver"
+ depends on BE2NET && HWMON
+ depends on !(BE2NET=y && HWMON=m)
+ default y
+ help
+ Say Y here if you want to expose thermal sensor data on
+ be2net network adapter.
+
+config BE2NET_BE2
+ bool "Support for BE2 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE2
+ chipsets. (e.g. OneConnect OCe10xxx)
+
+config BE2NET_BE3
+ bool "Support for BE3 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE3
+ chipsets. (e.g. OneConnect OCe11xxx)
+
+config BE2NET_LANCER
+ bool "Support for Lancer chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Lancer
+ chipsets. (e.g. LightPulse LPe12xxx)
+
+config BE2NET_SKYHAWK
+ bool "Support for Skyhawk chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Skyhawk
+ chipsets. (e.g. OneConnect OCe14xxx)
+
+comment "WARNING: be2net is useless without any enabled chip"
+ depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
+ BE2NET_SKYHAWK=n && BE2NET
diff --git a/drivers/net/ethernet/emulex/benet/Makefile b/drivers/net/ethernet/emulex/benet/Makefile
new file mode 100644
index 0000000000..1a238ec7fe
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile to build the network driver for ServerEngines' BladeEngine.
+#
+
+obj-$(CONFIG_BE2NET) += be2net.o
+
+be2net-y := be_main.o be_cmds.o be_ethtool.o be_roce.o
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
new file mode 100644
index 0000000000..61fe9625be
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -0,0 +1,987 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005 - 2016 Broadcom
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef BE_H
+#define BE_H
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <net/tcp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/cpumask.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include "be_hw.h"
+#include "be_roce.h"
+
+#define DRV_NAME "be2net"
+#define BE_NAME "Emulex BladeEngine2"
+#define BE3_NAME "Emulex BladeEngine3"
+#define OC_NAME "Emulex OneConnect"
+#define OC_NAME_BE OC_NAME "(be3)"
+#define OC_NAME_LANCER OC_NAME "(Lancer)"
+#define OC_NAME_SH OC_NAME "(Skyhawk)"
+#define DRV_DESC "Emulex OneConnect NIC Driver"
+
+#define BE_VENDOR_ID 0x19a2
+#define EMULEX_VENDOR_ID 0x10df
+#define BE_DEVICE_ID1 0x211
+#define BE_DEVICE_ID2 0x221
+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
+#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
+#define OC_DEVICE_ID6 0x728 /* Device id for VF in SkyHawk */
+#define OC_SUBSYS_DEVICE_ID1 0xE602
+#define OC_SUBSYS_DEVICE_ID2 0xE642
+#define OC_SUBSYS_DEVICE_ID3 0xE612
+#define OC_SUBSYS_DEVICE_ID4 0xE652
+
+/* Number of bytes of an RX frame that are copied to skb->data */
+#define BE_HDR_LEN ((u16) 64)
+/* allocate extra space to allow tunneling decapsulation without head reallocation */
+#define BE_RX_SKB_ALLOC_SIZE 256
+
+#define BE_MAX_JUMBO_FRAME_SIZE 9018
+#define BE_MIN_MTU 256
+#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \
+ (ETH_HLEN + ETH_FCS_LEN))
+
+/* Accommodate for QnQ configurations where VLAN insertion is enabled in HW */
+#define BE_MAX_GSO_SIZE (65535 - 2 * VLAN_HLEN)
+
+#define BE_NUM_VLANS_SUPPORTED 64
+#define BE_MAX_EQD 128u
+#define BE_MAX_TX_FRAG_COUNT 30
+
+#define EVNT_Q_LEN 1024
+#define TX_Q_LEN 2048
+#define TX_CQ_LEN 1024
+#define RX_Q_LEN 1024 /* Does not support any other value */
+#define RX_CQ_LEN 1024
+#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
+#define MCC_CQ_LEN 256
+
+#define BE2_MAX_RSS_QS 4
+#define BE3_MAX_RSS_QS 16
+#define BE3_MAX_TX_QS 16
+#define BE3_MAX_EVT_QS 16
+#define BE3_SRIOV_MAX_EVT_QS 8
+#define SH_VF_MAX_NIC_EQS 3 /* Skyhawk VFs can have a max of 4 EQs
+ * and at least 1 is granted to either
+ * SURF/DPDK
+ */
+
+#define MAX_PORT_RSS_TABLES 15
+#define MAX_NIC_FUNCS 16
+#define MAX_RX_QS 32
+#define MAX_EVT_QS 32
+#define MAX_TX_QS 32
+
+#define MAX_ROCE_EQS 5
+#define MAX_MSIX_VECTORS 32
+#define MIN_MSIX_VECTORS 1
+#define MAX_RX_POST NAPI_POLL_WEIGHT /* Frags posted at a time */
+#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
+#define MAX_NUM_POST_ERX_DB 255u
+
+#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
+#define FW_VER_LEN 32
+#define CNTL_SERIAL_NUM_WORDS 8 /* Controller serial number words */
+#define CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */
+
+#define RSS_INDIR_TABLE_LEN 128
+#define RSS_HASH_KEY_LEN 40
+
+#define BE_UNKNOWN_PHY_STATE 0xFF
+
+struct be_dma_mem {
+ void *va;
+ dma_addr_t dma;
+ u32 size;
+};
+
+struct be_queue_info {
+ u32 len;
+ u32 entry_size; /* Size of an element in the queue */
+ u32 tail, head;
+ atomic_t used; /* Number of valid elements in the queue */
+ u32 id;
+ struct be_dma_mem dma_mem;
+ bool created;
+};
+
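+/* Queue indices wrap with a bitwise AND, which is valid only for power-of-2
+ * ring lengths; the BUG_ON below enforces that assumption.
+ */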
+static inline u32 MODULO(u32 val, u32 limit)
+{
+ BUG_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_adv(u32 *index, u32 val, u32 limit)
+{
+ *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u32 *index, u32 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void *queue_index_node(struct be_queue_info *q, u16 index)
+{
+ return q->dma_mem.va + index * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void index_dec(u32 *index, u32 limit)
+{
+ *index = MODULO((*index - 1), limit);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ struct be_adapter *adapter;
+ struct napi_struct napi;
+ u8 idx; /* array index */
+ u8 msix_idx;
+ u16 spurious_intr;
+ cpumask_var_t affinity_mask;
+} ____cacheline_aligned_in_smp;
+
+struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ u32 min_eqd; /* in usecs */
+ u32 max_eqd; /* in usecs */
+ u32 prev_eqd; /* in usecs */
+ u32 et_eqd; /* configured val when aic is off */
+ ulong jiffies;
+ u64 rx_pkts_prev; /* Used to calculate RX pps */
+ u64 tx_reqs_prev; /* Used to calculate TX pps */
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ bool rearm_cq;
+};
+
+struct be_tx_stats {
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_vxlan_offload_pkts;
+ u64 tx_reqs;
+ u64 tx_compl;
+ u32 tx_stops;
+ u32 tx_drv_drops; /* pkts dropped by driver */
+ /* the error counters are described in be_ethtool.c */
+ u32 tx_hdr_parse_err;
+ u32 tx_dma_err;
+ u32 tx_tso_err;
+ u32 tx_spoof_check_err;
+ u32 tx_qinq_err;
+ u32 tx_internal_parity_err;
+ u32 tx_sge_err;
+ struct u64_stats_sync sync;
+ struct u64_stats_sync sync_compl;
+};
+
+/* Structure to hold some data of interest obtained from a TX CQE */
+struct be_tx_compl_info {
+ u8 status; /* Completion status */
+ u16 end_index; /* Completed TXQ Index */
+};
+
+struct be_tx_obj {
+ u32 db_offset;
+ struct be_tx_compl_info txcp;
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ /* Remember the skbs that were transmitted */
+ struct sk_buff *sent_skb_list[TX_Q_LEN];
+ struct be_tx_stats stats;
+ u16 pend_wrb_cnt; /* Number of WRBs yet to be given to HW */
+ u16 last_req_wrb_cnt; /* wrb cnt of the last req in the Q */
+ u16 last_req_hdr; /* index of the last req's hdr-wrb */
+} ____cacheline_aligned_in_smp;
+
+/* Struct to remember the pages posted for rx frags */
+struct be_rx_page_info {
+ struct page *page;
+ /* set to page-addr for last frag of the page & frag-addr otherwise */
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ u16 page_offset;
+ bool last_frag; /* last frag of the page */
+};
+
+struct be_rx_stats {
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_vxlan_offload_pkts;
+ u32 rx_drops_no_skbs; /* skb allocation errors */
+ u32 rx_drops_no_frags; /* HW has no fetched frags */
+ u32 rx_post_fail; /* page post alloc failures */
+ u32 rx_compl;
+ u32 rx_mcast_pkts;
+ u32 rx_compl_err; /* completions with err set */
+ struct u64_stats_sync sync;
+};
+
+struct be_rx_compl_info {
+ u32 rss_hash;
+ u16 vlan_tag;
+ u16 pkt_size;
+ u16 port;
+ u8 vlanf;
+ u8 num_rcvd;
+ u8 err;
+ u8 ipf;
+ u8 tcpf;
+ u8 udpf;
+ u8 ip_csum;
+ u8 l4_csum;
+ u8 ipv6;
+ u8 qnq;
+ u8 pkt_type;
+ u8 ip_frag;
+ u8 tunneled;
+};
+
+struct be_rx_obj {
+ struct be_adapter *adapter;
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ struct be_rx_compl_info rxcp;
+ struct be_rx_page_info page_info_tbl[RX_Q_LEN];
+ struct be_rx_stats stats;
+ u8 rss_id;
+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
+} ____cacheline_aligned_in_smp;
+
+struct be_drv_stats {
+ u32 eth_red_drops;
+ u32 dma_map_errors;
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rx_drops_too_many_frags;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_filtered;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rx_roce_bytes_lsd;
+ u32 rx_roce_bytes_msd;
+ u32 rx_roce_frames;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
+};
+
+/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
+#define BE_RESET_VLAN_TAG_ID 0xFFFF
+
+struct be_vf_cfg {
+ unsigned char mac_addr[ETH_ALEN];
+ int if_handle;
+ int pmac_id;
+ u16 vlan_tag;
+ u32 tx_rate;
+ u32 plink_tracking;
+ u32 privileges;
+ bool spoofchk;
+};
+
+enum vf_state {
+ ENABLED = 0,
+ ASSIGNED = 1
+};
+
+#define BE_FLAGS_LINK_STATUS_INIT BIT(1)
+#define BE_FLAGS_SRIOV_ENABLED BIT(2)
+#define BE_FLAGS_WORKER_SCHEDULED BIT(3)
+#define BE_FLAGS_NAPI_ENABLED BIT(6)
+#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7)
+#define BE_FLAGS_VXLAN_OFFLOADS BIT(8)
+#define BE_FLAGS_SETUP_DONE BIT(9)
+#define BE_FLAGS_PHY_MISCONFIGURED BIT(10)
+#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
+#define BE_FLAGS_OS2BMC BIT(12)
+#define BE_FLAGS_TRY_RECOVERY BIT(13)
+
+#define BE_UC_PMAC_COUNT 30
+#define BE_VF_UC_PMAC_COUNT 2
+
+#define MAX_ERR_RECOVERY_RETRY_COUNT 3
+#define ERR_DETECTION_DELAY 1000
+
+/* Ethtool set_dump flags */
+#define LANCER_INITIATE_FW_DUMP 0x1
+#define LANCER_DELETE_FW_DUMP 0x2
+
+struct phy_info {
+/* From SFF-8472 spec */
+#define SFP_VENDOR_NAME_LEN 17
+ u8 transceiver;
+ u8 autoneg;
+ u8 fc_autoneg;
+ u8 port_type;
+ u16 phy_type;
+ u16 interface_type;
+ u32 misc_params;
+ u16 auto_speeds_supported;
+ u16 fixed_speeds_supported;
+ int link_speed;
+ u32 advertising;
+ u32 supported;
+ u8 cable_type;
+ u8 vendor_name[SFP_VENDOR_NAME_LEN];
+ u8 vendor_pn[SFP_VENDOR_NAME_LEN];
+};
+
+struct be_resources {
+ u16 max_vfs; /* Total VFs "really" supported by FW/HW */
+ u16 max_mcast_mac;
+ u16 max_tx_qs;
+ u16 max_rss_qs;
+ u16 max_rx_qs;
+ u16 max_cq_count;
+ u16 max_uc_mac; /* Max UC MACs programmable */
+ u16 max_vlans; /* Number of vlans supported */
+ u16 max_iface_count;
+ u16 max_mcc_count;
+ u16 max_evt_qs;
+ u16 max_nic_evt_qs; /* NIC's share of evt qs */
+ u32 if_cap_flags;
+ u32 vf_if_cap_flags; /* VF if capability flags */
+ u32 flags;
+ /* Calculated PF Pool's share of RSS Tables. This is not enforced by
+ * the FW, but is a self-imposed driver limitation.
+ */
+ u16 max_rss_tables;
+};
+
+/* These are port-wide values */
+struct be_port_resources {
+ u16 max_vfs;
+ u16 nic_pfs;
+};
+
+#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
+
+struct rss_info {
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+ u8 rss_queue[RSS_INDIR_TABLE_LEN];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+ u64 rss_flags;
+};
+
+#define BE_INVALID_DIE_TEMP 0xFF
+struct be_hwmon {
+ struct device *hwmon_dev;
+ u8 be_on_die_temp; /* Unit: millidegree Celsius */
+};
+
+/* Macros to read/write the 'features' word of be_wrb_params structure.
+ */
+#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT
+#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT)
+
+#define BE_WRB_F_GET(word, name) \
+ (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name))
+
+#define BE_WRB_F_SET(word, name, val) \
+ ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name)))
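+/* Illustrative usage: BE_WRB_F_SET(wrb_params->features, IPCS, 1) sets the
+ * BE_WRB_F_IPCS_BIT bit in the features word.
+ */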
+
+/* Feature/offload bits */
+enum {
+ BE_WRB_F_CRC_BIT, /* Ethernet CRC */
+ BE_WRB_F_IPCS_BIT, /* IP csum */
+ BE_WRB_F_TCPCS_BIT, /* TCP csum */
+ BE_WRB_F_UDPCS_BIT, /* UDP csum */
+ BE_WRB_F_LSO_BIT, /* LSO */
+ BE_WRB_F_LSO6_BIT, /* LSO6 */
+ BE_WRB_F_VLAN_BIT, /* VLAN */
+ BE_WRB_F_VLAN_SKIP_HW_BIT, /* Skip VLAN tag (workaround) */
+ BE_WRB_F_OS2BMC_BIT /* Send packet to the management ring */
+};
+
+/* The structure below provides a HW-agnostic abstraction of WRB params
+ * retrieved from a TX skb. This is in turn passed to chip specific routines
+ * during transmit, to set the corresponding params in the WRB.
+ */
+struct be_wrb_params {
+ u32 features; /* Feature bits */
+ u16 vlan_tag; /* VLAN tag */
+ u16 lso_mss; /* MSS for LSO */
+};
+
+struct be_eth_addr {
+ unsigned char mac[ETH_ALEN];
+};
+
+#define BE_SEC 1000 /* in msec */
+#define BE_MIN (60 * BE_SEC) /* in msec */
+#define BE_HOUR (60 * BE_MIN) /* in msec */
+
+#define ERR_RECOVERY_MAX_RETRY_COUNT 3
+#define ERR_RECOVERY_DETECTION_DELAY BE_SEC
+#define ERR_RECOVERY_RETRY_DELAY (30 * BE_SEC)
+
+/* UE-detection-duration in BEx/Skyhawk:
+ * All PFs must wait for this duration after they detect UE before reading
+ * SLIPORT_SEMAPHORE register. At the end of this duration, the Firmware
+ * guarantees that the SLIPORT_SEMAPHORE register is updated to indicate
+ * if the UE is recoverable.
+ */
+#define ERR_RECOVERY_UE_DETECT_DURATION BE_SEC
+
+/* Initial idle time (in msec) to elapse after driver load,
+ * before UE recovery is allowed.
+ */
+#define ERR_IDLE_HR 24
+#define ERR_RECOVERY_IDLE_TIME (ERR_IDLE_HR * BE_HOUR)
+
+/* Time interval (in msec) after which UE recovery can be repeated */
+#define ERR_INTERVAL_HR 72
+#define ERR_RECOVERY_INTERVAL (ERR_INTERVAL_HR * BE_HOUR)
+
+/* BEx/SH UE recovery state machine */
+enum {
+ ERR_RECOVERY_ST_NONE = 0, /* No Recovery */
+ ERR_RECOVERY_ST_DETECT = 1, /* UE detection duration */
+ ERR_RECOVERY_ST_RESET = 2, /* Reset Phase (PF0 only) */
+ ERR_RECOVERY_ST_PRE_POLL = 3, /* Pre-Poll Phase (all PFs) */
+ ERR_RECOVERY_ST_REINIT = 4 /* Re-initialize Phase */
+};
+
+struct be_error_recovery {
+ union {
+ u8 recovery_retries; /* used for Lancer */
+ u8 recovery_state; /* used for BEx and Skyhawk */
+ };
+
+ /* BEx/Skyhawk error recovery variables */
+ bool recovery_supported;
+ u16 ue_to_reset_time; /* Time after UE, to soft reset
+ * the chip - PF0 only
+ */
+ u16 ue_to_poll_time; /* Time after UE, to Restart Polling
+ * of SLIPORT_SEMAPHORE reg
+ */
+ u16 last_err_code;
+ unsigned long probe_time;
+ unsigned long last_recovery_time;
+
+ /* Common to both Lancer & BEx/SH error recovery */
+ u32 resched_delay;
+ struct delayed_work err_detection_work;
+};
+
+/* Ethtool priv_flags */
+#define BE_DISABLE_TPE_RECOVERY 0x1
+
+struct be_vxlan_port {
+ struct list_head list;
+ __be16 port; /* VxLAN UDP dst port */
+ int port_aliases; /* alias count */
+};
+
+struct be_adapter {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+
+ u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
+ u8 __iomem *db; /* Door Bell */
+ u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
+
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
+ struct be_dma_mem mbox_mem;
+ /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
+ * is stored for freeing purposes */
+ struct be_dma_mem mbox_mem_alloced;
+
+ struct be_mcc_obj mcc_obj;
+ struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ u16 cfg_num_rx_irqs; /* configured via set-channels */
+ u16 cfg_num_tx_irqs; /* configured via set-channels */
+ u16 num_evt_qs;
+ u16 num_msix_vec;
+ struct be_eq_obj eq_obj[MAX_EVT_QS];
+ struct msix_entry msix_entries[MAX_MSIX_VECTORS];
+ bool isr_registered;
+
+ /* TX Rings */
+ u16 num_tx_qs;
+ struct be_tx_obj tx_obj[MAX_TX_QS];
+
+ /* Rx rings */
+ u16 num_rx_qs;
+ u16 num_rss_qs;
+ u16 need_def_rxq;
+ struct be_rx_obj rx_obj[MAX_RX_QS];
+ u32 big_page_size; /* Compounded page size shared by rx wrbs */
+
+ struct be_drv_stats drv_stats;
+ struct be_aic_obj aic_obj[MAX_EVT_QS];
+ bool aic_enabled;
+ u8 vlan_prio_bmap; /* Available Priority BitMap */
+ u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */
+ struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
+
+ struct be_dma_mem stats_cmd;
+ /* Work queue used to perform periodic tasks like getting statistics */
+ struct delayed_work work;
+ u16 work_counter;
+
+ u8 recovery_retries;
+ u8 err_flags;
+ bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
+ u32 flags;
+ u32 cmd_privileges;
+ /* Ethtool knobs and info */
+ char fw_ver[FW_VER_LEN];
+ char fw_on_flash[FW_VER_LEN];
+
+ /* IFACE filtering fields */
+ int if_handle; /* Used to configure filtering */
+ u32 if_flags; /* Interface filtering flags */
+ u32 *pmac_id; /* MAC addr handle used by BE card */
+ struct be_eth_addr *uc_list;/* list of uc-addrs programmed (not perm) */
+ u32 uc_macs; /* Count of secondary UC MAC programmed */
+ struct be_eth_addr *mc_list;/* list of mcast addrs programmed */
+ u32 mc_count;
+ unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 vlans_added;
+ bool update_uc_list;
+ bool update_mc_list;
+ struct mutex rx_filter_lock;/* For protecting vids[] & mc/uc_list[] */
+
+ u32 beacon_state; /* for set_phys_id */
+
+ u32 port_num;
+ char port_name;
+ u8 mc_type;
+ u32 function_mode;
+ u32 function_caps;
+ u32 rx_fc; /* Rx flow control */
+ u32 tx_fc; /* Tx flow control */
+ bool stats_cmd_sent;
+ struct {
+ u32 size;
+ u32 total_size;
+ u64 io_addr;
+ } roce_db;
+ u32 num_msix_roce_vec;
+ struct ocrdma_dev *ocrdma_dev;
+ struct list_head entry;
+
+ u32 flash_status;
+ struct completion et_cmd_compl;
+
+ struct be_resources pool_res; /* resources available for the port */
+ struct be_resources res; /* resources available for the func */
+ u16 num_vfs; /* Number of VFs provisioned by PF */
+ u8 pf_num; /* Numbering used by FW, starts at 0 */
+ u8 vf_num; /* Numbering used by FW, starts at 1 */
+ u8 virtfn;
+ struct be_vf_cfg *vf_cfg;
+ bool be3_native;
+ u32 sli_family;
+ u8 hba_port_num;
+ u16 pvid;
+ __be16 vxlan_port; /* offloaded vxlan port num */
+ struct phy_info phy;
+ u8 wol_cap;
+ bool wol_en;
+ u16 asic_rev;
+ u16 qnq_vid;
+ u32 msg_enable;
+ int be_get_temp_freq;
+ struct be_hwmon hwmon_info;
+ struct rss_info rss_info;
+ /* Filters for packets that need to be sent to BMC */
+ u32 bmc_filt_mask;
+ u32 fat_dump_len;
+ u16 serial_num[CNTL_SERIAL_NUM_WORDS];
+ u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
+ u8 dev_mac[ETH_ALEN];
+ u32 priv_flags; /* ethtool get/set_priv_flags() */
+ struct be_error_recovery error_recovery;
+};
+
+/* Used for deferred FW config cmds. Add fields to this struct as required */
+struct be_cmd_work {
+ struct work_struct work;
+ struct be_adapter *adapter;
+};
+
+#define be_physfn(adapter) (!adapter->virtfn)
+#define be_virtfn(adapter) (adapter->virtfn)
+#define sriov_enabled(adapter) (adapter->flags & \
+ BE_FLAGS_SRIOV_ENABLED)
+
+#define for_all_vfs(adapter, vf_cfg, i) \
+ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+ i++, vf_cfg++)
+
+#define ON 1
+#define OFF 0
+
+#define be_max_vlans(adapter) (adapter->res.max_vlans)
+#define be_max_uc(adapter) (adapter->res.max_uc_mac)
+#define be_max_mc(adapter) (adapter->res.max_mcast_mac)
+#define be_max_vfs(adapter) (adapter->pool_res.max_vfs)
+#define be_max_rss(adapter) (adapter->res.max_rss_qs)
+#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
+#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
+#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
+/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
+#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
+/* Max number of EQs available only for NIC */
+#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
+#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
+#define be_max_pf_pool_rss_tables(adapter) \
+ (adapter->pool_res.max_rss_tables)
+/* Max irqs available for NIC */
+#define be_max_irqs(adapter) \
+ (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
+
+/* Max irqs *needed* for RX queues */
+static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
+{
+ /* If no RSS, need at least one irq for def-RXQ */
+ u16 num = max_t(u16, be_max_rss(adapter), 1);
+
+ return min_t(u16, num, be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for TX queues */
+static inline u16 be_max_tx_irqs(struct be_adapter *adapter)
+{
+ return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for combined queues */
+static inline u16 be_max_qp_irqs(struct be_adapter *adapter)
+{
+ return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
+}
+
+/* Max irqs *needed* for RX and TX queues together */
+static inline u16 be_max_any_irqs(struct be_adapter *adapter)
+{
+ return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
+}
+
+/* Is BE in pvid_tagging mode */
+#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
+
+/* Is BE in QNQ multi-channel mode */
+#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
+
+#ifdef CONFIG_BE2NET_LANCER
+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
+ adapter->pdev->device == OC_DEVICE_ID4)
+#else
+#define lancer_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_LANCER */
+
+#ifdef CONFIG_BE2NET_SKYHAWK
+#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
+ adapter->pdev->device == OC_DEVICE_ID6)
+#else
+#define skyhawk_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_SKYHAWK */
+
+#ifdef CONFIG_BE2NET_BE3
+#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
+ adapter->pdev->device == OC_DEVICE_ID2)
+#else
+#define BE3_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE3 */
+
+#ifdef CONFIG_BE2NET_BE2
+#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
+ adapter->pdev->device == OC_DEVICE_ID1)
+#else
+#define BE2_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE2 */
+
+#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
+
+#define be_roce_supported(adapter) (skyhawk_chip(adapter) && \
+ (adapter->function_mode & RDMA_ENABLED))
+
+extern const struct ethtool_ops be_ethtool_ops;
+
+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
+#define num_irqs(adapter) (msix_enabled(adapter) ? \
+ adapter->num_msix_vec : 1)
+#define tx_stats(txo) (&(txo)->stats)
+#define rx_stats(rxo) (&(rxo)->stats)
+
+/* The default RXQ is the last RXQ */
+#define default_rxo(adpt) (&adpt->rx_obj[adpt->num_rx_qs - 1])
+
+#define for_all_rx_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
+ i++, rxo++)
+
+#define for_all_rss_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \
+ i++, rxo++)
+
+#define for_all_tx_queues(adapter, txo, i) \
+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
+ i++, txo++)
+
+#define for_all_evt_queues(adapter, eqo, i) \
+ for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
+ i++, eqo++)
+
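+/* RXQs/TXQs are distributed across EQs round-robin, so the queues serviced by
+ * an EQ are those at indices eqo->idx, eqo->idx + num_evt_qs, and so on.
+ */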
+#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i) \
+ for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
+ i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
+
+#define for_all_tx_queues_on_eq(adapter, eqo, txo, i) \
+ for (i = eqo->idx, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;\
+ i += adapter->num_evt_qs, txo += adapter->num_evt_qs)
+
+#define is_mcc_eqo(eqo) (eqo->idx == 0)
+#define mcc_eqo(adapter) (&adapter->eq_obj[0])
+
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+
+/* Returns number of pages spanned by the data starting at the given addr */
+#define PAGES_4K_SPANNED(_address, size) \
+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
+ (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
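+/* e.g. 6000 bytes starting 2048 bytes into a page: (2048 + 6000 + 4095) >> 12 = 2 pages */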
+
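+/* The amap_* structs (defined in be_hw.h) describe HW bit-layouts such that
+ * offsetof() yields a field's bit offset and sizeof() its width in bits,
+ * which is what the macros below rely on.
+ */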
+/* Returns bit offset within a DWORD of a bitfield */
+#define AMAP_BIT_OFFSET(_struct, field) \
+ (((size_t)&(((_struct *)0)->field))%32)
+
+/* Returns the bit mask of the field that is NOT shifted into location. */
+static inline u32 amap_mask(u32 bitsize)
+{
+ return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
+}
+
+static inline void
+amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
+{
+ u32 *dw = (u32 *) ptr + dw_offset;
+ *dw &= ~(mask << offset);
+ *dw |= (mask & value) << offset;
+}
+
+#define AMAP_SET_BITS(_struct, field, ptr, val) \
+ amap_set(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field), \
+ val)
+
+static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
+{
+ u32 *dw = (u32 *) ptr;
+ return mask & (*(dw + dw_offset) >> offset);
+}
+
+#define AMAP_GET_BITS(_struct, field, ptr) \
+ amap_get(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field))
+
+#define GET_RX_COMPL_V0_BITS(field, ptr) \
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr)
+
+#define GET_RX_COMPL_V1_BITS(field, ptr) \
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr)
+
+#define GET_TX_COMPL_BITS(field, ptr) \
+ AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr)
+
+#define SET_TX_WRB_HDR_BITS(field, ptr, val) \
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val)
+
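+/* WRBs are little-endian in memory; on little-endian hosts these helpers are
+ * no-ops, on big-endian hosts each dword is byte-swapped in place.
+ */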
+#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
+#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
+static inline void swap_dws(void *wrb, int len)
+{
+#ifdef __BIG_ENDIAN
+ u32 *dw = wrb;
+ BUG_ON(len % 4);
+ do {
+ *dw = cpu_to_le32(*dw);
+ dw++;
+ len -= 4;
+ } while (len);
+#endif /* __BIG_ENDIAN */
+}
+
+#define be_cmd_status(status) (status > 0 ? -EIO : status)
+
+static inline u8 is_tcp_pkt(struct sk_buff *skb)
+{
+ u8 val = 0;
+
+ if (ip_hdr(skb)->version == 4)
+ val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else if (ip_hdr(skb)->version == 6)
+ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
+
+ return val;
+}
+
+static inline u8 is_udp_pkt(struct sk_buff *skb)
+{
+ u8 val = 0;
+
+ if (ip_hdr(skb)->version == 4)
+ val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
+ else if (ip_hdr(skb)->version == 6)
+ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
+
+ return val;
+}
+
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
+static inline bool is_ipv6_ext_hdr(struct sk_buff *skb)
+{
+ if (ip_hdr(skb)->version == 6)
+ return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
+ else
+ return false;
+}
+
+#define be_error_recovering(adapter) \
+ (adapter->flags & BE_FLAGS_TRY_RECOVERY)
+
+#define BE_ERROR_EEH 1
+#define BE_ERROR_UE BIT(1)
+#define BE_ERROR_FW BIT(2)
+#define BE_ERROR_TX BIT(3)
+#define BE_ERROR_HW (BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_TX)
+#define BE_ERROR_ANY (BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_FW | \
+ BE_ERROR_TX)
+#define BE_CLEAR_ALL 0xFF
+
+static inline u8 be_check_error(struct be_adapter *adapter, u32 err_type)
+{
+ return (adapter->err_flags & err_type);
+}
+
+static inline void be_set_error(struct be_adapter *adapter, int err_type)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->err_flags |= err_type;
+ netif_carrier_off(netdev);
+
+ dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+}
+
+static inline void be_clear_error(struct be_adapter *adapter, int err_type)
+{
+ adapter->err_flags &= ~err_type;
+}
+
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
+{
+ return adapter->num_rx_qs > 1;
+}
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+ u16 num_popped);
+void be_link_status_update(struct be_adapter *adapter, u8 link_status);
+void be_parse_stats(struct be_adapter *adapter);
+int be_load_fw(struct be_adapter *adapter, u8 *func);
+bool be_is_wol_supported(struct be_adapter *adapter);
+bool be_pause_supported(struct be_adapter *adapter);
+u32 be_get_fw_log_level(struct be_adapter *adapter);
+int be_update_queues(struct be_adapter *adapter);
+int be_poll(struct napi_struct *napi, int budget);
+void be_eqd_update(struct be_adapter *adapter, bool force_update);
+
+/*
+ * internal function to initialize-cleanup roce device.
+ */
+void be_roce_dev_add(struct be_adapter *);
+void be_roce_dev_remove(struct be_adapter *);
+
+/*
+ * internal function to open-close roce device during ifup-ifdown.
+ */
+void be_roce_dev_shutdown(struct be_adapter *);
+
+#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
new file mode 100644
index 0000000000..61adcebeef
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -0,0 +1,5082 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005 - 2016 Broadcom
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/module.h>
+#include "be.h"
+#include "be_cmds.h"
+
+const char * const be_misconfig_evt_port_state[] = {
+ "Physical Link is functional",
+ "Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
+ "Optics of two types installed – Remove one optic or install matching pair of optics.",
+ "Incompatible optics – Replace with compatible optics for card to function.",
+ "Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
+ "Uncertified optics – Replace with Avago-certified optics to enable link operation."
+};
+
+static char *be_port_misconfig_evt_severity[] = {
+ "KERN_WARN",
+ "KERN_INFO",
+ "KERN_ERR",
+ "KERN_WARN"
+};
+
+static char *phy_state_oper_desc[] = {
+ "Link is non-operational",
+ "Link is operational",
+ ""
+};
+
+static struct be_cmd_priv_map cmd_priv_map[] = {
+ {
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ CMD_SUBSYSTEM_ETH,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_GET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_SET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_ETH_GET_PPORT_STATS,
+ CMD_SUBSYSTEM_ETH,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_GET_PHY_DETAILS,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_LOWLEVEL_HOST_DDR_DMA,
+ CMD_SUBSYSTEM_LOWLEVEL,
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_LOWLEVEL_LOOPBACK_TEST,
+ CMD_SUBSYSTEM_LOWLEVEL,
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ CMD_SUBSYSTEM_LOWLEVEL,
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG | BE_PRIV_VHADM |
+ BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG
+ }
+};
+
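+/* Opcodes listed in cmd_priv_map may be issued only when the function holds
+ * at least one of the listed privileges; opcodes not in the table are always
+ * allowed.
+ */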
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
+{
+ int i;
+ int num_entries = ARRAY_SIZE(cmd_priv_map);
+ u32 cmd_privileges = adapter->cmd_privileges;
+
+ for (i = 0; i < num_entries; i++)
+ if (opcode == cmd_priv_map[i].opcode &&
+ subsystem == cmd_priv_map[i].subsystem)
+ if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
+ return false;
+
+ return true;
+}
+
+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
+{
+ return wrb->payload.embedded_payload;
+}
+
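+/* Ring the MCC queue doorbell to tell the FW one new WRB has been posted */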
+static int be_mcc_notify(struct be_adapter *adapter)
+{
+ struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ u32 val = 0;
+
+ if (be_check_error(adapter, BE_ERROR_ANY))
+ return -EIO;
+
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+ return 0;
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian)
+ */
+static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
+{
+ u32 flags;
+
+ if (compl->flags != 0) {
+ flags = le32_to_cpu(compl->flags);
+ if (flags & CQE_FLAGS_VALID_MASK) {
+ compl->flags = flags;
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
+{
+ compl->flags = 0;
+}
+
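+/* A WRB's tag0/tag1 hold the low/high 32 bits of the request header's kernel
+ * virtual address (see fill_wrb_tags()); reassemble that pointer here.
+ */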
+static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
+{
+ unsigned long addr;
+
+ addr = tag1;
+ addr = ((addr << 16) << 16) | tag0;
+ return (void *)addr;
+}
+
+static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
+{
+ if (base_status == MCC_STATUS_NOT_SUPPORTED ||
+ base_status == MCC_STATUS_ILLEGAL_REQUEST ||
+ addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+ addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
+ (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
+ (base_status == MCC_STATUS_ILLEGAL_FIELD ||
+ addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
+ return true;
+ else
+ return false;
+}
+
+/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
+ * loop (has not issued be_mcc_notify_wait())
+ */
+static void be_async_cmd_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl,
+ struct be_cmd_resp_hdr *resp_hdr)
+{
+ enum mcc_base_status base_status = base_status(compl->status);
+ u8 opcode = 0, subsystem = 0;
+
+ if (resp_hdr) {
+ opcode = resp_hdr->opcode;
+ subsystem = resp_hdr->subsystem;
+ }
+
+ if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return;
+ }
+
+ if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return;
+ }
+
+ if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
+ opcode == OPCODE_COMMON_WRITE_OBJECT) &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ adapter->flash_status = compl->status;
+ complete(&adapter->et_cmd_compl);
+ return;
+ }
+
+ if ((opcode == OPCODE_ETH_GET_STATISTICS ||
+ opcode == OPCODE_ETH_GET_PPORT_STATS) &&
+ subsystem == CMD_SUBSYSTEM_ETH &&
+ base_status == MCC_STATUS_SUCCESS) {
+ be_parse_stats(adapter);
+ adapter->stats_cmd_sent = false;
+ return;
+ }
+
+ if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ if (base_status == MCC_STATUS_SUCCESS) {
+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+ (void *)resp_hdr;
+ adapter->hwmon_info.be_on_die_temp =
+ resp->on_die_temperature;
+ } else {
+ adapter->be_get_temp_freq = 0;
+ adapter->hwmon_info.be_on_die_temp =
+ BE_INVALID_DIE_TEMP;
+ }
+ return;
+ }
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ enum mcc_base_status base_status;
+ enum mcc_addl_status addl_status;
+ struct be_cmd_resp_hdr *resp_hdr;
+ u8 opcode = 0, subsystem = 0;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb
+ */
+ be_dws_le_to_cpu(compl, 4);
+
+ base_status = base_status(compl->status);
+ addl_status = addl_status(compl->status);
+
+ resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+ if (resp_hdr) {
+ opcode = resp_hdr->opcode;
+ subsystem = resp_hdr->subsystem;
+ }
+
+ be_async_cmd_process(adapter, compl, resp_hdr);
+
+ if (base_status != MCC_STATUS_SUCCESS &&
+ !be_skip_err_log(opcode, base_status, addl_status)) {
+ if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
+ addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
+ dev_warn(&adapter->pdev->dev,
+ "VF is not privileged to issue opcode %d-%d\n",
+ opcode, subsystem);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "opcode %d-%d failed:status %d-%d\n",
+ opcode, subsystem, base_status, addl_status);
+ }
+ }
+ return compl->status;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_event_link_state *evt =
+ (struct be_async_event_link_state *)compl;
+
+ /* When link status changes, link speed must be re-queried from FW */
+ adapter->phy.link_speed = -1;
+
+ /* On BEx the FW does not send a separate link status
+ * notification for physical and logical link.
+ * On other chips just process the logical link
+ * status notification
+ */
+ if (!BEx_chip(adapter) &&
+ !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
+ return;
+
+ /* For the initial link status do not rely on the ASYNC event as
+ * it may not be received in some cases.
+ */
+ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
+ be_link_status_update(adapter,
+ evt->port_link_status & LINK_STATUS_MASK);
+}
+
+static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_event_misconfig_port *evt =
+ (struct be_async_event_misconfig_port *)compl;
+ u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
+ u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
+ u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
+ struct device *dev = &adapter->pdev->dev;
+ u8 msg_severity = DEFAULT_MSG_SEVERITY;
+ u8 phy_state_info;
+ u8 new_phy_state;
+
+ new_phy_state =
+ (sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;
+
+ if (new_phy_state == adapter->phy_state)
+ return;
+
+ adapter->phy_state = new_phy_state;
+
+ /* for older fw that doesn't populate link effect data */
+ if (!sfp_misconfig_evt_word2)
+ goto log_message;
+
+ phy_state_info =
+ (sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;
+
+ if (phy_state_info & PHY_STATE_INFO_VALID) {
+ msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;
+
+ if (be_phy_unqualified(new_phy_state))
+ phy_oper_state = (phy_state_info & PHY_STATE_OPER);
+ }
+
+log_message:
+ /* Log an error message that would allow a user to determine
+ * whether the SFPs have an issue
+ */
+ if (be_phy_state_unknown(new_phy_state))
+ dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
+ "Port %c: Unrecognized Optics state: 0x%x. %s",
+ adapter->port_name,
+ new_phy_state,
+ phy_state_oper_desc[phy_oper_state]);
+ else
+ dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
+ "Port %c: %s %s",
+ adapter->port_name,
+ be_misconfig_evt_port_state[new_phy_state],
+ phy_state_oper_desc[phy_oper_state]);
+
+ /* Log Vendor name and part no. if a misconfigured SFP is detected */
+ if (be_phy_misconfigured(new_phy_state))
+ adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
+}
+
+/* Grp5 CoS Priority evt */
+static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_event_grp5_cos_priority *evt =
+ (struct be_async_event_grp5_cos_priority *)compl;
+
+ if (evt->valid) {
+ adapter->vlan_prio_bmap = evt->available_priority_bmap;
+ adapter->recommended_prio_bits =
+ evt->reco_default_priority << VLAN_PRIO_SHIFT;
+ }
+}
+
+/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
+static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_event_grp5_qos_link_speed *evt =
+ (struct be_async_event_grp5_qos_link_speed *)compl;
+
+ if (adapter->phy.link_speed >= 0 &&
+ evt->physical_port == adapter->port_num)
+ adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
+}
+
+/*Grp5 PVID evt*/
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_event_grp5_pvid_state *evt =
+ (struct be_async_event_grp5_pvid_state *)compl;
+
+ if (evt->enabled) {
+ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
+ dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
+ } else {
+ adapter->pvid = 0;
+ }
+}
+
+#define MGMT_ENABLE_MASK 0x4
+static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
+ u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
+
+ if (evt_dw1 & MGMT_ENABLE_MASK) {
+ adapter->flags |= BE_FLAGS_OS2BMC;
+ adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
+ } else {
+ adapter->flags &= ~BE_FLAGS_OS2BMC;
+ }
+}
+
+static void be_async_grp5_evt_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
+
+ switch (event_type) {
+ case ASYNC_EVENT_COS_PRIORITY:
+ be_async_grp5_cos_priority_process(adapter, compl);
+ break;
+ case ASYNC_EVENT_QOS_SPEED:
+ be_async_grp5_qos_speed_process(adapter, compl);
+ break;
+ case ASYNC_EVENT_PVID_STATE:
+ be_async_grp5_pvid_state_process(adapter, compl);
+ break;
+ /* Async event to disable/enable os2bmc and/or mac-learning */
+ case ASYNC_EVENT_FW_CONTROL:
+ be_async_grp5_fw_control_process(adapter, compl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void be_async_dbg_evt_process(struct be_adapter *adapter,
+ struct be_mcc_compl *cmp)
+{
+ u8 event_type = 0;
+ struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
+
+ event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
+
+ switch (event_type) {
+ case ASYNC_DEBUG_EVENT_TYPE_QNQ:
+ if (evt->valid)
+ adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
+ adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
+ event_type);
+ break;
+ }
+}
+
+static void be_async_sliport_evt_process(struct be_adapter *adapter,
+ struct be_mcc_compl *cmp)
+{
+ u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
+
+ if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
+ be_async_port_misconfig_event_process(adapter, cmp);
+}
+
+static inline bool is_link_state_evt(u32 flags)
+{
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE;
+}
+
+static inline bool is_grp5_evt(u32 flags)
+{
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_GRP_5;
+}
+
+static inline bool is_dbg_evt(u32 flags)
+{
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_QNQ;
+}
+
+static inline bool is_sliport_evt(u32 flags)
+{
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_SLIPORT;
+}
+
+static void be_mcc_event_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter, compl);
+ else if (is_grp5_evt(compl->flags))
+ be_async_grp5_evt_process(adapter, compl);
+ else if (is_dbg_evt(compl->flags))
+ be_async_dbg_evt_process(adapter, compl);
+ else if (is_sliport_evt(compl->flags))
+ be_async_sliport_evt_process(adapter, compl);
+}
+
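+/* Return the next unconsumed completion at the MCC CQ tail, or NULL if none;
+ * the caller marks it consumed with be_mcc_compl_use().
+ */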
+static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
+{
+ struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
+ struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+void be_async_mcc_enable(struct be_adapter *adapter)
+{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
+ adapter->mcc_obj.rearm_cq = true;
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+}
+
+void be_async_mcc_disable(struct be_adapter *adapter)
+{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
+ adapter->mcc_obj.rearm_cq = false;
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+}
+
+int be_process_mcc(struct be_adapter *adapter)
+{
+ struct be_mcc_compl *compl;
+ int num = 0, status = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
+ spin_lock(&adapter->mcc_cq_lock);
+
+ while ((compl = be_mcc_compl_get(adapter))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ be_mcc_event_process(adapter, compl);
+ } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+ status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+
+ if (num)
+ be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
+
+ spin_unlock(&adapter->mcc_cq_lock);
+ return status;
+}
+
+/* Wait till no more pending mcc requests are present */
+static int be_mcc_wait_compl(struct be_adapter *adapter)
+{
+#define mcc_timeout 12000 /* 12s timeout */
+ int i, status = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
+ for (i = 0; i < mcc_timeout; i++) {
+ if (be_check_error(adapter, BE_ERROR_ANY))
+ return -EIO;
+
+ local_bh_disable();
+ status = be_process_mcc(adapter);
+ local_bh_enable();
+
+ if (atomic_read(&mcc_obj->q.used) == 0)
+ break;
+ usleep_range(500, 1000);
+ }
+ if (i == mcc_timeout) {
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ be_set_error(adapter, BE_ERROR_FW);
+ return -EIO;
+ }
+ return status;
+}
+
+/* Notify MCC requests and wait for completion */
+static int be_mcc_notify_wait(struct be_adapter *adapter)
+{
+ int status;
+ struct be_mcc_wrb *wrb;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ u32 index = mcc_obj->q.head;
+ struct be_cmd_resp_hdr *resp;
+
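+ /* The most recently posted WRB sits just behind the queue head; grab its
+ * resp hdr now so its status can be read once the completion arrives.
+ */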
+ index_dec(&index, mcc_obj->q.len);
+ wrb = queue_index_node(&mcc_obj->q, index);
+
+ resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
+
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto out;
+
+ status = be_mcc_wait_compl(adapter);
+ if (status == -EIO)
+ goto out;
+
+ status = (resp->base_status |
+ ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
+ CQE_ADDL_STATUS_SHIFT));
+out:
+ return status;
+}
+
+static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
+{
+ int msecs = 0;
+ u32 ready;
+
+ do {
+ if (be_check_error(adapter, BE_ERROR_ANY))
+ return -EIO;
+
+ ready = ioread32(db);
+ if (ready == 0xffffffff)
+ return -1;
+
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
+ if (ready)
+ break;
+
+ if (msecs > 4000) {
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ be_set_error(adapter, BE_ERROR_FW);
+ be_detect_error(adapter);
+ return -1;
+ }
+
+ msleep(1);
+ msecs++;
+ } while (true);
+
+ return 0;
+}
+
+/* Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
+ */
+static int be_mbox_notify_wait(struct be_adapter *adapter)
+{
+ int status;
+ u32 val = 0;
+ void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
+ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
+ struct be_mcc_mailbox *mbox = mbox_mem->va;
+ struct be_mcc_compl *compl = &mbox->compl;
+
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ val |= MPU_MAILBOX_DB_HI_MASK;
+ /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
+ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
+ iowrite32(val, db);
+
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ val = 0;
+ /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
+ val |= (u32)(mbox_mem->dma >> 4) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(compl)) {
+ status = be_mcc_compl_process(adapter, &mbox->compl);
+ be_mcc_compl_use(compl);
+ if (status)
+ return status;
+ } else {
+ dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
+ return -1;
+ }
+ return 0;
+}
+
+u16 be_POST_stage_get(struct be_adapter *adapter)
+{
+ u32 sem;
+
+ if (BEx_chip(adapter))
+ sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
+ else
+ pci_read_config_dword(adapter->pdev,
+ SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+
+ return sem & POST_STAGE_MASK;
+}
+
+static int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 30
+ u32 sliport_status;
+ int i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ return 0;
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
+ !(sliport_status & SLIPORT_STATUS_RN_MASK))
+ return -EIO;
+
+ msleep(1000);
+ }
+
+ return sliport_status ? : -1;
+}
+
+int be_fw_wait_ready(struct be_adapter *adapter)
+{
+ u16 stage;
+ int status, timeout = 0;
+ struct device *dev = &adapter->pdev->dev;
+
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ if (status) {
+ stage = status;
+ goto err;
+ }
+ return 0;
+ }
+
+ do {
+ /* There's no means to poll POST state on BE2/3 VFs */
+ if (BEx_chip(adapter) && be_virtfn(adapter))
+ return 0;
+
+ stage = be_POST_stage_get(adapter);
+ if (stage == POST_STAGE_ARMFW_RDY)
+ return 0;
+
+ dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
+ if (msleep_interruptible(2000)) {
+ dev_err(dev, "Waiting for POST aborted\n");
+ return -EINTR;
+ }
+ timeout += 2;
+ } while (timeout < 60);
+
+err:
+ dev_err(dev, "POST timeout; stage=%#x\n", stage);
+ return -ETIMEDOUT;
+}
+
+static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
+{
+ return &wrb->payload.sgl[0];
+}
+
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
+{
+ wrb->tag0 = addr & 0xFFFFFFFF;
+ wrb->tag1 = upper_32_bits(addr);
+}
+
+/* Don't touch the hdr after it's prepared */
+/* mem will be NULL for embedded commands */
+static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+ u8 subsystem, u8 opcode, int cmd_len,
+ struct be_mcc_wrb *wrb,
+ struct be_dma_mem *mem)
+{
+ struct be_sge *sge;
+
+ req_hdr->opcode = opcode;
+ req_hdr->subsystem = subsystem;
+ req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+ req_hdr->version = 0;
+ fill_wrb_tags(wrb, (ulong)req_hdr);
+ wrb->payload_length = cmd_len;
+ if (mem) {
+ wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
+ MCC_WRB_SGE_CNT_SHIFT;
+ sge = nonembedded_sgl(wrb);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
+ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(mem->size);
+ } else
+ wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+ be_dws_cpu_to_le(wrb, 8);
+}
+
+static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
+ struct be_dma_mem *mem)
+{
+ int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
+ u64 dma = (u64)mem->dma;
+
+ for (i = 0; i < buf_pages; i++) {
+ pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
+ pages[i].hi = cpu_to_le32(upper_32_bits(dma));
+ dma += PAGE_SIZE_4K;
+ }
+}
+
+static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
+ struct be_mcc_wrb *wrb = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+
+ memset(wrb, 0, sizeof(*wrb));
+ return wrb;
+}
+
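+/* Return a zeroed WRB from the MCC queue head, or NULL if the queue is full
+ * or has not been created yet.
+ */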
+static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
+{
+ struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ struct be_mcc_wrb *wrb;
+
+ if (!mccq->created)
+ return NULL;
+
+ if (atomic_read(&mccq->used) >= mccq->len)
+ return NULL;
+
+ wrb = queue_head_node(mccq);
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ memset(wrb, 0, sizeof(*wrb));
+ return wrb;
+}
+
+static bool use_mcc(struct be_adapter *adapter)
+{
+ return adapter->mcc_obj.q.created;
+}
+
+/* Must be used only in process context */
+static int be_cmd_lock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter)) {
+ mutex_lock(&adapter->mcc_lock);
+ return 0;
+ } else {
+ return mutex_lock_interruptible(&adapter->mbox_lock);
+ }
+}
+
+/* Must be used only in process context */
+static void be_cmd_unlock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter))
+ return mutex_unlock(&adapter->mcc_lock);
+ else
+ return mutex_unlock(&adapter->mbox_lock);
+}
+
+static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+
+ if (use_mcc(adapter)) {
+ dest_wrb = wrb_from_mccq(adapter);
+ if (!dest_wrb)
+ return NULL;
+ } else {
+ dest_wrb = wrb_from_mbox(adapter);
+ }
+
+ memcpy(dest_wrb, wrb, sizeof(*wrb));
+ if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
+ fill_wrb_tags(dest_wrb, (ulong)embedded_payload(wrb));
+
+ return dest_wrb;
+}
+
+/* Must be used only in process context */
+static int be_cmd_notify_wait(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+ int status;
+
+ status = be_cmd_lock(adapter);
+ if (status)
+ return status;
+
+ dest_wrb = be_cmd_copy(adapter, wrb);
+ if (!dest_wrb) {
+ status = -EBUSY;
+ goto unlock;
+ }
+
+ if (use_mcc(adapter))
+ status = be_mcc_notify_wait(adapter);
+ else
+ status = be_mbox_notify_wait(adapter);
+
+ if (!status)
+ memcpy(wrb, dest_wrb, sizeof(*wrb));
+
+unlock:
+ be_cmd_unlock(adapter);
+ return status;
+}
+
+/* Tell fw we're about to start firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
+int be_cmd_fw_init(struct be_adapter *adapter)
+{
+ u8 *wrb;
+ int status;
+
+ if (lancer_chip(adapter))
+ return 0;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = (u8 *)wrb_from_mbox(adapter);
+ *wrb++ = 0xFF;
+ *wrb++ = 0x12;
+ *wrb++ = 0x34;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xFF;
+ *wrb++ = 0x56;
+ *wrb++ = 0x78;
+ *wrb = 0xFF;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Tell fw we're done with firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
+int be_cmd_fw_clean(struct be_adapter *adapter)
+{
+ u8 *wrb;
+ int status;
+
+ if (lancer_chip(adapter))
+ return 0;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = (u8 *)wrb_from_mbox(adapter);
+ *wrb++ = 0xFF;
+ *wrb++ = 0xAA;
+ *wrb++ = 0xBB;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xCC;
+ *wrb++ = 0xDD;
+ *wrb = 0xFF;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eq_create *req;
+ struct be_dma_mem *q_mem = &eqo->q.dma_mem;
+ int status, ver = 0;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+ NULL);
+
+	/* Support for EQ_CREATEv2 is available only from SH-R onwards */
+ if (!(BEx_chip(adapter) || lancer_chip(adapter)))
+ ver = 2;
+
+ req->hdr.version = ver;
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
+	/* 4-byte EQE */
+ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
+ AMAP_SET_BITS(struct amap_eq_context, count, req->context,
+ __ilog2_u32(eqo->q.len / 256));
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+
+ eqo->q.id = le16_to_cpu(resp->eq_id);
+ eqo->msix_idx =
+ (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
+ eqo->q.created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Use MCC */
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ bool permanent, u32 if_handle, u32 pmac_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mac_query *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+ NULL);
+ req->type = MAC_ADDRESS_TYPE_NETWORK;
+ if (permanent) {
+ req->permanent = 1;
+ } else {
+ req->if_id = cpu_to_le16((u16)if_handle);
+ req->pmac_id = cpu_to_le32(pmac_id);
+ req->permanent = 0;
+ }
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
+
+ memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
+ u32 if_id, u32 *pmac_id, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_pmac_add *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+ NULL);
+
+ req->hdr.domain = domain;
+ req->if_id = cpu_to_le32(if_id);
+ memcpy(req->mac_address, mac_addr, ETH_ALEN);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
+
+ *pmac_id = le32_to_cpu(resp->pmac_id);
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ status = -EPERM;
+
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_pmac_del *req;
+ int status;
+
+ if (pmac_id == -1)
+ return 0;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = dom;
+ req->if_id = cpu_to_le32(if_id);
+ req->pmac_id = cpu_to_le32(pmac_id);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses Mbox */
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+ struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cq_create *req;
+ struct be_dma_mem *q_mem = &cq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+ NULL);
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ if (BEx_chip(adapter)) {
+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+ coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+ } else {
+ req->hdr.version = 2;
+ req->page_size = 1; /* 1 for 4K */
+
+ /* coalesce-wm field in this cmd is not relevant to Lancer.
+ * Lancer uses COMMON_MODIFY_CQ to set this field
+ */
+ if (!lancer_chip(adapter))
+ AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
+ no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+ }
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+
+ cq->id = le16_to_cpu(resp->cq_id);
+ cq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
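+/* Queue lengths are encoded as log2(len) + 1 in the *_CREATE cmds;
+ * the maximum encoding (16) is represented as 0.
+ */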
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+
+static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_ext_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+ NULL);
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (BEx_chip(adapter)) {
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ } else {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
+ ctxt, 1);
+ }
+
+	/* Subscribe to Link State, Sliport, Group 5 and QNQ async events */
+ req->async_event_bitmap[0] =
+ cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
+ BIT(ASYNC_EVENT_CODE_GRP_5) |
+ BIT(ASYNC_EVENT_CODE_QNQ) |
+ BIT(ASYNC_EVENT_CODE_SLIPORT));
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+static int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+ NULL);
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
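+/* Try the extended MCC queue create first (it carries the async event
+ * subscription); on older BE2/BE3 FW that rejects it, fall back to the
+ * original create cmd.
+ */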
+int be_cmd_mccq_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq, struct be_queue_info *cq)
+{
+ int status;
+
+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
+ if (status && BEx_chip(adapter)) {
+		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
+			 "or newer to avoid conflicting priorities between NIC "
+			 "and FCoE traffic\n");
+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
+ }
+ return status;
+}
+
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
+{
+ struct be_mcc_wrb wrb = {0};
+ struct be_cmd_req_eth_tx_create *req;
+ struct be_queue_info *txq = &txo->q;
+ struct be_queue_info *cq = &txo->cq;
+ struct be_dma_mem *q_mem = &txq->dma_mem;
+ int status, ver = 0;
+
+ req = embedded_payload(&wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+
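+	/* Cmd version: v1 for Lancer, v2 for Skyhawk and for BE3 functions
+	 * with the super-NIC capability, v0 otherwise. v1+ requests also
+	 * carry the if_id.
+	 */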
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ } else if (BEx_chip(adapter)) {
+ if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
+ req->hdr.version = 2;
+ } else { /* For SH */
+ req->hdr.version = 2;
+ }
+
+ if (req->hdr.version > 0)
+ req->if_id = cpu_to_le16(adapter->if_handle);
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->ulp_num = BE_ULP1_NUM;
+ req->type = BE_ETH_TX_RING_TYPE_STANDARD;
+ req->cq_id = cpu_to_le16(cq->id);
+ req->queue_size = be_encoded_q_len(txq->len);
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+ ver = req->hdr.version;
+
+ status = be_cmd_notify_wait(adapter, &wrb);
+ if (!status) {
+ struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
+
+ txq->id = le16_to_cpu(resp->cid);
+ if (ver == 2)
+ txo->db_offset = le32_to_cpu(resp->db_offset);
+ else
+ txo->db_offset = DB_TXULP1_OFFSET;
+ txq->created = true;
+ }
+
+ return status;
+}
+
+/* Uses MCC */
+int be_cmd_rxq_create(struct be_adapter *adapter,
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u32 if_id, u32 rss, u8 *rss_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eth_rx_create *req;
+ struct be_dma_mem *q_mem = &rxq->dma_mem;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+
+ req->cq_id = cpu_to_le16(cq_id);
+ req->frag_size = fls(frag_size) - 1;
+ req->num_pages = 2;
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+ req->interface_id = cpu_to_le32(if_id);
+ req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
+ req->rss_queue = cpu_to_le32(rss);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
+
+ rxq->id = le16_to_cpu(resp->id);
+ rxq->created = true;
+ *rss_id = resp->rss_id;
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Generic destroyer function for all types of queues
+ * Uses Mbox
+ */
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int queue_type)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_q_destroy *req;
+ u8 subsys = 0, opcode = 0;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ switch (queue_type) {
+ case QTYPE_EQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_EQ_DESTROY;
+ break;
+ case QTYPE_CQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_CQ_DESTROY;
+ break;
+ case QTYPE_TXQ:
+ subsys = CMD_SUBSYSTEM_ETH;
+ opcode = OPCODE_ETH_TX_DESTROY;
+ break;
+ case QTYPE_RXQ:
+ subsys = CMD_SUBSYSTEM_ETH;
+ opcode = OPCODE_ETH_RX_DESTROY;
+ break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
+ default:
+ BUG();
+ }
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
+ NULL);
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mbox_notify_wait(adapter);
+ q->created = false;
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses MCC */
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_q_destroy *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mcc_notify_wait(adapter);
+ q->created = false;
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Create an rx filtering policy configuration on an i/f
+ * Will use MBOX only if MCCQ has not been created.
+ */
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u32 *if_handle, u32 domain)
+{
+ struct be_mcc_wrb wrb = {0};
+ struct be_cmd_req_if_create *req;
+ int status;
+
+ req = embedded_payload(&wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+ sizeof(*req), &wrb, NULL);
+ req->hdr.domain = domain;
+ req->capability_flags = cpu_to_le32(cap_flags);
+ req->enable_flags = cpu_to_le32(en_flags);
+ req->pmac_invalid = true;
+
+ status = be_cmd_notify_wait(adapter, &wrb);
+ if (!status) {
+ struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
+
+ *if_handle = le32_to_cpu(resp->interface_id);
+
+ /* Hack to retrieve VF's pmac-id on BE3 */
+ if (BE3_chip(adapter) && be_virtfn(adapter))
+ adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
+ }
+ return status;
+}
+
+/* Uses MCCQ if available else MBOX */
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
+{
+ struct be_mcc_wrb wrb = {0};
+ struct be_cmd_req_if_destroy *req;
+ int status;
+
+ if (interface_id == -1)
+ return 0;
+
+ req = embedded_payload(&wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+ sizeof(*req), &wrb, NULL);
+ req->hdr.domain = domain;
+ req->interface_id = cpu_to_le32(interface_id);
+
+ status = be_cmd_notify_wait(adapter, &wrb);
+ return status;
+}
+
+/* GET_STATISTICS is a non-embedded command: the request is not carried
+ * inside the WRB but in a separate DMA memory block.
+ * Uses asynchronous MCC.
+ */
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_hdr *hdr;
+ int status = 0;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ hdr = nonemb_cmd->va;
+
+ be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+ nonemb_cmd);
+
+	/* BE2 supports only v0 of this cmd; BE3 and Lancer use v1,
+	 * all later chips use v2.
+	 */
+	if (BE2_chip(adapter))
+		hdr->version = 0;
+	else if (BE3_chip(adapter) || lancer_chip(adapter))
+		hdr->version = 1;
+	else
+		hdr->version = 2;
+
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
+ adapter->stats_cmd_sent = true;
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Lancer Stats */
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_pport_stats *req;
+ int status = 0;
+
+ if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
+ CMD_SUBSYSTEM_ETH))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+ wrb, nonemb_cmd);
+
+ req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
+ req->cmd_params.params.reset_stats = 0;
+
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
+ adapter->stats_cmd_sent = true;
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static int be_mac_to_link_speed(int mac_speed)
+{
+ switch (mac_speed) {
+ case PHY_LINK_SPEED_ZERO:
+ return 0;
+ case PHY_LINK_SPEED_10MBPS:
+ return 10;
+ case PHY_LINK_SPEED_100MBPS:
+ return 100;
+ case PHY_LINK_SPEED_1GBPS:
+ return 1000;
+ case PHY_LINK_SPEED_10GBPS:
+ return 10000;
+ case PHY_LINK_SPEED_20GBPS:
+ return 20000;
+ case PHY_LINK_SPEED_25GBPS:
+ return 25000;
+ case PHY_LINK_SPEED_40GBPS:
+ return 40000;
+ }
+ return 0;
+}
+
+/* Uses synchronous mcc
+ * Returns link_speed in Mbps
+ */
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ u8 *link_status, u32 dom)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_link_status *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ if (link_status)
+ *link_status = LINK_DOWN;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req), wrb, NULL);
+
+	/* version 1 of the cmd is supported by all chips except BE2 */
+ if (!BE2_chip(adapter))
+ req->hdr.version = 1;
+
+ req->hdr.domain = dom;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
+
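+		/* resp->link_speed is in units of 10 Mbps; when it is zero,
+		 * fall back to decoding the coarser mac_speed enum.
+		 */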
+ if (link_speed) {
+ *link_speed = resp->link_speed ?
+ le16_to_cpu(resp->link_speed) * 10 :
+ be_mac_to_link_speed(resp->mac_speed);
+
+ if (!resp->logical_link_status)
+ *link_speed = 0;
+ }
+ if (link_status)
+ *link_status = resp->logical_link_status;
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_cntl_addnl_attribs *req;
+ int status = 0;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+ sizeof(*req), wrb, NULL);
+
+ status = be_mcc_notify(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
+{
+ struct be_mcc_wrb wrb = {0};
+ struct be_cmd_req_get_fat *req;
+ int status;
+
+ req = embedded_payload(&wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
+ &wrb, NULL);
+ req->fat_operation = cpu_to_le32(QUERY_FAT);
+ status = be_cmd_notify_wait(adapter, &wrb);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
+
+ if (dump_size && resp->log_size)
+ *dump_size = le32_to_cpu(resp->log_size) -
+ sizeof(u32);
+ }
+ return status;
+}
+
+int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
+{
+ struct be_dma_mem get_fat_cmd;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fat *req;
+ u32 offset = 0, total_size, buf_size,
+ log_offset = sizeof(u32), payload_len;
+ int status;
+
+ if (buf_len == 0)
+ return 0;
+
+ total_size = buf_len;
+
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
+ get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma, GFP_ATOMIC);
+ if (!get_fat_cmd.va)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->mcc_lock);
+
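+	/* Retrieve the FAT log in chunks of at most 60KB per MCC cmd */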
+ while (total_size) {
+ buf_size = min(total_size, (u32)60 * 1024);
+ total_size -= buf_size;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = get_fat_cmd.va;
+
+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, payload_len,
+ wrb, &get_fat_cmd);
+
+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
+ req->read_log_offset = cpu_to_le32(log_offset);
+ req->read_log_length = cpu_to_le32(buf_size);
+ req->data_buffer_size = cpu_to_le32(buf_size);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
+
+ memcpy(buf + offset,
+ resp->data_buffer,
+ le32_to_cpu(resp->read_log_length));
+ } else {
+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
+ goto err;
+ }
+ offset += buf_size;
+ log_offset += buf_size;
+ }
+err:
+ dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+ get_fat_cmd.va, get_fat_cmd.dma);
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_fw_ver(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fw_version *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+ NULL);
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
+
+ strscpy(adapter->fw_ver, resp->firmware_version_string,
+ sizeof(adapter->fw_ver));
+ strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ sizeof(adapter->fw_on_flash));
+ }
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Set the EQ delay interval of an EQ to the specified value
+ * Uses async MCC
+ */
+static int __be_cmd_modify_eqd(struct be_adapter *adapter,
+ struct be_set_eqd *set_eqd, int num)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_modify_eq_delay *req;
+ int status = 0, i;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+ NULL);
+
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->set_eqd[i].phase = 0;
+ req->set_eqd[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
+
+ status = be_mcc_notify(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
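+/* Program EQ delays in batches of up to 8 entries per MODIFY_EQ_DELAY cmd */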
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+ int num)
+{
+ int num_eqs, i = 0;
+
+ while (num) {
+ num_eqs = min(num, 8);
+ __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
+ i += num_eqs;
+ num -= num_eqs;
+ }
+
+ return 0;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ u32 num, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_vlan_config *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+ wrb, NULL);
+ req->hdr.domain = domain;
+
+ req->interface_id = if_id;
+ req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
+ req->num_vlan = num;
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_dma_mem *mem = &adapter->rx_filter;
+ struct be_cmd_req_rx_filter *req = mem->va;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ memset(req, 0, sizeof(*req));
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+ wrb, mem);
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ req->if_flags_mask = cpu_to_le32(flags);
+ req->if_flags = (value == ON) ? req->if_flags_mask : 0;
+
+ if (flags & BE_IF_FLAGS_MULTICAST) {
+ int i;
+
+		/* Reset mcast promisc mode if it is already set: include the
+		 * bit in if_flags_mask but leave it clear in if_flags
+		 */
+ req->if_flags_mask |=
+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
+ be_if_cap_flags(adapter));
+ req->mcast_num = cpu_to_le32(adapter->mc_count);
+ for (i = 0; i < adapter->mc_count; i++)
+ ether_addr_copy(req->mcast_mac[i].byte,
+ adapter->mc_list[i].mac);
+ }
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if ((flags & be_if_cap_flags(adapter)) != flags) {
+ dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
+ dev_warn(dev, "Interface is capable of 0x%x flags only\n",
+ be_if_cap_flags(adapter));
+ }
+ flags &= be_if_cap_flags(adapter);
+ if (!flags)
+ return -ENOTSUPP;
+
+ return __be_cmd_rx_filter(adapter, flags, value);
+}
+
+/* Uses synchronous mcc */
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_flow_control *req;
+ int status;
+
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.version = 1;
+ req->tx_flow_control = cpu_to_le16((u16)tx_fc);
+ req->rx_flow_control = cpu_to_le16((u16)rx_fc);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+
+ if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_flow_control *req;
+ int status;
+
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_flow_control *resp =
+ embedded_payload(wrb);
+
+ *tx_fc = le16_to_cpu(resp->tx_flow_control);
+ *rx_fc = le16_to_cpu(resp->rx_flow_control);
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_query_fw_cfg(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_query_fw_cfg *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ sizeof(*req), wrb, NULL);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
+
+ adapter->port_num = le32_to_cpu(resp->phys_port);
+ adapter->function_mode = le32_to_cpu(resp->function_mode);
+ adapter->function_caps = le32_to_cpu(resp->function_caps);
+ adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "FW config: function_mode=0x%x, function_caps=0x%x\n",
+ adapter->function_mode, adapter->function_caps);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_reset_function(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_hdr *req;
+ int status;
+
+ if (lancer_chip(adapter)) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_wait_ready(adapter);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Adapter in non recoverable error\n");
+ return status;
+ }
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+ NULL);
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_rss_config *req;
+ int status;
+
+ if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+ return 0;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ req->enable_rss = cpu_to_le16(rss_hash_opts);
+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
+
+ if (!BEx_chip(adapter))
+ req->hdr.version = 1;
+
+ memcpy(req->cpu_table, rsstable, table_size);
+ memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
+ u8 bcn, u8 sts, u8 state)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_enable_disable_beacon *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+ sizeof(*req), wrb, NULL);
+
+ req->port_num = port_num;
+ req->beacon_state = state;
+ req->beacon_duration = bcn;
+ req->status_duration = sts;
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_beacon_state *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+ wrb, NULL);
+
+ req->port_num = port_num;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_beacon_state *resp =
+ embedded_payload(wrb);
+
+ *state = resp->beacon_state;
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ u8 page_num, u32 off, u32 len, u8 *data)
+{
+ struct be_dma_mem cmd;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_port_type *req;
+ int status;
+
+ if (page_num > TR_PAGE_A2)
+ return -EINVAL;
+
+ cmd.size = sizeof(struct be_cmd_resp_port_type);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_TRANSRECV_DATA,
+ cmd.size, wrb, &cmd);
+
+ req->port = cpu_to_le32(adapter->hba_port_num);
+ req->page_num = cpu_to_le32(page_num);
+ status = be_mcc_notify_wait(adapter);
+ if (!status && len > 0) {
+ struct be_cmd_resp_port_type *resp = cmd.va;
+
+ memcpy(data, resp->page_data + off, len);
+ }
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ return status;
+}
+
+static int lancer_cmd_write_object(struct be_adapter *adapter,
+ struct be_dma_mem *cmd, u32 data_size,
+ u32 data_offset, const char *obj_name,
+ u32 *data_written, u8 *change_status,
+ u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_write_object *req;
+ struct lancer_cmd_resp_write_object *resp;
+ void *ctxt = NULL;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+ adapter->flash_status = 0;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object), wrb,
+ NULL);
+
+ ctxt = &req->context;
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ write_length, ctxt, data_size);
+
+ if (data_size == 0)
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ eof, ctxt, 1);
+ else
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ eof, ctxt, 0);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+ req->write_offset = cpu_to_le32(data_offset);
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
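+	/* The object data to be written follows the request header in the
+	 * same DMA buffer
+	 */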
+ req->addr_low = cpu_to_le32((cmd->dma +
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
+ sizeof(struct lancer_cmd_req_write_object)));
+
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
+ mutex_unlock(&adapter->mcc_lock);
+
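+	/* adapter->flash_status and et_cmd_compl are updated when the
+	 * WRITE_OBJECT completion arrives; wait for it without holding
+	 * mcc_lock since flashing can take up to a minute.
+	 */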
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(60000)))
+ status = -ETIMEDOUT;
+ else
+ status = adapter->flash_status;
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_written = le32_to_cpu(resp->actual_write_len);
+ *change_status = resp->change_status;
+ } else {
+ *addn_status = resp->additional_status;
+ }
+
+ return status;
+
+err_unlock:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_query_cable_type(struct be_adapter *adapter)
+{
+ u8 page_data[PAGE_DATA_LEN];
+ int status;
+
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+ 0, PAGE_DATA_LEN, page_data);
+ if (!status) {
+ switch (adapter->phy.interface_type) {
+ case PHY_TYPE_QSFP:
+ adapter->phy.cable_type =
+ page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
+ break;
+ case PHY_TYPE_SFP_PLUS_10GB:
+ adapter->phy.cable_type =
+ page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
+ break;
+ default:
+ adapter->phy.cable_type = 0;
+ break;
+ }
+ }
+ return status;
+}
+
+int be_cmd_query_sfp_info(struct be_adapter *adapter)
+{
+ u8 page_data[PAGE_DATA_LEN];
+ int status;
+
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+ 0, PAGE_DATA_LEN, page_data);
+ if (!status) {
+ strscpy(adapter->phy.vendor_name, page_data +
+ SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
+ strscpy(adapter->phy.vendor_pn,
+ page_data + SFP_VENDOR_PN_OFFSET,
+ SFP_VENDOR_NAME_LEN - 1);
+ }
+
+ return status;
+}
+
+static int lancer_cmd_delete_object(struct be_adapter *adapter,
+ const char *obj_name)
+{
+ struct lancer_cmd_req_delete_object *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_DELETE_OBJECT,
+ sizeof(*req), wrb, NULL);
+
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_read_object *req;
+ struct lancer_cmd_resp_read_object *resp;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
+
+ req->desired_read_len = cpu_to_le32(data_size);
+ req->read_offset = cpu_to_le32(data_offset);
+ strcpy(req->object_name, obj_name);
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
+ req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+ status = be_mcc_notify_wait(adapter);
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_read = le32_to_cpu(resp->actual_read_len);
+ *eof = le32_to_cpu(resp->eof);
+ } else {
+ *addn_status = resp->additional_status;
+ }
+
+err_unlock:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static int be_cmd_write_flashrom(struct be_adapter *adapter,
+ struct be_dma_mem *cmd, u32 flash_type,
+ u32 flash_opcode, u32 img_offset, u32 buf_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_write_flashrom *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+ adapter->flash_status = 0;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+ req = cmd->va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+ cmd);
+
+ req->params.op_type = cpu_to_le32(flash_type);
+ if (flash_type == OPTYPE_OFFSET_SPECIFIED)
+ req->params.offset = cpu_to_le32(img_offset);
+
+ req->params.op_code = cpu_to_le32(flash_opcode);
+ req->params.data_buf_size = cpu_to_le32(buf_size);
+
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
+ mutex_unlock(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(40000)))
+ status = -ETIMEDOUT;
+ else
+ status = adapter->flash_status;
+
+ return status;
+
+err_unlock:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ u16 img_optype, u32 img_offset, u32 crc_offset)
+{
+ struct be_cmd_read_flash_crc *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
+ wrb, NULL);
+
+ req->params.op_type = cpu_to_le32(img_optype);
+ if (img_optype == OPTYPE_OFFSET_SPECIFIED)
+ req->params.offset = cpu_to_le32(img_offset + crc_offset);
+ else
+ req->params.offset = cpu_to_le32(crc_offset);
+
+ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
+ req->params.data_buf_size = cpu_to_le32(0x4);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status)
+ memcpy(flashed_crc, req->crc, 4);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+
+static bool phy_flashing_required(struct be_adapter *adapter)
+{
+ return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
+ adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
+}
+
+static bool is_comp_in_ufi(struct be_adapter *adapter,
+ struct flash_section_info *fsec, int type)
+{
+ int i = 0, img_type = 0;
+ struct flash_section_info_g2 *fsec_g2 = NULL;
+
+ if (BE2_chip(adapter))
+ fsec_g2 = (struct flash_section_info_g2 *)fsec;
+
+ for (i = 0; i < MAX_FLASH_COMP; i++) {
+ if (fsec_g2)
+ img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
+ else
+ img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+
+ if (img_type == type)
+ return true;
+ }
+ return false;
+}
+
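+/* Scan the UFI image past the file/image headers, 32 bytes at a time,
+ * looking for the flash section cookie.
+ */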
+static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+ int header_size,
+ const struct firmware *fw)
+{
+ struct flash_section_info *fsec = NULL;
+ const u8 *p = fw->data;
+
+ p += header_size;
+ while (p < (fw->data + fw->size)) {
+ fsec = (struct flash_section_info *)p;
+ if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
+ return fsec;
+ p += 32;
+ }
+ return NULL;
+}
+
+static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
+ u32 img_offset, u32 img_size, int hdr_size,
+ u16 img_optype, bool *crc_match)
+{
+ u32 crc_offset;
+ int status;
+ u8 crc[4];
+
+ status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
+ img_size - 4);
+ if (status)
+ return status;
+
+ crc_offset = hdr_size + img_offset + img_size - 4;
+
+	/* Skip flashing if the CRC of the flashed region matches */
+ if (!memcmp(crc, p + crc_offset, 4))
+ *crc_match = true;
+ else
+ *crc_match = false;
+
+ return status;
+}
+
+static int be_flash(struct be_adapter *adapter, const u8 *img,
+ struct be_dma_mem *flash_cmd, int optype, int img_size,
+ u32 img_offset)
+{
+ u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
+ struct be_cmd_write_flashrom *req = flash_cmd->va;
+ int status;
+
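+	/* Send the image in chunks of up to 32KB; intermediate chunks use
+	 * the SAVE op and the final chunk uses the FLASH op.
+	 */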
+ while (total_bytes) {
+ num_bytes = min_t(u32, 32 * 1024, total_bytes);
+
+ total_bytes -= num_bytes;
+
+ if (!total_bytes) {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_FLASH;
+ else
+ flash_op = FLASHROM_OPER_FLASH;
+ } else {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_SAVE;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ }
+
+ memcpy(req->data_buf, img, num_bytes);
+ img += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
+ flash_op, img_offset +
+ bytes_sent, num_bytes);
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
+ optype == OPTYPE_PHY_FW)
+ break;
+ else if (status)
+ return status;
+
+ bytes_sent += num_bytes;
+ }
+ return 0;
+}
+
+#define NCSI_UPDATE_LOG "NCSI section update is not supported in FW ver %s\n"
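+/* Returns true if the given FW version is new enough to support updating
+ * the NCSI section (i.e. it is >= the minimum version v1 below).
+ */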
+static bool be_fw_ncsi_supported(char *ver)
+{
+ int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */
+ int v2[4];
+ int i;
+
+ if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4)
+ return false;
+
+ for (i = 0; i < 4; i++) {
+ if (v1[i] < v2[i])
+ return true;
+ else if (v1[i] > v2[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* For BE2, BE3 and BE3-R */
+static int be_flash_BEx(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
+{
+ int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
+ struct device *dev = &adapter->pdev->dev;
+ struct flash_section_info *fsec = NULL;
+ int status, i, filehdr_size, num_comp;
+ const struct flash_comp *pflashcomp;
+ bool crc_match;
+ const u8 *p;
+
+ static const struct flash_comp gen3_flash_types[] = {
+ { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
+ { BE3_REDBOOT_START, OPTYPE_REDBOOT,
+ BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
+ { BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
+ BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
+ { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
+ BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
+ { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
+ BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
+ { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
+ { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
+ { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
+ BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
+ { BE3_NCSI_START, OPTYPE_NCSI_FW,
+ BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
+ { BE3_PHY_FW_START, OPTYPE_PHY_FW,
+ BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
+ };
+
+ static const struct flash_comp gen2_flash_types[] = {
+ { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
+ { BE2_REDBOOT_START, OPTYPE_REDBOOT,
+ BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
+ { BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
+ BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
+ { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
+ BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
+ { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
+ BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
+ { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
+ { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
+ { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
+ BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
+ };
+
+ if (BE3_chip(adapter)) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ num_comp = ARRAY_SIZE(gen3_flash_types);
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
+ num_comp = ARRAY_SIZE(gen2_flash_types);
+ img_hdrs_size = 0;
+ }
+
+	/* Get flash section info */
+ fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+ if (!fsec) {
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
+ return -1;
+ }
+ for (i = 0; i < num_comp; i++) {
+ if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
+ continue;
+
+ if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
+ !be_fw_ncsi_supported(adapter->fw_ver)) {
+ dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver);
+ continue;
+ }
+
+ if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
+ !phy_flashing_required(adapter))
+ continue;
+
+ if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
+ status = be_check_flash_crc(adapter, fw->data,
+ pflashcomp[i].offset,
+ pflashcomp[i].size,
+ filehdr_size +
+ img_hdrs_size,
+ OPTYPE_REDBOOT, &crc_match);
+ if (status) {
+ dev_err(dev,
+ "Could not get CRC for 0x%x region\n",
+ pflashcomp[i].optype);
+ continue;
+ }
+
+ if (crc_match)
+ continue;
+ }
+
+ p = fw->data + filehdr_size + pflashcomp[i].offset +
+ img_hdrs_size;
+ if (p + pflashcomp[i].size > fw->data + fw->size)
+ return -1;
+
+ status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
+ pflashcomp[i].size, 0);
+ if (status) {
+ dev_err(dev, "Flashing section type 0x%x failed\n",
+ pflashcomp[i].img_type);
+ return status;
+ }
+ }
+ return 0;
+}
+
+static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
+{
+ u32 img_type = le32_to_cpu(fsec_entry.type);
+ u16 img_optype = le16_to_cpu(fsec_entry.optype);
+
+ if (img_optype != 0xFFFF)
+ return img_optype;
+
+ switch (img_type) {
+ case IMAGE_FIRMWARE_ISCSI:
+ img_optype = OPTYPE_ISCSI_ACTIVE;
+ break;
+ case IMAGE_BOOT_CODE:
+ img_optype = OPTYPE_REDBOOT;
+ break;
+ case IMAGE_OPTION_ROM_ISCSI:
+ img_optype = OPTYPE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_PXE:
+ img_optype = OPTYPE_PXE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_FCOE:
+ img_optype = OPTYPE_FCOE_BIOS;
+ break;
+ case IMAGE_FIRMWARE_BACKUP_ISCSI:
+ img_optype = OPTYPE_ISCSI_BACKUP;
+ break;
+ case IMAGE_NCSI:
+ img_optype = OPTYPE_NCSI_FW;
+ break;
+ case IMAGE_FLASHISM_JUMPVECTOR:
+ img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
+ break;
+ case IMAGE_FIRMWARE_PHY:
+ img_optype = OPTYPE_SH_PHY_FW;
+ break;
+ case IMAGE_REDBOOT_DIR:
+ img_optype = OPTYPE_REDBOOT_DIR;
+ break;
+ case IMAGE_REDBOOT_CONFIG:
+ img_optype = OPTYPE_REDBOOT_CONFIG;
+ break;
+ case IMAGE_UFI_DIR:
+ img_optype = OPTYPE_UFI_DIR;
+ break;
+ default:
+ break;
+ }
+
+ return img_optype;
+}
+
+static int be_flash_skyhawk(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
+{
+ int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
+ bool crc_match, old_fw_img, flash_offset_support = true;
+ struct device *dev = &adapter->pdev->dev;
+ struct flash_section_info *fsec = NULL;
+ u32 img_offset, img_size, img_type;
+ u16 img_optype, flash_optype;
+ int status, i, filehdr_size;
+ const u8 *p;
+
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+ if (!fsec) {
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
+ return -EINVAL;
+ }
+
+retry_flash:
+ for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
+ img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
+ img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+ img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+ img_optype = be_get_img_optype(fsec->fsec_entry[i]);
+ old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
+
+ if (img_optype == 0xFFFF)
+ continue;
+
+ if (flash_offset_support)
+ flash_optype = OPTYPE_OFFSET_SPECIFIED;
+ else
+ flash_optype = img_optype;
+
+ /* Don't bother verifying CRC if an old FW image is being
+ * flashed
+ */
+ if (old_fw_img)
+ goto flash;
+
+ status = be_check_flash_crc(adapter, fw->data, img_offset,
+ img_size, filehdr_size +
+ img_hdrs_size, flash_optype,
+ &crc_match);
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+ /* The current FW image on the card does not support
+ * OFFSET based flashing. Retry using older mechanism
+ * of OPTYPE based flashing
+ */
+ if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+ flash_offset_support = false;
+ goto retry_flash;
+ }
+
+ /* The current FW image on the card does not recognize
+ * the new FLASH op_type. The FW download is partially
+ * complete. Reboot the server now to enable FW image
+ * to recognize the new FLASH op_type. To complete the
+ * remaining process, download the same FW again after
+ * the reboot.
+ */
+ dev_err(dev, "Flash incomplete. Reset the server\n");
+ dev_err(dev, "Download FW image again after reset\n");
+ return -EAGAIN;
+ } else if (status) {
+ dev_err(dev, "Could not get CRC for 0x%x region\n",
+ img_optype);
+ return -EFAULT;
+ }
+
+ if (crc_match)
+ continue;
+
+flash:
+ p = fw->data + filehdr_size + img_offset + img_hdrs_size;
+ if (p + img_size > fw->data + fw->size)
+ return -1;
+
+ status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
+ img_offset);
+
+ /* The current FW image on the card does not support OFFSET
+ * based flashing. Retry using older mechanism of OPTYPE based
+ * flashing
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
+ flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+ flash_offset_support = false;
+ goto retry_flash;
+ }
+
+ /* For old FW images ignore ILLEGAL_FIELD error or errors on
+ * UFI_DIR region
+ */
+ if (old_fw_img &&
+ (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
+ (img_optype == OPTYPE_UFI_DIR &&
+ base_status(status) == MCC_STATUS_FAILED))) {
+ continue;
+ } else if (status) {
+ dev_err(dev, "Flashing section type 0x%x failed\n",
+ img_type);
+
+ switch (addl_status(status)) {
+ case MCC_ADDL_STATUS_MISSING_SIGNATURE:
+ dev_err(dev,
+ "Digital signature missing in FW\n");
+ return -EINVAL;
+ case MCC_ADDL_STATUS_INVALID_SIGNATURE:
+ dev_err(dev,
+ "Invalid digital signature in FW\n");
+ return -EINVAL;
+ default:
+ return -EFAULT;
+ }
+ }
+ }
+ return 0;
+}
+
+int lancer_fw_download(struct be_adapter *adapter,
+ const struct firmware *fw)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct be_dma_mem flash_cmd;
+ const u8 *data_ptr = NULL;
+ u8 *dest_image_ptr = NULL;
+ size_t image_size = 0;
+ u32 chunk_size = 0;
+ u32 data_written = 0;
+ u32 offset = 0;
+ int status = 0;
+ u8 add_status = 0;
+ u8 change_status;
+
+ if (!IS_ALIGNED(fw->size, sizeof(u32))) {
+ dev_err(dev, "FW image size should be multiple of 4\n");
+ return -EINVAL;
+ }
+
+ flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ + LANCER_FW_DOWNLOAD_CHUNK;
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
+ if (!flash_cmd.va)
+ return -ENOMEM;
+
+ dest_image_ptr = flash_cmd.va +
+ sizeof(struct lancer_cmd_req_write_object);
+ image_size = fw->size;
+ data_ptr = fw->data;
+
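+	/* Stream the image in LANCER_FW_DOWNLOAD_CHUNK sized writes; the
+	 * zero-length write below marks EOF and commits the download.
+	 */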
+ while (image_size) {
+ chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
+
+ /* Copy the image chunk content. */
+ memcpy(dest_image_ptr, data_ptr, chunk_size);
+
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ chunk_size, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
+ if (status)
+ break;
+
+ offset += data_written;
+ data_ptr += data_written;
+ image_size -= data_written;
+ }
+
+ if (!status) {
+ /* Commit the FW written */
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ 0, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
+ }
+
+ dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
+ if (status) {
+ dev_err(dev, "Firmware load error\n");
+ return be_cmd_status(status);
+ }
+
+ dev_info(dev, "Firmware flashed successfully\n");
+
+ if (change_status == LANCER_FW_RESET_NEEDED) {
+ dev_info(dev, "Resetting adapter to activate new FW\n");
+ status = lancer_physdev_ctrl(adapter,
+ PHYSDEV_CONTROL_FW_RESET_MASK);
+ if (status) {
+ dev_err(dev, "Adapter busy, could not reset FW\n");
+ dev_err(dev, "Reboot server to activate new FW\n");
+ }
+ } else if (change_status != LANCER_NO_RESET_NEEDED) {
+ dev_info(dev, "Reboot server to activate new FW\n");
+ }
+
+ return 0;
+}
+
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+ struct flash_file_hdr_g3 *fhdr)
+{
+ if (!fhdr) {
+ dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
+ return false;
+ }
+
+ /* First letter of the build version is used to identify
+ * which chip this image file is meant for.
+ */
+ switch (fhdr->build[0]) {
+ case BLD_STR_UFI_TYPE_SH:
+ if (!skyhawk_chip(adapter))
+ return false;
+ break;
+ case BLD_STR_UFI_TYPE_BE3:
+ if (!BE3_chip(adapter))
+ return false;
+ break;
+ case BLD_STR_UFI_TYPE_BE2:
+ if (!BE2_chip(adapter))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ /* In BE3 FW images the "asic_type_rev" field doesn't track the
+ * asic_rev of the chips it is compatible with.
+ * When asic_type_rev is 0 the image is compatible only with
+ * pre-BE3-R chips (asic_rev < 0x10)
+ */
+ if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
+ return adapter->asic_rev < 0x10;
+ else
+ return (fhdr->asic_type_rev >= adapter->asic_rev);
+}
+
+int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr;
+ int status = 0, i, num_imgs;
+ struct be_dma_mem flash_cmd;
+
+ fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
+ if (!be_check_ufi_compatibility(adapter, fhdr3)) {
+ dev_err(dev, "Flash image is not compatible with adapter\n");
+ return -EINVAL;
+ }
+
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
+ if (!flash_cmd.va)
+ return -ENOMEM;
+
+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
+ for (i = 0; i < num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *)(fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (!BE2_chip(adapter) &&
+ le32_to_cpu(img_hdr_ptr->imageid) != 1)
+ continue;
+
+ if (skyhawk_chip(adapter))
+ status = be_flash_skyhawk(adapter, fw, &flash_cmd,
+ num_imgs);
+ else
+ status = be_flash_BEx(adapter, fw, &flash_cmd,
+ num_imgs);
+ }
+
+ dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
+ if (!status)
+ dev_info(dev, "Firmware flashed successfully\n");
+
+ return status;
+}
+
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_acpi_wol_magic_config *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+ wrb, nonemb_cmd);
+ memcpy(req->magic_mac, mac, ETH_ALEN);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_lmode *req;
+ int status;
+
+ if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ CMD_SUBSYSTEM_LOWLEVEL))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+ wrb, NULL);
+
+ req->src_port = port_num;
+ req->dest_port = port_num;
+ req->loopback_type = loopback_type;
+ req->loopback_state = enable;
+
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
+ mutex_unlock(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+ status = -ETIMEDOUT;
+
+ return status;
+
+err_unlock:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_loopback_test *req;
+ struct be_cmd_resp_loopback_test *resp;
+ int status;
+
+ if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
+ CMD_SUBSYSTEM_LOWLEVEL))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+ NULL);
+
+ req->hdr.timeout = cpu_to_le32(15);
+ req->pattern = cpu_to_le64(pattern);
+ req->src_port = cpu_to_le32(port_num);
+ req->dest_port = cpu_to_le32(port_num);
+ req->pkt_size = cpu_to_le32(pkt_size);
+ req->num_pkts = cpu_to_le32(num_pkts);
+ req->loopback_type = cpu_to_le32(loopback_type);
+
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
+ mutex_unlock(&adapter->mcc_lock);
+
+ wait_for_completion(&adapter->et_cmd_compl);
+ resp = embedded_payload(wrb);
+ status = le32_to_cpu(resp->status);
+
+ return status;
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+ u32 byte_cnt, struct be_dma_mem *cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_ddrdma_test *req;
+ int status;
+ int i, j = 0;
+
+ if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
+ CMD_SUBSYSTEM_LOWLEVEL))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = cmd->va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+ cmd);
+
+ req->pattern = cpu_to_le64(pattern);
+ req->byte_count = cpu_to_le32(byte_cnt);
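+ /* Replicate the 64-bit test pattern across the send buffer, LSB first */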
+ for (i = 0; i < byte_cnt; i++) {
+ req->snd_buff[i] = (u8)(pattern >> (j * 8));
+ j++;
+ if (j > 7)
+ j = 0;
+ }
+
+ status = be_mcc_notify_wait(adapter);
+
+ if (!status) {
+ struct be_cmd_resp_ddrdma_test *resp;
+
+ resp = cmd->va;
+ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
+ resp->snd_err) {
+ status = -1;
+ }
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+ nonemb_cmd);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_phy_info(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_phy_info *req;
+ struct be_dma_mem cmd;
+ int status;
+
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ status = -ENOMEM;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+ wrb, &cmd);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_phy_info *resp_phy_info =
+ cmd.va + sizeof(struct be_cmd_req_hdr);
+
+ adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
+ adapter->phy.interface_type =
+ le16_to_cpu(resp_phy_info->interface_type);
+ adapter->phy.auto_speeds_supported =
+ le16_to_cpu(resp_phy_info->auto_speeds_supported);
+ adapter->phy.fixed_speeds_supported =
+ le16_to_cpu(resp_phy_info->fixed_speeds_supported);
+ adapter->phy.misc_params =
+ le32_to_cpu(resp_phy_info->misc_params);
+
+ if (BE2_chip(adapter)) {
+ adapter->phy.fixed_speeds_supported =
+ BE_SUPPORTED_SPEED_10GBPS |
+ BE_SUPPORTED_SPEED_1GBPS;
+ }
+ }
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_qos *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+
+ req->hdr.domain = domain;
+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+ req->max_bps_nic = cpu_to_le32(bps);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cntl_attribs *req;
+ struct be_cmd_resp_cntl_attribs *resp;
+ int status, i;
+ int payload_len = max(sizeof(*req), sizeof(*resp));
+ struct mgmt_controller_attrib *attribs;
+ struct be_dma_mem attribs_cmd;
+ u32 *serial_num;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+ attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ attribs_cmd.size,
+ &attribs_cmd.dma, GFP_ATOMIC);
+ if (!attribs_cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ status = -ENOMEM;
+ goto err;
+ }
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = attribs_cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+ wrb, &attribs_cmd);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ serial_num = attribs->hba_attribs.controller_serial_number;
+ for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+ adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+ (BIT_MASK(16) - 1);
+ /* For BEx, since GET_FUNC_CONFIG command is not
+ * supported, we read funcnum here as a workaround.
+ */
+ if (BEx_chip(adapter))
+ adapter->pf_num = attribs->hba_attribs.pci_funcnum;
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ if (attribs_cmd.va)
+ dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+ attribs_cmd.va, attribs_cmd.dma);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_req_native_mode(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_func_cap *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+ sizeof(*req), wrb, NULL);
+
+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
+ CAPABILITY_BE3_NATIVE_ERX_API);
+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
+
+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
+ CAPABILITY_BE3_NATIVE_ERX_API;
+ if (!adapter->be3_native)
+ dev_warn(&adapter->pdev->dev,
+ "adapter not in advanced mode\n");
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Get privilege(s) for a function */
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fn_privileges *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fn_privileges *resp =
+ embedded_payload(wrb);
+
+ *privilege = le32_to_cpu(resp->privilege_mask);
+
+ /* In UMC mode FW does not return right privileges.
+ * Override with correct privilege equivalent to PF.
+ */
+ if (BEx_chip(adapter) && be_is_mc(adapter) &&
+ be_physfn(adapter))
+ *privilege = MAX_PRIVILEGES;
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Set privilege(s) for a function */
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_fn_privileges *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+ req->hdr.domain = domain;
+ if (lancer_chip(adapter))
+ req->privileges_lancer = cpu_to_le32(privileges);
+ else
+ req->privileges = cpu_to_le32(privileges);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* pmac_id_valid: true => pmac_id is supplied and the corresponding MAC
+ * address is requested.
+ * pmac_id_valid: false => an active pmac_id or a permanent MAC address is
+ * requested; if an active pmac_id is found, it is returned and
+ * pmac_id_valid is set to true.
+ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
+ u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_mac_list *req;
+ int status;
+ int mac_count;
+ struct be_dma_mem get_mac_list_cmd;
+ int i;
+
+ memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
+ get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
+ get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma,
+ GFP_ATOMIC);
+
+ if (!get_mac_list_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure during GET_MAC_LIST\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto out;
+ }
+
+ req = get_mac_list_cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_MAC_LIST,
+ get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
+ req->hdr.domain = domain;
+ req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
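+ /* A specific pmac_id queries just that MAC address; otherwise request
+ * the full list, which may contain active mac_ids and/or permanent
+ * MAC addresses (see the parsing below)
+ */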
+ if (*pmac_id_valid) {
+ req->mac_id = cpu_to_le32(*pmac_id);
+ req->iface_id = cpu_to_le16(if_handle);
+ req->perm_override = 0;
+ } else {
+ req->perm_override = 1;
+ }
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_mac_list *resp =
+ get_mac_list_cmd.va;
+
+ if (*pmac_id_valid) {
+ memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
+ ETH_ALEN);
+ goto out;
+ }
+
+ mac_count = resp->true_mac_count + resp->pseudo_mac_count;
+ /* Mac list returned could contain one or more active mac_ids
+ * or one or more true or pseudo permanent mac addresses.
+ * If an active mac_id is present, return first active mac_id
+ * found.
+ */
+ for (i = 0; i < mac_count; i++) {
+ struct get_list_macaddr *mac_entry;
+ u16 mac_addr_size;
+ u32 mac_id;
+
+ mac_entry = &resp->macaddr_list[i];
+ mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
+ /* mac_id is a 32 bit value and mac_addr size
+ * is 6 bytes
+ */
+ if (mac_addr_size == sizeof(u32)) {
+ *pmac_id_valid = true;
+ mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
+ *pmac_id = le32_to_cpu(mac_id);
+ goto out;
+ }
+ }
+ /* If no active mac_id found, return first mac addr */
+ *pmac_id_valid = false;
+ memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
+ ETH_ALEN);
+ }
+
+out:
+ mutex_unlock(&adapter->mcc_lock);
+ dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ return status;
+}
+
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+ u8 *mac, u32 if_handle, bool active, u32 domain)
+{
+ if (!active)
+ be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
+ if_handle, domain);
+ if (BEx_chip(adapter))
+ return be_cmd_mac_addr_query(adapter, mac, false,
+ if_handle, curr_pmac_id);
+ else
+ /* Fetch the MAC address using pmac_id */
+ return be_cmd_get_mac_from_list(adapter, mac, &active,
+ &curr_pmac_id,
+ if_handle, domain);
+}
+
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
+{
+ int status;
+ bool pmac_valid = false;
+
+ eth_zero_addr(mac);
+
+ if (BEx_chip(adapter)) {
+ if (be_physfn(adapter))
+ status = be_cmd_mac_addr_query(adapter, mac, true, 0,
+ 0);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, 0);
+ } else {
+ status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
+ NULL, adapter->if_handle, 0);
+ }
+
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_mac_list *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
+ if (!cmd.va)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
+
+ req->hdr.domain = domain;
+ req->mac_count = mac_count;
+ if (mac_count)
+ memcpy(req->mac, mac_array, ETH_ALEN * mac_count);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Wrapper to delete any active MACs and provision the new mac.
+ * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
+ * current list are active.
+ */
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
+{
+ bool active_mac = false;
+ u8 old_mac[ETH_ALEN];
+ u32 pmac_id;
+ int status;
+
+ status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+ &pmac_id, if_id, dom);
+
+ if (!status && active_mac)
+ be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
+
+ return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
+}
+
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
+ u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_hsw_config *req;
+ void *ctxt;
+ int status;
+
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
+
+ req->hdr.domain = domain;
+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
+ if (pvid) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
+ }
+ if (hsw_mode) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
+ AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
+ ctxt, hsw_mode);
+ }
+
+ /* Enable/disable both mac and vlan spoof checking */
+ if (!BEx_chip(adapter) && spoofchk) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
+ ctxt, spoofchk);
+ AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
+ ctxt, spoofchk);
+ }
+
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+/* Get Hyper switch config */
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
+ u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_hsw_config *req;
+ void *ctxt;
+ int status;
+ u16 vid;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
+
+ req->hdr.domain = domain;
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, intf_id);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
+
+ if (!BEx_chip(adapter) && mode) {
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
+ }
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_hsw_config *resp =
+ embedded_payload(wrb);
+
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+ vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ pvid, &resp->context);
+ if (pvid)
+ *pvid = le16_to_cpu(vid);
+ if (mode)
+ *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ port_fwd_type, &resp->context);
+ if (spoofchk)
+ *spoofchk =
+ AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ spoofchk, &resp->context);
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static bool be_is_wol_excluded(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (be_virtfn(adapter))
+ return true;
+
+ switch (pdev->subsystem_device) {
+ case OC_SUBSYS_DEVICE_ID1:
+ case OC_SUBSYS_DEVICE_ID2:
+ case OC_SUBSYS_DEVICE_ID3:
+ case OC_SUBSYS_DEVICE_ID4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_acpi_wol_magic_config_v1 *req;
+ int status = 0;
+ struct be_dma_mem cmd;
+
+ if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ CMD_SUBSYSTEM_ETH))
+ return -EPERM;
+
+ if (be_is_wol_excluded(adapter))
+ return status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ status = -ENOMEM;
+ goto err;
+ }
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ sizeof(*req), wrb, &cmd);
+
+ req->hdr.version = 1;
+ req->query_options = BE_GET_WOL_CAP;
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
+
+ resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
+
+ adapter->wol_cap = resp->wol_settings;
+
+ /* Non-zero macaddr indicates WOL is enabled */
+ if (adapter->wol_cap & BE_WOL_CAP &&
+ !is_zero_ether_addr(resp->magic_mac))
+ adapter->wol_en = true;
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ if (cmd.va)
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
+ return status;
+}
+
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status;
+ int i, j;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
+ if (!extfat_cmd.va)
+ return -ENOMEM;
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (status)
+ goto err;
+
+ cfgs = (struct be_fat_conf_params *)
+ (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
+ for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
+ u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+
+ for (j = 0; j < num_modes; j++) {
+ if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
+ cfgs->module[i].trace_lvl[j].dbg_lvl =
+ cpu_to_le32(level);
+ }
+ }
+
+ status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
+err:
+ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+ return status;
+}
+
+int be_cmd_get_fw_log_level(struct be_adapter *adapter)
+{
+ struct be_dma_mem extfat_cmd;
+ struct be_fat_conf_params *cfgs;
+ int status, j;
+ int level = 0;
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
+
+ if (!extfat_cmd.va) {
+ dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+ __func__);
+ goto err;
+ }
+
+ status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+ if (!status) {
+ cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+
+ for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
+ if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
+ level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+ }
+ }
+ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+ extfat_cmd.dma);
+err:
+ return level;
+}
+
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_ext_fat_caps *req;
+ int status;
+
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd->va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ cmd->size, wrb, cmd);
+ req->parameter_type = cpu_to_le32(1);
+
+ status = be_mbox_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd,
+ struct be_fat_conf_params *configs)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_ext_fat_caps *req;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd->va;
+ memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES,
+ cmd->size, wrb, cmd);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_query_port_name(struct be_adapter *adapter)
+{
+ struct be_cmd_req_get_port_name *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
+ NULL);
+ if (!BEx_chip(adapter))
+ req->hdr.version = 1;
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
+
+ adapter->port_name = resp->port_name[adapter->hba_port_num];
+ } else {
+ adapter->port_name = adapter->hba_port_num + '0';
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* When more than 1 NIC descriptor is present in the descriptor list,
+ * the caller must specify the pf_num to obtain the NIC descriptor
+ * corresponding to its pci function.
+ * get_vft must be true when the caller wants the VF-template desc of the
+ * PF-pool.
+ * The pf_num should be set to PF_NUM_IGNORE when the caller knows
+ * that only its NIC descriptor is present in the descriptor list.
+ */
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
+ bool get_vft, u8 pf_num)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_nic_res_desc *nic;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
+ nic = (struct be_nic_res_desc *)hdr;
+
+ if ((pf_num == PF_NUM_IGNORE ||
+ nic->pf_num == pf_num) &&
+ (!get_vft || nic->flags & BIT(VFT_SHIFT)))
+ return nic;
+ }
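+ /* A zero desc_len implies a V0-sized descriptor; advance to the next */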
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return NULL;
+}
+
+static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
+ u8 pf_num)
+{
+ return be_get_nic_desc(buf, desc_count, true, pf_num);
+}
+
+static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
+ u8 pf_num)
+{
+ return be_get_nic_desc(buf, desc_count, false, pf_num);
+}
+
+static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
+ u8 pf_num)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_num == pf_num)
+ return pcie;
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return NULL;
+}
+
+static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
+ return (struct be_port_res_desc *)hdr;
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return NULL;
+}
+
+static void be_copy_nic_desc(struct be_resources *res,
+ struct be_nic_res_desc *desc)
+{
+ res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
+ res->max_vlans = le16_to_cpu(desc->vlan_count);
+ res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+ res->max_tx_qs = le16_to_cpu(desc->txq_count);
+ res->max_rss_qs = le16_to_cpu(desc->rssq_count);
+ res->max_rx_qs = le16_to_cpu(desc->rq_count);
+ res->max_evt_qs = le16_to_cpu(desc->eq_count);
+ res->max_cq_count = le16_to_cpu(desc->cq_count);
+ res->max_iface_count = le16_to_cpu(desc->iface_count);
+ res->max_mcc_count = le16_to_cpu(desc->mcc_count);
+ /* Clear flags that driver is not interested in */
+ res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
+ BE_IF_CAP_FLAGS_WANT;
+}
+
+/* Uses Mbox */
+int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_func_config *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_func_config);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ status = -ENOMEM;
+ goto err;
+ }
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FUNC_CONFIG,
+ cmd.size, wrb, &cmd);
+
+ if (skyhawk_chip(adapter))
+ req->hdr.version = 1;
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_func_config *resp = cmd.va;
+ u32 desc_count = le32_to_cpu(resp->desc_count);
+ struct be_nic_res_desc *desc;
+
+ /* GET_FUNC_CONFIG returns resource descriptors of the
+ * current function only. So, pf_num should be set to
+ * PF_NUM_IGNORE.
+ */
+ desc = be_get_func_nic_desc(resp->func_param, desc_count,
+ PF_NUM_IGNORE);
+ if (!desc) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
+ adapter->pf_num = desc->pf_num;
+ adapter->vf_num = desc->vf_num;
+
+ if (res)
+ be_copy_nic_desc(res, desc);
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ if (cmd.va)
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
+ return status;
+}
+
+/* This routine returns a list of all the NIC PF_nums in the adapter */
+static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie = NULL;
+ int i;
+ u16 nic_pf_count = 0;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
+ pcie->pf_type == MISSION_RDMA)) {
+ nic_pf_nums[nic_pf_count++] = pcie->pf_num;
+ }
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return nic_pf_count;
+}
+
+/* Will use MBOX only if MCCQ has not been created */
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain)
+{
+ struct be_cmd_resp_get_profile_config *resp;
+ struct be_cmd_req_get_profile_config *req;
+ struct be_nic_res_desc *vf_res;
+ struct be_pcie_res_desc *pcie;
+ struct be_port_res_desc *port;
+ struct be_nic_res_desc *nic;
+ struct be_mcc_wrb wrb = {0};
+ struct be_dma_mem cmd;
+ u16 desc_count;
+ int status;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
+ if (!cmd.va)
+ return -ENOMEM;
+
+ req = cmd.va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PROFILE_CONFIG,
+ cmd.size, &wrb, &cmd);
+
+ if (!lancer_chip(adapter))
+ req->hdr.version = 1;
+ req->type = profile_type;
+ req->hdr.domain = domain;
+
+ /* When the QUERY_MODIFIABLE_FIELDS_TYPE bit is set, the cmd returns
+ * descriptors in which all bits are set to "1" for the fields that can
+ * be modified using the SET_PROFILE_CONFIG cmd.
+ */
+ if (query == RESOURCE_MODIFIABLE)
+ req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
+
+ status = be_cmd_notify_wait(adapter, &wrb);
+ if (status)
+ goto err;
+
+ resp = cmd.va;
+ desc_count = le16_to_cpu(resp->desc_count);
+
+ if (port_res) {
+ u16 nic_pf_cnt = 0, i;
+ u16 nic_pf_num_list[MAX_NIC_FUNCS];
+
+ nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
+ desc_count,
+ nic_pf_num_list);
+
+ for (i = 0; i < nic_pf_cnt; i++) {
+ nic = be_get_func_nic_desc(resp->func_param, desc_count,
+ nic_pf_num_list[i]);
+ if (nic->link_param == adapter->port_num) {
+ port_res->nic_pfs++;
+ pcie = be_get_pcie_desc(resp->func_param,
+ desc_count,
+ nic_pf_num_list[i]);
+ port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
+ }
+ }
+ goto err;
+ }
+
+ pcie = be_get_pcie_desc(resp->func_param, desc_count,
+ adapter->pf_num);
+ if (pcie)
+ res->max_vfs = le16_to_cpu(pcie->num_vfs);
+
+ port = be_get_port_desc(resp->func_param, desc_count);
+ if (port)
+ adapter->mc_type = port->mc_type;
+
+ nic = be_get_func_nic_desc(resp->func_param, desc_count,
+ adapter->pf_num);
+ if (nic)
+ be_copy_nic_desc(res, nic);
+
+ vf_res = be_get_vft_desc(resp->func_param, desc_count,
+ adapter->pf_num);
+ if (vf_res)
+ res->vf_if_cap_flags = vf_res->cap_flags;
+err:
+ if (cmd.va)
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
+ return status;
+}
+
+/* Will use MBOX only if MCCQ has not been created */
+static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+ int size, int count, u8 version, u8 domain)
+{
+ struct be_cmd_req_set_profile_config *req;
+ struct be_mcc_wrb wrb = {0};
+ struct be_dma_mem cmd;
+ int status;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_profile_config);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
+ if (!cmd.va)
+ return -ENOMEM;
+
+ req = cmd.va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
+ &wrb, &cmd);
+ req->hdr.version = version;
+ req->hdr.domain = domain;
+ req->desc_count = cpu_to_le32(count);
+ memcpy(req->desc, desc, size);
+
+ status = be_cmd_notify_wait(adapter, &wrb);
+
+ if (cmd.va)
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
+ return status;
+}
+
+/* Mark all fields invalid */
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
+{
+ memset(nic, 0, sizeof(*nic));
+ nic->unicast_mac_count = 0xFFFF;
+ nic->mcc_count = 0xFFFF;
+ nic->vlan_count = 0xFFFF;
+ nic->mcast_mac_count = 0xFFFF;
+ nic->txq_count = 0xFFFF;
+ nic->rq_count = 0xFFFF;
+ nic->rssq_count = 0xFFFF;
+ nic->lro_count = 0xFFFF;
+ nic->cq_count = 0xFFFF;
+ nic->toe_conn_count = 0xFFFF;
+ nic->eq_count = 0xFFFF;
+ nic->iface_count = 0xFFFF;
+ nic->link_param = 0xFF;
+ nic->channel_id_param = cpu_to_le16(0xF000);
+ nic->acpi_params = 0xFF;
+ nic->wol_param = 0x0F;
+ nic->tunnel_iface_count = 0xFFFF;
+ nic->direct_tenant_iface_count = 0xFFFF;
+ nic->bw_min = 0xFFFFFFFF;
+ nic->bw_max = 0xFFFFFFFF;
+}
+
+/* Mark all fields invalid */
+static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
+{
+ memset(pcie, 0, sizeof(*pcie));
+ pcie->sriov_state = 0xFF;
+ pcie->pf_state = 0xFF;
+ pcie->pf_type = 0xFF;
+ pcie->num_vfs = 0xFFFF;
+}
+
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
+ u8 domain)
+{
+ struct be_nic_res_desc nic_desc;
+ u32 bw_percent;
+ u16 version = 0;
+
+ if (BE3_chip(adapter))
+ return be_cmd_set_qos(adapter, max_rate / 10, domain);
+
+ be_reset_nic_desc(&nic_desc);
+ nic_desc.pf_num = adapter->pf_num;
+ nic_desc.vf_num = domain;
+ nic_desc.bw_min = 0;
+ if (lancer_chip(adapter)) {
+ nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+ nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
+ nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
+ (1 << NOSV_SHIFT);
+ nic_desc.bw_max = cpu_to_le32(max_rate / 10);
+ } else {
+ version = 1;
+ nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+ nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+ nic_desc.bw_max = cpu_to_le32(bw_percent);
+ }
+
+ return be_cmd_set_profile_config(adapter, &nic_desc,
+ nic_desc.hdr.desc_len,
+ 1, version, domain);
+}
+
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+ struct be_resources pool_res, u16 num_vfs,
+ struct be_resources *vft_res)
+{
+ struct {
+ struct be_pcie_res_desc pcie;
+ struct be_nic_res_desc nic_vft;
+ } __packed desc;
+
+ /* PF PCIE descriptor */
+ be_reset_pcie_desc(&desc.pcie);
+ desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
+ desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
+ desc.pcie.pf_num = adapter->pdev->devfn;
+ desc.pcie.sriov_state = num_vfs ? 1 : 0;
+ desc.pcie.num_vfs = cpu_to_le16(num_vfs);
+
+ /* VF NIC Template descriptor */
+ be_reset_nic_desc(&desc.nic_vft);
+ desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+ desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
+ BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
+ desc.nic_vft.pf_num = adapter->pdev->devfn;
+ desc.nic_vft.vf_num = 0;
+ desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
+ desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
+ desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
+ desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
+ desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
+
+ if (vft_res->max_uc_mac)
+ desc.nic_vft.unicast_mac_count =
+ cpu_to_le16(vft_res->max_uc_mac);
+ if (vft_res->max_vlans)
+ desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
+ if (vft_res->max_iface_count)
+ desc.nic_vft.iface_count =
+ cpu_to_le16(vft_res->max_iface_count);
+ if (vft_res->max_mcc_count)
+ desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
+
+ return be_cmd_set_profile_config(adapter, &desc,
+ 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
+}
+
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_manage_iface_filters *req;
+ int status;
+
+ if (iface == 0xFFFFFFFF)
+ return -1;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
+ wrb, NULL);
+ req->op = op;
+ req->target_iface_id = cpu_to_le32(iface);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
+{
+ struct be_port_res_desc port_desc;
+
+ memset(&port_desc, 0, sizeof(port_desc));
+ port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
+ port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ port_desc.link_num = adapter->hba_port_num;
+ if (port) {
+ port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
+ (1 << RCVID_SHIFT);
+ port_desc.nv_port = swab16(port);
+ } else {
+ port_desc.nv_flags = NV_TYPE_DISABLED;
+ port_desc.nv_port = 0;
+ }
+
+ return be_cmd_set_profile_config(adapter, &port_desc,
+ RESOURCE_DESC_SIZE_V1, 1, 1, 0);
+}
+
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_iface_list *req;
+ struct be_cmd_resp_get_iface_list *resp;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
+ wrb, NULL);
+ req->hdr.domain = vf_num + 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ resp = (struct be_cmd_resp_get_iface_list *)req;
+ vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
+ }
+
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+static int lancer_wait_idle(struct be_adapter *adapter)
+{
+#define SLIPORT_IDLE_TIMEOUT 30
+ u32 reg_val;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
+ reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
+ if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
+ break;
+
+ ssleep(1);
+ }
+
+ if (i == SLIPORT_IDLE_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
+{
+ int status = 0;
+
+ status = lancer_wait_idle(adapter);
+ if (status)
+ return status;
+
+ iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
+
+ return status;
+}
+
+/* Routine to check whether dump image is present or not */
+bool dump_present(struct be_adapter *adapter)
+{
+ u32 sliport_status = 0;
+
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
+}
+
+int lancer_initiate_dump(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ if (dump_present(adapter)) {
+ dev_info(dev, "Previous dump not cleared, not forcing dump\n");
+ return -EEXIST;
+ }
+
+ /* give firmware reset and diagnostic dump */
+ status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
+ PHYSDEV_CONTROL_DD_MASK);
+ if (status < 0) {
+ dev_err(dev, "FW reset failed\n");
+ return status;
+ }
+
+ status = lancer_wait_idle(adapter);
+ if (status)
+ return status;
+
+ if (!dump_present(adapter)) {
+ dev_err(dev, "FW dump not generated\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int lancer_delete_dump(struct be_adapter *adapter)
+{
+ int status;
+
+ status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
+ return be_cmd_status(status);
+}
+
+/* Uses sync mcc */
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_enable_disable_vf *req;
+ int status;
+
+ if (BEx_chip(adapter))
+ return 0;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+ req->enable = 1;
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_intr_set *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
+ wrb, NULL);
+
+ req->intr_enabled = intr_enable;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses MBOX */
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
+{
+ struct be_cmd_req_get_active_profile *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
+ wrb, NULL);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_active_profile *resp =
+ embedded_payload(wrb);
+
+ *profile_id = le16_to_cpu(resp->active_profile_id);
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+static int
+__be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, int version, u8 domain)
+{
+ struct be_cmd_req_set_ll_link *req;
+ struct be_mcc_wrb *wrb;
+ u32 link_config = 0;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
+ sizeof(*req), wrb, NULL);
+
+ req->hdr.version = version;
+ req->hdr.domain = domain;
+
+ if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
+ link_state == IFLA_VF_LINK_STATE_AUTO)
+ link_config |= PLINK_ENABLE;
+
+ if (link_state == IFLA_VF_LINK_STATE_AUTO)
+ link_config |= PLINK_TRACK;
+
+ req->link_config = cpu_to_le32(link_config);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, u8 domain)
+{
+ int status;
+
+ if (BE2_chip(adapter))
+ return -EOPNOTSUPP;
+
+ status = __be_cmd_set_logical_link_config(adapter, link_state,
+ 2, domain);
+
+ /* Version 2 of the command will not be recognized by older FW.
+ * On such a failure issue version 1 of the command.
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
+ status = __be_cmd_set_logical_link_config(adapter, link_state,
+ 1, domain);
+ return status;
+}
+
+int be_cmd_set_features(struct be_adapter *adapter)
+{
+ struct be_cmd_resp_set_features *resp;
+ struct be_cmd_req_set_features *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mcc_lock))
+ return -1;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FEATURES,
+ sizeof(*req), wrb, NULL);
+
+ req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY);
+ req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery));
+ req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK);
+
+ status = be_mcc_notify_wait(adapter);
+ if (status)
+ goto err;
+
+ resp = embedded_payload(wrb);
+
+ adapter->error_recovery.ue_to_poll_time =
+ le16_to_cpu(resp->parameter.resp.ue2rp);
+ adapter->error_recovery.ue_to_reset_time =
+ le16_to_cpu(resp->parameter.resp.ue2sr);
+ adapter->error_recovery.recovery_supported = true;
+err:
+ /* Also check for "MCC_STATUS_INVALID_LENGTH", since older Skyhawk
+ * (SKH) firmware returns this error for this command
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_INVALID_LENGTH)
+ dev_info(&adapter->pdev->dev,
+ "Adapter does not support HW error recovery\n");
+
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
+int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
+ int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+{
+ struct be_adapter *adapter = netdev_priv(netdev_handle);
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
+ struct be_cmd_req_hdr *req;
+ struct be_cmd_resp_hdr *resp;
+ int status;
+
+ mutex_lock(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+ resp = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
+ hdr->opcode, wrb_payload_size, wrb, NULL);
+ memcpy(req, wrb_payload, wrb_payload_size);
+ be_dws_cpu_to_le(req, wrb_payload_size);
+
+ status = be_mcc_notify_wait(adapter);
+ if (cmd_status)
+ *cmd_status = (status & 0xffff);
+ if (ext_status)
+ *ext_status = 0;
+ memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
+ be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
+err:
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
new file mode 100644
index 0000000000..e2085c68c0
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -0,0 +1,2511 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005 - 2016 Broadcom
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/*
+ * The driver sends configuration and management command requests to the
+ * firmware in the BE. These requests are communicated to the processor
+ * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
+ * WRB inside a MAILBOX.
+ * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ */
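+
+/* A sketch of the synchronous command flow used throughout be_cmds.c:
+ * obtain a WRB with wrb_from_mccq() (or wrb_from_mbox() before the MCC
+ * queue exists), fill the request header via be_wrb_cmd_hdr_prepare(),
+ * issue the command with be_mcc_notify_wait() or be_mbox_notify_wait(),
+ * and then parse the response from the embedded payload or from the
+ * non-embedded DMA buffer attached to the WRB.
+ */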
+
+struct be_sge {
+ u32 pa_lo;
+ u32 pa_hi;
+ u32 len;
+};
+
+#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
+#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
+#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
+struct be_mcc_wrb {
+ u32 embedded; /* dword 0 */
+ u32 payload_length; /* dword 1 */
+ u32 tag0; /* dword 2 */
+ u32 tag1; /* dword 3 */
+ u32 rsvd; /* dword 4 */
+ union {
+ u8 embedded_payload[236]; /* used by embedded cmds */
+ struct be_sge sgl[19]; /* used by non-embedded cmds */
+ } payload;
+};
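+
+/* A sketch of how the payload union is used (following the convention in
+ * be_cmds.c): when MCC_WRB_EMBEDDED_MASK (bit 0 of the 'embedded' dword)
+ * is set, the request is built in place in embedded_payload[]; otherwise
+ * payload.sgl[] carries scatter-gather entries that point at a separately
+ * DMA-mapped command buffer (the nonemb_cmd/be_dma_mem arguments).
+ */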
+
+#define CQE_FLAGS_VALID_MASK BIT(31)
+#define CQE_FLAGS_ASYNC_MASK BIT(30)
+#define CQE_FLAGS_COMPLETED_MASK BIT(28)
+#define CQE_FLAGS_CONSUMED_MASK BIT(27)
+
+/* Completion Status */
+enum mcc_base_status {
+ MCC_STATUS_SUCCESS = 0,
+ MCC_STATUS_FAILED = 1,
+ MCC_STATUS_ILLEGAL_REQUEST = 2,
+ MCC_STATUS_ILLEGAL_FIELD = 3,
+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
+ MCC_STATUS_NOT_SUPPORTED = 66,
+ MCC_STATUS_FEATURE_NOT_SUPPORTED = 68,
+ MCC_STATUS_INVALID_LENGTH = 116
+};
+
+/* Additional status */
+enum mcc_addl_status {
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
+ MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
+ MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
+ MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab,
+ MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56,
+ MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57,
+ MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60
+};
+
+#define CQE_BASE_STATUS_MASK 0xFFFF
+#define CQE_BASE_STATUS_SHIFT 0 /* bits 0 - 15 */
+#define CQE_ADDL_STATUS_MASK 0xFF
+#define CQE_ADDL_STATUS_SHIFT 16 /* bits 16 - 31 */
+
+#define base_status(status) \
+ ((enum mcc_base_status) \
+ (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
+#define addl_status(status) \
+ ((enum mcc_addl_status) \
+ (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \
+ CQE_ADDL_STATUS_MASK : 0))
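+
+/* Worked example (a sketch): a completion status of 0x004d0002 decodes as
+ * base_status() == MCC_STATUS_ILLEGAL_REQUEST (0x0002) and
+ * addl_status() == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH (0x4d).
+ */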
+
+struct be_mcc_compl {
+ u32 status; /* dword 0 */
+ u32 tag0; /* dword 1 */
+ u32 tag1; /* dword 2 */
+ u32 flags; /* dword 3 */
+};
+
+/* When the async bit of mcc_compl flags is set, flags
+ * is interpreted as follows:
+ */
+#define ASYNC_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_TYPE_SHIFT 16
+#define ASYNC_EVENT_TYPE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+#define ASYNC_EVENT_CODE_GRP_5 0x5
+#define ASYNC_EVENT_QOS_SPEED 0x1
+#define ASYNC_EVENT_COS_PRIORITY 0x2
+#define ASYNC_EVENT_PVID_STATE 0x3
+#define ASYNC_EVENT_CODE_QNQ 0x6
+#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
+#define ASYNC_EVENT_CODE_SLIPORT 0x11
+#define ASYNC_EVENT_PORT_MISCONFIG 0x9
+#define ASYNC_EVENT_FW_CONTROL 0x5
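+
+/* For example, the event code and type are recovered from the completion
+ * flags as:
+ *   code = (flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK;
+ *   type = (flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK;
+ */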
+
+enum {
+ LINK_DOWN = 0x0,
+ LINK_UP = 0x1
+};
+#define LINK_STATUS_MASK 0x1
+#define LOGICAL_LINK_STATUS_MASK 0x2
+
+/* When the event code of compl->flags is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ u32 flags;
+} __packed;
+
+/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
+ * the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_qos_link_speed {
+ u8 physical_port;
+ u8 rsvd[5];
+ u16 qos_link_speed;
+ u32 event_tag;
+ u32 flags;
+} __packed;
+
+/* When the event code of compl->flags is GRP5 and event type is
+ * CoS-Priority, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_cos_priority {
+ u8 physical_port;
+ u8 available_priority_bmap;
+ u8 reco_default_priority;
+ u8 valid;
+ u8 rsvd0;
+ u8 event_tag;
+ u32 flags;
+} __packed;
+
+/* When the event code of compl->flags is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+ u8 enabled;
+ u8 rsvd0;
+ u16 tag;
+ u32 event_tag;
+ u32 rsvd1;
+ u32 flags;
+} __packed;
+
+/* async event indicating outer VLAN tag in QnQ */
+struct be_async_event_qnq {
+ u8 valid; /* Indicates if outer VLAN is valid */
+ u8 rsvd0;
+ u16 vlan_tag;
+ u32 event_tag;
+ u8 rsvd1[4];
+ u32 flags;
+} __packed;
+
+enum {
+ BE_PHY_FUNCTIONAL = 0,
+ BE_PHY_NOT_PRESENT = 1,
+ BE_PHY_DIFF_MEDIA = 2,
+ BE_PHY_INCOMPATIBLE = 3,
+ BE_PHY_UNQUALIFIED = 4,
+ BE_PHY_UNCERTIFIED = 5
+};
+
+#define PHY_STATE_MSG_SEVERITY 0x6
+#define PHY_STATE_OPER 0x1
+#define PHY_STATE_INFO_VALID 0x80
+#define PHY_STATE_OPER_MSG_NONE 0x2
+#define DEFAULT_MSG_SEVERITY 0x1
+
+#define be_phy_state_unknown(phy_state) (phy_state > BE_PHY_UNCERTIFIED)
+#define be_phy_unqualified(phy_state) \
+ (phy_state == BE_PHY_UNQUALIFIED || \
+ phy_state == BE_PHY_UNCERTIFIED)
+#define be_phy_misconfigured(phy_state) \
+ (phy_state == BE_PHY_INCOMPATIBLE || \
+ phy_state == BE_PHY_UNQUALIFIED || \
+ phy_state == BE_PHY_UNCERTIFIED)
+
+extern const char * const be_misconfig_evt_port_state[];
+
+/* async event indicating misconfigured port */
+struct be_async_event_misconfig_port {
+ /* DATA_WORD1:
+ * phy state of port 0: bits 7 - 0
+ * phy state of port 1: bits 15 - 8
+ * phy state of port 2: bits 23 - 16
+ * phy state of port 3: bits 31 - 24
+ */
+ u32 event_data_word1;
+ /* DATA_WORD2:
+ * phy state info of port 0: bits 7 - 0
+ * phy state info of port 1: bits 15 - 8
+ * phy state info of port 2: bits 23 - 16
+ * phy state info of port 3: bits 31 - 24
+ *
+ * PHY STATE INFO:
+ * Link operability :bit 0
+ * Message severity :bit 2 - 1
+ * Rsvd :bits 6 - 3
+ * phy state info valid :bit 7
+ */
+ u32 event_data_word2;
+ u32 rsvd0;
+ u32 flags;
+} __packed;
+
+#define BMC_FILT_BROADCAST_ARP BIT(0)
+#define BMC_FILT_BROADCAST_DHCP_CLIENT BIT(1)
+#define BMC_FILT_BROADCAST_DHCP_SERVER BIT(2)
+#define BMC_FILT_BROADCAST_NET_BIOS BIT(3)
+#define BMC_FILT_BROADCAST BIT(7)
+#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER BIT(8)
+#define BMC_FILT_MULTICAST_IPV6_RA BIT(9)
+#define BMC_FILT_MULTICAST_IPV6_RAS BIT(10)
+#define BMC_FILT_MULTICAST BIT(15)
+struct be_async_fw_control {
+ u32 event_data_word1;
+ u32 event_data_word2;
+ u32 evt_tag;
+ u32 event_data_word4;
+} __packed;
+
+struct be_mcc_mailbox {
+ struct be_mcc_wrb wrb;
+ struct be_mcc_compl compl;
+};
+
+#define CMD_SUBSYSTEM_COMMON 0x1
+#define CMD_SUBSYSTEM_ETH 0x3
+#define CMD_SUBSYSTEM_LOWLEVEL 0xb
+
+#define OPCODE_COMMON_NTWK_MAC_QUERY 1
+#define OPCODE_COMMON_NTWK_MAC_SET 2
+#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
+#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
+#define OPCODE_COMMON_READ_FLASHROM 6
+#define OPCODE_COMMON_WRITE_FLASHROM 7
+#define OPCODE_COMMON_CQ_CREATE 12
+#define OPCODE_COMMON_EQ_CREATE 13
+#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SET_QOS 28
+#define OPCODE_COMMON_MCC_CREATE_EXT 90
+#define OPCODE_COMMON_SEEPROM_READ 30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
+#define OPCODE_COMMON_NTWK_RX_FILTER 34
+#define OPCODE_COMMON_GET_FW_VERSION 35
+#define OPCODE_COMMON_SET_FLOW_CONTROL 36
+#define OPCODE_COMMON_GET_FLOW_CONTROL 37
+#define OPCODE_COMMON_SET_FRAME_SIZE 39
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_FIRMWARE_CONFIG 42
+#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
+#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
+#define OPCODE_COMMON_MCC_DESTROY 53
+#define OPCODE_COMMON_CQ_DESTROY 54
+#define OPCODE_COMMON_EQ_DESTROY 55
+#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
+#define OPCODE_COMMON_NTWK_PMAC_ADD 59
+#define OPCODE_COMMON_NTWK_PMAC_DEL 60
+#define OPCODE_COMMON_FUNCTION_RESET 61
+#define OPCODE_COMMON_MANAGE_FAT 68
+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
+#define OPCODE_COMMON_GET_BEACON_STATE 70
+#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG 80
+#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
+#define OPCODE_COMMON_GET_PHY_DETAILS 102
+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
+#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES 125
+#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES 126
+#define OPCODE_COMMON_GET_MAC_LIST 147
+#define OPCODE_COMMON_SET_MAC_LIST 148
+#define OPCODE_COMMON_GET_HSW_CONFIG 152
+#define OPCODE_COMMON_GET_FUNC_CONFIG 160
+#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
+#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
+#define OPCODE_COMMON_GET_ACTIVE_PROFILE 167
+#define OPCODE_COMMON_SET_HSW_CONFIG 153
+#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
+#define OPCODE_COMMON_READ_OBJECT 171
+#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_DELETE_OBJECT 174
+#define OPCODE_COMMON_SET_FEATURES 191
+#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193
+#define OPCODE_COMMON_GET_IFACE_LIST 194
+#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
+
+#define OPCODE_ETH_RSS_CONFIG 1
+#define OPCODE_ETH_ACPI_CONFIG 2
+#define OPCODE_ETH_PROMISCUOUS 3
+#define OPCODE_ETH_GET_STATISTICS 4
+#define OPCODE_ETH_TX_CREATE 7
+#define OPCODE_ETH_RX_CREATE 8
+#define OPCODE_ETH_TX_DESTROY 9
+#define OPCODE_ETH_RX_DESTROY 10
+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
+#define OPCODE_ETH_GET_PPORT_STATS 18
+
+#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
+#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
+
+struct be_cmd_req_hdr {
+ u8 opcode; /* dword 0 */
+ u8 subsystem; /* dword 0 */
+ u8 port_number; /* dword 0 */
+ u8 domain; /* dword 0 */
+ u32 timeout; /* dword 1 */
+ u32 request_length; /* dword 2 */
+ u8 version; /* dword 3 */
+ u8 rsvd[3]; /* dword 3 */
+};
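+
+/* Illustrative sketch (not part of the firmware interface): every command in
+ * this file begins with this header.  be_wrb_cmd_hdr_prepare() in be_cmds.c
+ * populates it along the following lines (field names are from this struct;
+ * the helper's exact signature may differ):
+ *
+ *	req_hdr->opcode = opcode;
+ *	req_hdr->subsystem = subsystem;
+ *	req_hdr->request_length = cmd_len - sizeof(struct be_cmd_req_hdr);
+ *	req_hdr->version = 0;
+ */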
+
+#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
+#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
+struct be_cmd_resp_hdr {
+ u8 opcode; /* dword 0 */
+ u8 subsystem; /* dword 0 */
+ u8 rsvd[2]; /* dword 0 */
+ u8 base_status; /* dword 1 */
+ u8 addl_status; /* dword 1 */
+ u8 rsvd1[2]; /* dword 1 */
+ u32 response_length; /* dword 2 */
+ u32 actual_resp_len; /* dword 3 */
+};
+
+struct phys_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/**************************
+ * BE Command definitions *
+ **************************/
+
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_eq_context {
+ u8 cidx[13]; /* dword 0*/
+ u8 rsvd0[3]; /* dword 0*/
+ u8 epidx[13]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd1; /* dword 0*/
+ u8 size; /* dword 0*/
+ u8 pidx[13]; /* dword 1*/
+ u8 rsvd2[3]; /* dword 1*/
+ u8 pd[10]; /* dword 1*/
+ u8 count[3]; /* dword 1*/
+ u8 solevent; /* dword 1*/
+ u8 stalled; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd3[4]; /* dword 2*/
+ u8 func[8]; /* dword 2*/
+ u8 rsvd4; /* dword 2*/
+ u8 delaymult[10]; /* dword 2*/
+ u8 rsvd5[2]; /* dword 2*/
+ u8 phase[2]; /* dword 2*/
+ u8 nodelay; /* dword 2*/
+ u8 rsvd6[4]; /* dword 2*/
+ u8 rsvd7[32]; /* dword 3*/
+} __packed;
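+
+/* Usage sketch (illustrative only): because each bit is modelled as one u8,
+ * offsetof() and the field size within the amap struct directly give a
+ * field's bit offset and bit width.  The AMAP_SET_BITS()/AMAP_GET_BITS()
+ * helpers in be_hw.h rely on this; EQ creation fills the context roughly as:
+ *
+ *	AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1);
+ *	AMAP_SET_BITS(struct amap_eq_context, count, ctxt, eq_size_encoding);
+ *	AMAP_SET_BITS(struct amap_eq_context, delaymult, ctxt, delay_mult);
+ *
+ * "ctxt", "eq_size_encoding" and "delay_mult" are placeholder names.
+ */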
+
+struct be_cmd_req_eq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages; /* sword */
+ u16 rsvd0; /* sword */
+ u8 context[sizeof(struct amap_eq_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eq_create {
+ struct be_cmd_resp_hdr resp_hdr;
+ u16 eq_id; /* sword */
+ u16 msix_idx; /* available only in v2 */
+} __packed;
+
+/******************** Mac query ***************************/
+enum {
+ MAC_ADDRESS_TYPE_STORAGE = 0x0,
+ MAC_ADDRESS_TYPE_NETWORK = 0x1,
+ MAC_ADDRESS_TYPE_PD = 0x2,
+ MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
+};
+
+struct mac_addr {
+ u16 size_of_struct;
+ u8 addr[ETH_ALEN];
+} __packed;
+
+struct be_cmd_req_mac_query {
+ struct be_cmd_req_hdr hdr;
+ u8 type;
+ u8 permanent;
+ u16 if_id;
+ u32 pmac_id;
+} __packed;
+
+struct be_cmd_resp_mac_query {
+ struct be_cmd_resp_hdr hdr;
+ struct mac_addr mac;
+};
+
+/******************** PMac Add ***************************/
+struct be_cmd_req_pmac_add {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u8 mac_address[ETH_ALEN];
+ u8 rsvd0[2];
+} __packed;
+
+struct be_cmd_resp_pmac_add {
+ struct be_cmd_resp_hdr hdr;
+ u32 pmac_id;
+};
+
+/******************** PMac Del ***************************/
+struct be_cmd_req_pmac_del {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u32 pmac_id;
+};
+
+/******************** Create CQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_cq_context_be {
+ u8 cidx[11]; /* dword 0*/
+ u8 rsvd0; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 epidx[11]; /* dword 0*/
+ u8 rsvd1; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 solevent; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 pidx[11]; /* dword 1*/
+ u8 rsvd2; /* dword 1*/
+ u8 pd[10]; /* dword 1*/
+ u8 eqid[8]; /* dword 1*/
+ u8 stalled; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd3[4]; /* dword 2*/
+ u8 func[8]; /* dword 2*/
+ u8 rsvd4[20]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
+struct amap_cq_context_v2 {
+ u8 rsvd0[12]; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 rsvd1[12]; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd2; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 eqid[16]; /* dword 1*/
+ u8 rsvd3[15]; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd4[32]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
+struct be_cmd_req_cq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 page_size;
+ u8 rsvd0;
+ u8 context[sizeof(struct amap_cq_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_cq_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cq_id;
+ u16 rsvd0;
+} __packed;
+
+struct be_cmd_req_get_fat {
+ struct be_cmd_req_hdr hdr;
+ u32 fat_operation;
+ u32 read_log_offset;
+ u32 read_log_length;
+ u32 data_buffer_size;
+ u32 data_buffer[1];
+} __packed;
+
+struct be_cmd_resp_get_fat {
+ struct be_cmd_resp_hdr hdr;
+ u32 log_size;
+ u32 read_log_length;
+ u32 rsvd[2];
+ u32 data_buffer[1];
+} __packed;
+
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context_be {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct amap_mcc_context_v1 {
+ u8 async_cq_id[16];
+ u8 ring_size[4];
+ u8 rsvd0[12];
+ u8 rsvd1[31];
+ u8 valid;
+ u8 async_cq_valid[1];
+ u8 rsvd2[31];
+ u8 rsvd3[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 cq_id;
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_req_mcc_ext_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 cq_id;
+ u32 async_event_bitmap[1];
+ u8 context[sizeof(struct amap_mcc_context_v1) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
+/******************** Create TxQ ***************************/
+#define BE_ETH_TX_RING_TYPE_STANDARD 2
+#define BE_ULP1_NUM 1
+
+struct be_cmd_req_eth_tx_create {
+ struct be_cmd_req_hdr hdr;
+ u8 num_pages;
+ u8 ulp_num;
+ u16 type;
+ u16 if_id;
+ u8 queue_size;
+ u8 rsvd0;
+ u32 rsvd1;
+ u16 cq_id;
+ u16 rsvd2;
+ u32 rsvd3[13];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eth_tx_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cid;
+ u16 rid;
+ u32 db_offset;
+ u32 rsvd0[4];
+} __packed;
+
+/******************** Create RxQ ***************************/
+struct be_cmd_req_eth_rx_create {
+ struct be_cmd_req_hdr hdr;
+ u16 cq_id;
+ u8 frag_size;
+ u8 num_pages;
+ struct phys_addr pages[2];
+ u32 interface_id;
+ u16 max_frame_size;
+ u16 rsvd0;
+ u32 rss_queue;
+} __packed;
+
+struct be_cmd_resp_eth_rx_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u8 rss_id;
+ u8 rsvd0;
+} __packed;
+
+/******************** Q Destroy ***************************/
+/* Type of Queue to be destroyed */
+enum {
+ QTYPE_EQ = 1,
+ QTYPE_CQ,
+ QTYPE_TXQ,
+ QTYPE_RXQ,
+ QTYPE_MCCQ
+};
+
+struct be_cmd_req_q_destroy {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 bypass_flush; /* valid only for rx q destroy */
+} __packed;
+
+/************ I/f Create (it's actually I/f Config Create) **********/
+
+/* Capability flags for the i/f */
+enum be_if_flags {
+ BE_IF_FLAGS_RSS = 0x4,
+ BE_IF_FLAGS_PROMISCUOUS = 0x8,
+ BE_IF_FLAGS_BROADCAST = 0x10,
+ BE_IF_FLAGS_UNTAGGED = 0x20,
+ BE_IF_FLAGS_ULP = 0x40,
+ BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
+ BE_IF_FLAGS_VLAN = 0x100,
+ BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
+ BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
+ BE_IF_FLAGS_MULTICAST = 0x1000,
+ BE_IF_FLAGS_DEFQ_RSS = 0x1000000
+};
+
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+ BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+ BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_DEFQ_RSS)
+
+#define BE_IF_FLAGS_ALL_PROMISCUOUS (BE_IF_FLAGS_PROMISCUOUS | \
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+ BE_IF_FLAGS_MCAST_PROMISCUOUS)
+
+#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | \
+ BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \
+ BE_IF_FLAGS_MULTICAST | \
+ BE_IF_FLAGS_ALL_PROMISCUOUS)
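+
+/* Illustrative sketch: the driver typically requests the full capability set
+ * it may ever need and a smaller set of initially enabled flags when creating
+ * the interface via be_cmd_if_create() (declared later in this file), e.g.:
+ *
+ *	u32 cap_flags = BE_IF_CAP_FLAGS_WANT;
+ *	u32 en_flags = BE_IF_FILT_FLAGS_BASIC | BE_IF_FLAGS_MULTICAST;
+ *
+ *	status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ *				  &if_handle, 0);
+ *
+ * The exact flag combinations depend on chip capabilities; the values above
+ * are placeholders, not necessarily the ones be_main.c uses.
+ */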
+
+/* An RX interface is an object with one or more MAC addresses and
+ * filtering capabilities. */
+struct be_cmd_req_if_create {
+ struct be_cmd_req_hdr hdr;
+ u32 version; /* ignore currently */
+ u32 capability_flags;
+ u32 enable_flags;
+ u8 mac_addr[ETH_ALEN];
+ u8 rsvd0;
+ u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */
+ u32 vlan_tag; /* not used currently */
+} __packed;
+
+struct be_cmd_resp_if_create {
+ struct be_cmd_resp_hdr hdr;
+ u32 interface_id;
+ u32 pmac_id;
+};
+
+/****** I/f Destroy (it's actually I/f Config Destroy) **********/
+struct be_cmd_req_if_destroy {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_id;
+};
+
+/*************** HW Stats Get **********************************/
+struct be_port_rxf_stats_v0 {
+ u32 rx_bytes_lsd; /* dword 0*/
+ u32 rx_bytes_msd; /* dword 1*/
+ u32 rx_total_frames; /* dword 2*/
+ u32 rx_unicast_frames; /* dword 3*/
+ u32 rx_multicast_frames; /* dword 4*/
+ u32 rx_broadcast_frames; /* dword 5*/
+ u32 rx_crc_errors; /* dword 6*/
+ u32 rx_alignment_symbol_errors; /* dword 7*/
+ u32 rx_pause_frames; /* dword 8*/
+ u32 rx_control_frames; /* dword 9*/
+ u32 rx_in_range_errors; /* dword 10*/
+ u32 rx_out_range_errors; /* dword 11*/
+ u32 rx_frame_too_long; /* dword 12*/
+ u32 rx_address_filtered; /* dword 13*/
+ u32 rx_vlan_filtered; /* dword 14*/
+ u32 rx_dropped_too_small; /* dword 15*/
+ u32 rx_dropped_too_short; /* dword 16*/
+ u32 rx_dropped_header_too_small; /* dword 17*/
+ u32 rx_dropped_tcp_length; /* dword 18*/
+ u32 rx_dropped_runt; /* dword 19*/
+ u32 rx_64_byte_packets; /* dword 20*/
+ u32 rx_65_127_byte_packets; /* dword 21*/
+ u32 rx_128_256_byte_packets; /* dword 22*/
+ u32 rx_256_511_byte_packets; /* dword 23*/
+ u32 rx_512_1023_byte_packets; /* dword 24*/
+ u32 rx_1024_1518_byte_packets; /* dword 25*/
+ u32 rx_1519_2047_byte_packets; /* dword 26*/
+ u32 rx_2048_4095_byte_packets; /* dword 27*/
+ u32 rx_4096_8191_byte_packets; /* dword 28*/
+ u32 rx_8192_9216_byte_packets; /* dword 29*/
+ u32 rx_ip_checksum_errs; /* dword 30*/
+ u32 rx_tcp_checksum_errs; /* dword 31*/
+ u32 rx_udp_checksum_errs; /* dword 32*/
+ u32 rx_non_rss_packets; /* dword 33*/
+ u32 rx_ipv4_packets; /* dword 34*/
+ u32 rx_ipv6_packets; /* dword 35*/
+ u32 rx_ipv4_bytes_lsd; /* dword 36*/
+ u32 rx_ipv4_bytes_msd; /* dword 37*/
+ u32 rx_ipv6_bytes_lsd; /* dword 38*/
+ u32 rx_ipv6_bytes_msd; /* dword 39*/
+ u32 rx_chute1_packets; /* dword 40*/
+ u32 rx_chute2_packets; /* dword 41*/
+ u32 rx_chute3_packets; /* dword 42*/
+ u32 rx_management_packets; /* dword 43*/
+ u32 rx_switched_unicast_packets; /* dword 44*/
+ u32 rx_switched_multicast_packets; /* dword 45*/
+ u32 rx_switched_broadcast_packets; /* dword 46*/
+ u32 tx_bytes_lsd; /* dword 47*/
+ u32 tx_bytes_msd; /* dword 48*/
+ u32 tx_unicastframes; /* dword 49*/
+ u32 tx_multicastframes; /* dword 50*/
+ u32 tx_broadcastframes; /* dword 51*/
+ u32 tx_pauseframes; /* dword 52*/
+ u32 tx_controlframes; /* dword 53*/
+ u32 tx_64_byte_packets; /* dword 54*/
+ u32 tx_65_127_byte_packets; /* dword 55*/
+ u32 tx_128_256_byte_packets; /* dword 56*/
+ u32 tx_256_511_byte_packets; /* dword 57*/
+ u32 tx_512_1023_byte_packets; /* dword 58*/
+ u32 tx_1024_1518_byte_packets; /* dword 59*/
+ u32 tx_1519_2047_byte_packets; /* dword 60*/
+ u32 tx_2048_4095_byte_packets; /* dword 61*/
+ u32 tx_4096_8191_byte_packets; /* dword 62*/
+ u32 tx_8192_9216_byte_packets; /* dword 63*/
+ u32 rx_fifo_overflow; /* dword 64*/
+ u32 rx_input_fifo_overflow; /* dword 65*/
+};
+
+struct be_rxf_stats_v0 {
+ struct be_port_rxf_stats_v0 port[2];
+ u32 rx_drops_no_pbuf; /* dword 132*/
+ u32 rx_drops_no_txpb; /* dword 133*/
+ u32 rx_drops_no_erx_descr; /* dword 134*/
+ u32 rx_drops_no_tpre_descr; /* dword 135*/
+ u32 management_rx_port_packets; /* dword 136*/
+ u32 management_rx_port_bytes; /* dword 137*/
+ u32 management_rx_port_pause_frames; /* dword 138*/
+ u32 management_rx_port_errors; /* dword 139*/
+ u32 management_tx_port_packets; /* dword 140*/
+ u32 management_tx_port_bytes; /* dword 141*/
+ u32 management_tx_port_pause; /* dword 142*/
+ u32 management_rx_port_rxfifo_overflow; /* dword 143*/
+ u32 rx_drops_too_many_frags; /* dword 144*/
+ u32 rx_drops_invalid_ring; /* dword 145*/
+ u32 forwarded_packets; /* dword 146*/
+ u32 rx_drops_mtu; /* dword 147*/
+ u32 rsvd0[7];
+ u32 port0_jabber_events;
+ u32 port1_jabber_events;
+ u32 rsvd1[6];
+};
+
+struct be_erx_stats_v0 {
+ u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/
+ u32 rsvd[4];
+};
+
+struct be_pmem_stats {
+ u32 eth_red_drops;
+ u32 rsvd[5];
+};
+
+struct be_hw_stats_v0 {
+ struct be_rxf_stats_v0 rxf;
+ u32 rsvd[48];
+ struct be_erx_stats_v0 erx;
+ struct be_pmem_stats pmem;
+};
+
+struct be_cmd_req_get_stats_v0 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v0)];
+};
+
+struct be_cmd_resp_get_stats_v0 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v0 hw_stats;
+};
+
+struct lancer_pport_stats {
+ u32 tx_packets_lo;
+ u32 tx_packets_hi;
+ u32 tx_unicast_packets_lo;
+ u32 tx_unicast_packets_hi;
+ u32 tx_multicast_packets_lo;
+ u32 tx_multicast_packets_hi;
+ u32 tx_broadcast_packets_lo;
+ u32 tx_broadcast_packets_hi;
+ u32 tx_bytes_lo;
+ u32 tx_bytes_hi;
+ u32 tx_unicast_bytes_lo;
+ u32 tx_unicast_bytes_hi;
+ u32 tx_multicast_bytes_lo;
+ u32 tx_multicast_bytes_hi;
+ u32 tx_broadcast_bytes_lo;
+ u32 tx_broadcast_bytes_hi;
+ u32 tx_discards_lo;
+ u32 tx_discards_hi;
+ u32 tx_errors_lo;
+ u32 tx_errors_hi;
+ u32 tx_pause_frames_lo;
+ u32 tx_pause_frames_hi;
+ u32 tx_pause_on_frames_lo;
+ u32 tx_pause_on_frames_hi;
+ u32 tx_pause_off_frames_lo;
+ u32 tx_pause_off_frames_hi;
+ u32 tx_internal_mac_errors_lo;
+ u32 tx_internal_mac_errors_hi;
+ u32 tx_control_frames_lo;
+ u32 tx_control_frames_hi;
+ u32 tx_packets_64_bytes_lo;
+ u32 tx_packets_64_bytes_hi;
+ u32 tx_packets_65_to_127_bytes_lo;
+ u32 tx_packets_65_to_127_bytes_hi;
+ u32 tx_packets_128_to_255_bytes_lo;
+ u32 tx_packets_128_to_255_bytes_hi;
+ u32 tx_packets_256_to_511_bytes_lo;
+ u32 tx_packets_256_to_511_bytes_hi;
+ u32 tx_packets_512_to_1023_bytes_lo;
+ u32 tx_packets_512_to_1023_bytes_hi;
+ u32 tx_packets_1024_to_1518_bytes_lo;
+ u32 tx_packets_1024_to_1518_bytes_hi;
+ u32 tx_packets_1519_to_2047_bytes_lo;
+ u32 tx_packets_1519_to_2047_bytes_hi;
+ u32 tx_packets_2048_to_4095_bytes_lo;
+ u32 tx_packets_2048_to_4095_bytes_hi;
+ u32 tx_packets_4096_to_8191_bytes_lo;
+ u32 tx_packets_4096_to_8191_bytes_hi;
+ u32 tx_packets_8192_to_9216_bytes_lo;
+ u32 tx_packets_8192_to_9216_bytes_hi;
+ u32 tx_lso_packets_lo;
+ u32 tx_lso_packets_hi;
+ u32 rx_packets_lo;
+ u32 rx_packets_hi;
+ u32 rx_unicast_packets_lo;
+ u32 rx_unicast_packets_hi;
+ u32 rx_multicast_packets_lo;
+ u32 rx_multicast_packets_hi;
+ u32 rx_broadcast_packets_lo;
+ u32 rx_broadcast_packets_hi;
+ u32 rx_bytes_lo;
+ u32 rx_bytes_hi;
+ u32 rx_unicast_bytes_lo;
+ u32 rx_unicast_bytes_hi;
+ u32 rx_multicast_bytes_lo;
+ u32 rx_multicast_bytes_hi;
+ u32 rx_broadcast_bytes_lo;
+ u32 rx_broadcast_bytes_hi;
+ u32 rx_unknown_protos;
+ u32 rsvd_69; /* Word 69 is reserved */
+ u32 rx_discards_lo;
+ u32 rx_discards_hi;
+ u32 rx_errors_lo;
+ u32 rx_errors_hi;
+ u32 rx_crc_errors_lo;
+ u32 rx_crc_errors_hi;
+ u32 rx_alignment_errors_lo;
+ u32 rx_alignment_errors_hi;
+ u32 rx_symbol_errors_lo;
+ u32 rx_symbol_errors_hi;
+ u32 rx_pause_frames_lo;
+ u32 rx_pause_frames_hi;
+ u32 rx_pause_on_frames_lo;
+ u32 rx_pause_on_frames_hi;
+ u32 rx_pause_off_frames_lo;
+ u32 rx_pause_off_frames_hi;
+ u32 rx_frames_too_long_lo;
+ u32 rx_frames_too_long_hi;
+ u32 rx_internal_mac_errors_lo;
+ u32 rx_internal_mac_errors_hi;
+ u32 rx_undersize_packets;
+ u32 rx_oversize_packets;
+ u32 rx_fragment_packets;
+ u32 rx_jabbers;
+ u32 rx_control_frames_lo;
+ u32 rx_control_frames_hi;
+ u32 rx_control_frames_unknown_opcode_lo;
+ u32 rx_control_frames_unknown_opcode_hi;
+ u32 rx_in_range_errors;
+ u32 rx_out_of_range_errors;
+ u32 rx_address_filtered;
+ u32 rx_vlan_filtered;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_invalid_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rx_ip_checksum_errors;
+ u32 rx_tcp_checksum_errors;
+ u32 rx_udp_checksum_errors;
+ u32 rx_non_rss_packets;
+ u32 rsvd_111;
+ u32 rx_ipv4_packets_lo;
+ u32 rx_ipv4_packets_hi;
+ u32 rx_ipv6_packets_lo;
+ u32 rx_ipv6_packets_hi;
+ u32 rx_ipv4_bytes_lo;
+ u32 rx_ipv4_bytes_hi;
+ u32 rx_ipv6_bytes_lo;
+ u32 rx_ipv6_bytes_hi;
+ u32 rx_nic_packets_lo;
+ u32 rx_nic_packets_hi;
+ u32 rx_tcp_packets_lo;
+ u32 rx_tcp_packets_hi;
+ u32 rx_iscsi_packets_lo;
+ u32 rx_iscsi_packets_hi;
+ u32 rx_management_packets_lo;
+ u32 rx_management_packets_hi;
+ u32 rx_switched_unicast_packets_lo;
+ u32 rx_switched_unicast_packets_hi;
+ u32 rx_switched_multicast_packets_lo;
+ u32 rx_switched_multicast_packets_hi;
+ u32 rx_switched_broadcast_packets_lo;
+ u32 rx_switched_broadcast_packets_hi;
+ u32 num_forwards_lo;
+ u32 num_forwards_hi;
+ u32 rx_fifo_overflow;
+ u32 rx_input_fifo_overflow;
+ u32 rx_drops_too_many_frags_lo;
+ u32 rx_drops_too_many_frags_hi;
+ u32 rx_drops_invalid_queue;
+ u32 rsvd_141;
+ u32 rx_drops_mtu_lo;
+ u32 rx_drops_mtu_hi;
+ u32 rx_packets_64_bytes_lo;
+ u32 rx_packets_64_bytes_hi;
+ u32 rx_packets_65_to_127_bytes_lo;
+ u32 rx_packets_65_to_127_bytes_hi;
+ u32 rx_packets_128_to_255_bytes_lo;
+ u32 rx_packets_128_to_255_bytes_hi;
+ u32 rx_packets_256_to_511_bytes_lo;
+ u32 rx_packets_256_to_511_bytes_hi;
+ u32 rx_packets_512_to_1023_bytes_lo;
+ u32 rx_packets_512_to_1023_bytes_hi;
+ u32 rx_packets_1024_to_1518_bytes_lo;
+ u32 rx_packets_1024_to_1518_bytes_hi;
+ u32 rx_packets_1519_to_2047_bytes_lo;
+ u32 rx_packets_1519_to_2047_bytes_hi;
+ u32 rx_packets_2048_to_4095_bytes_lo;
+ u32 rx_packets_2048_to_4095_bytes_hi;
+ u32 rx_packets_4096_to_8191_bytes_lo;
+ u32 rx_packets_4096_to_8191_bytes_hi;
+ u32 rx_packets_8192_to_9216_bytes_lo;
+ u32 rx_packets_8192_to_9216_bytes_hi;
+};
+
+struct pport_stats_params {
+ u16 pport_num;
+ u8 rsvd;
+ u8 reset_stats;
+};
+
+struct lancer_cmd_req_pport_stats {
+ struct be_cmd_req_hdr hdr;
+ union {
+ struct pport_stats_params params;
+ u8 rsvd[sizeof(struct lancer_pport_stats)];
+ } cmd_params;
+};
+
+struct lancer_cmd_resp_pport_stats {
+ struct be_cmd_resp_hdr hdr;
+ struct lancer_pport_stats pport_stats;
+};
+
+static inline struct lancer_pport_stats*
+ pport_stats_from_cmd(struct be_adapter *adapter)
+{
+ struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
+ return &cmd->pport_stats;
+}
+
+struct be_cmd_req_get_cntl_addnl_attribs {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ u16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd0;
+	u8 on_die_temperature; /* in degrees centigrade */
+ u8 rsvd1[3];
+};
+
+struct be_cmd_req_vlan_config {
+ struct be_cmd_req_hdr hdr;
+ u8 interface_id;
+ u8 promiscuous;
+ u8 untagged;
+ u8 num_vlan;
+ u16 normal_vlan[64];
+} __packed;
+
+/******************* RX FILTER ******************************/
+#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
+struct macaddr {
+ u8 byte[ETH_ALEN];
+};
+
+struct be_cmd_req_rx_filter {
+ struct be_cmd_req_hdr hdr;
+ u32 global_flags_mask;
+ u32 global_flags;
+ u32 if_flags_mask;
+ u32 if_flags;
+ u32 if_id;
+ u32 mcast_num;
+ struct macaddr mcast_mac[BE_MAX_MC];
+};
+
+/******************** Link Status Query *******************/
+struct be_cmd_req_link_status {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+enum {
+ PHY_LINK_DUPLEX_NONE = 0x0,
+ PHY_LINK_DUPLEX_HALF = 0x1,
+ PHY_LINK_DUPLEX_FULL = 0x2
+};
+
+enum {
+ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
+ PHY_LINK_SPEED_10MBPS = 0x1,
+ PHY_LINK_SPEED_100MBPS = 0x2,
+ PHY_LINK_SPEED_1GBPS = 0x3,
+ PHY_LINK_SPEED_10GBPS = 0x4,
+ PHY_LINK_SPEED_20GBPS = 0x5,
+ PHY_LINK_SPEED_25GBPS = 0x6,
+ PHY_LINK_SPEED_40GBPS = 0x7
+};
+
+struct be_cmd_resp_link_status {
+ struct be_cmd_resp_hdr hdr;
+ u8 physical_port;
+ u8 mac_duplex;
+ u8 mac_speed;
+ u8 mac_fault;
+ u8 mgmt_mac_duplex;
+ u8 mgmt_mac_speed;
+ u16 link_speed;
+ u8 logical_link_status;
+ u8 rsvd1[3];
+} __packed;
+
+/******************** Port Identification ***************************/
+/* Identifies the type of port attached to NIC */
+struct be_cmd_req_port_type {
+ struct be_cmd_req_hdr hdr;
+ __le32 page_num;
+ __le32 port;
+};
+
+enum {
+ TR_PAGE_A0 = 0xa0,
+ TR_PAGE_A2 = 0xa2
+};
+
+/* From SFF-8436 QSFP+ spec */
+#define QSFP_PLUS_CABLE_TYPE_OFFSET 0x83
+#define QSFP_PLUS_CR4_CABLE 0x8
+#define QSFP_PLUS_SR4_CABLE 0x4
+#define QSFP_PLUS_LR4_CABLE 0x2
+
+/* From SFF-8472 spec */
+#define SFP_PLUS_SFF_8472_COMP 0x5E
+#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8
+#define SFP_PLUS_COPPER_CABLE 0x4
+#define SFP_VENDOR_NAME_OFFSET 0x14
+#define SFP_VENDOR_PN_OFFSET 0x28
+
+#define PAGE_DATA_LEN 256
+struct be_cmd_resp_port_type {
+ struct be_cmd_resp_hdr hdr;
+ u32 page_num;
+ u32 port;
+ u8 page_data[PAGE_DATA_LEN];
+};
+
+/******************** Get FW Version *******************/
+struct be_cmd_req_get_fw_version {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[FW_VER_LEN];
+ u8 rsvd1[FW_VER_LEN];
+} __packed;
+
+struct be_cmd_resp_get_fw_version {
+ struct be_cmd_resp_hdr hdr;
+ u8 firmware_version_string[FW_VER_LEN];
+ u8 fw_on_flash_version_string[FW_VER_LEN];
+} __packed;
+
+/******************** Set Flow Control *******************/
+struct be_cmd_req_set_flow_control {
+ struct be_cmd_req_hdr hdr;
+ u16 tx_flow_control;
+ u16 rx_flow_control;
+} __packed;
+
+/******************** Get Flow Control *******************/
+struct be_cmd_req_get_flow_control {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+struct be_cmd_resp_get_flow_control {
+ struct be_cmd_resp_hdr hdr;
+ u16 tx_flow_control;
+ u16 rx_flow_control;
+} __packed;
+
+/******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+};
+
+struct be_cmd_req_modify_eq_delay {
+ struct be_cmd_req_hdr hdr;
+ u32 num_eq;
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+} __packed;
+
+/******************** Get FW Config *******************/
+/* The HW can come up in either of the following multi-channel modes
+ * based on the skew/IPL.
+ */
+#define RDMA_ENABLED 0x4
+#define QNQ_MODE 0x400
+#define VNIC_MODE 0x20000
+#define UMC_ENABLED 0x1000000
+struct be_cmd_req_query_fw_cfg {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd[31];
+};
+
+struct be_cmd_resp_query_fw_cfg {
+ struct be_cmd_resp_hdr hdr;
+ u32 be_config_number;
+ u32 asic_revision;
+ u32 phys_port;
+ u32 function_mode;
+ u32 rsvd[26];
+ u32 function_caps;
+};
+
+/******************** RSS Config ****************************************/
+/* RSS type Input parameters used to compute RX hash
+ * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
+ * RSS_ENABLE_TCP_IPV4 SRC IPv4, DST IPv4, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_IPV6 SRC IPv6, DST IPv6
+ * RSS_ENABLE_TCP_IPV6 SRC IPv6, DST IPv6, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_UDP_IPV4 SRC IPv4, DST IPv4, UDP SRC PORT, UDP DST PORT
+ * RSS_ENABLE_UDP_IPV6 SRC IPv6, DST IPv6, UDP SRC PORT, UDP DST PORT
+ *
+ * When multiple RSS types are enabled, HW picks the best hash policy
+ * based on the type of the received packet.
+ */
+#define RSS_ENABLE_NONE 0x0
+#define RSS_ENABLE_IPV4 0x1
+#define RSS_ENABLE_TCP_IPV4 0x2
+#define RSS_ENABLE_IPV6 0x4
+#define RSS_ENABLE_TCP_IPV6 0x8
+#define RSS_ENABLE_UDP_IPV4 0x10
+#define RSS_ENABLE_UDP_IPV6 0x20
+
+#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC)
+#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+struct be_cmd_req_rss_config {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u16 enable_rss;
+ u16 cpu_table_size_log2;
+ u32 hash[10];
+ u8 cpu_table[128];
+ u8 flush;
+ u8 rsvd0[3];
+};
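+
+/* Usage sketch (illustrative): RSS is typically enabled for TCP and plain
+ * IPv4/IPv6 traffic with a 128-entry indirection table, matching
+ * cpu_table[128] above, via be_cmd_rss_config() declared at the end of this
+ * file:
+ *
+ *	u32 rss_flags = RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4 |
+ *			RSS_ENABLE_IPV6 | RSS_ENABLE_TCP_IPV6;
+ *
+ *	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128, rss_hkey);
+ *
+ * "rsstable" and "rss_hkey" stand in for the driver's indirection table and
+ * hash key; whether UDP RSS is also enabled depends on chip capabilities.
+ */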
+
+/******************** Port Beacon ***************************/
+
+#define BEACON_STATE_ENABLED 0x1
+#define BEACON_STATE_DISABLED 0x0
+
+struct be_cmd_req_enable_disable_beacon {
+ struct be_cmd_req_hdr hdr;
+ u8 port_num;
+ u8 beacon_state;
+ u8 beacon_duration;
+ u8 status_duration;
+} __packed;
+
+struct be_cmd_req_get_beacon_state {
+ struct be_cmd_req_hdr hdr;
+ u8 port_num;
+ u8 rsvd0;
+ u16 rsvd1;
+} __packed;
+
+struct be_cmd_resp_get_beacon_state {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 beacon_state;
+ u8 rsvd0[3];
+} __packed;
+
+/* Flashrom related descriptors */
+#define MAX_FLASH_COMP 32
+
+/* Optypes of each component in the UFI */
+enum {
+ OPTYPE_ISCSI_ACTIVE = 0,
+ OPTYPE_REDBOOT = 1,
+ OPTYPE_BIOS = 2,
+ OPTYPE_PXE_BIOS = 3,
+ OPTYPE_OFFSET_SPECIFIED = 7,
+ OPTYPE_FCOE_BIOS = 8,
+ OPTYPE_ISCSI_BACKUP = 9,
+ OPTYPE_FCOE_FW_ACTIVE = 10,
+ OPTYPE_FCOE_FW_BACKUP = 11,
+ OPTYPE_NCSI_FW = 13,
+ OPTYPE_REDBOOT_DIR = 18,
+ OPTYPE_REDBOOT_CONFIG = 19,
+ OPTYPE_SH_PHY_FW = 21,
+ OPTYPE_FLASHISM_JUMPVECTOR = 22,
+ OPTYPE_UFI_DIR = 23,
+ OPTYPE_PHY_FW = 99
+};
+
+/* Maximum sizes of components in BE2 FW UFI */
+enum {
+ BE2_BIOS_COMP_MAX_SIZE = 0x40000,
+ BE2_REDBOOT_COMP_MAX_SIZE = 0x40000,
+ BE2_COMP_MAX_SIZE = 0x140000
+};
+
+/* Maximum sizes of components in BE3 FW UFI */
+enum {
+ BE3_NCSI_COMP_MAX_SIZE = 0x40000,
+ BE3_PHY_FW_COMP_MAX_SIZE = 0x40000,
+ BE3_BIOS_COMP_MAX_SIZE = 0x80000,
+ BE3_REDBOOT_COMP_MAX_SIZE = 0x100000,
+ BE3_COMP_MAX_SIZE = 0x200000
+};
+
+/* Offsets for components in BE2 FW UFI */
+enum {
+ BE2_REDBOOT_START = 0x8000,
+ BE2_FCOE_BIOS_START = 0x80000,
+ BE2_ISCSI_PRIMARY_IMAGE_START = 0x100000,
+ BE2_ISCSI_BACKUP_IMAGE_START = 0x240000,
+ BE2_FCOE_PRIMARY_IMAGE_START = 0x380000,
+ BE2_FCOE_BACKUP_IMAGE_START = 0x4c0000,
+ BE2_ISCSI_BIOS_START = 0x700000,
+ BE2_PXE_BIOS_START = 0x780000
+};
+
+/* Offsets for components in BE3 FW UFI */
+enum {
+ BE3_REDBOOT_START = 0x40000,
+ BE3_PHY_FW_START = 0x140000,
+ BE3_ISCSI_PRIMARY_IMAGE_START = 0x200000,
+ BE3_ISCSI_BACKUP_IMAGE_START = 0x400000,
+ BE3_FCOE_PRIMARY_IMAGE_START = 0x600000,
+ BE3_FCOE_BACKUP_IMAGE_START = 0x800000,
+ BE3_ISCSI_BIOS_START = 0xc00000,
+ BE3_PXE_BIOS_START = 0xc80000,
+ BE3_FCOE_BIOS_START = 0xd00000,
+ BE3_NCSI_START = 0xf40000
+};
+
+/* Component entry types */
+enum {
+ IMAGE_NCSI = 0x10,
+ IMAGE_OPTION_ROM_PXE = 0x20,
+ IMAGE_OPTION_ROM_FCOE = 0x21,
+ IMAGE_OPTION_ROM_ISCSI = 0x22,
+ IMAGE_FLASHISM_JUMPVECTOR = 0x30,
+ IMAGE_FIRMWARE_ISCSI = 0xa0,
+ IMAGE_FIRMWARE_FCOE = 0xa2,
+ IMAGE_FIRMWARE_BACKUP_ISCSI = 0xb0,
+ IMAGE_FIRMWARE_BACKUP_FCOE = 0xb2,
+ IMAGE_FIRMWARE_PHY = 0xc0,
+ IMAGE_REDBOOT_DIR = 0xd0,
+ IMAGE_REDBOOT_CONFIG = 0xd1,
+ IMAGE_UFI_DIR = 0xd2,
+ IMAGE_BOOT_CODE = 0xe2
+};
+
+struct controller_id {
+ u32 vendor;
+ u32 device;
+ u32 subvendor;
+ u32 subdevice;
+};
+
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+ int img_type;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+
+struct flash_file_hdr_g2 {
+ u8 sign[32];
+ u32 cksum;
+ u32 antidote;
+ struct controller_id cont_id;
+ u32 file_len;
+ u32 chunk_num;
+ u32 total_chunks;
+ u32 num_imgs;
+ u8 build[24];
+};
+
+/* First letter of the build version of the image */
+#define BLD_STR_UFI_TYPE_BE2 '2'
+#define BLD_STR_UFI_TYPE_BE3 '3'
+#define BLD_STR_UFI_TYPE_SH '4'
+
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 asic_type_rev;
+ u8 rsvd[31];
+};
+
+struct flash_section_hdr {
+ u32 format_rev;
+ u32 cksum;
+ u32 antidote;
+ u32 num_images;
+ u8 id_string[128];
+ u32 rsvd[4];
+} __packed;
+
+struct flash_section_hdr_g2 {
+ u32 format_rev;
+ u32 cksum;
+ u32 antidote;
+ u32 build_num;
+ u8 id_string[128];
+ u32 rsvd[8];
+} __packed;
+
+struct flash_section_entry {
+ u32 type;
+ u32 offset;
+ u32 pad_size;
+ u32 image_size;
+ u32 cksum;
+ u32 entry_point;
+ u16 optype;
+ u16 rsvd0;
+ u32 rsvd1;
+ u8 ver_data[32];
+} __packed;
+
+struct flash_section_info {
+ u8 cookie[32];
+ struct flash_section_hdr fsec_hdr;
+ struct flash_section_entry fsec_entry[32];
+} __packed;
+
+struct flash_section_info_g2 {
+ u8 cookie[32];
+ struct flash_section_hdr_g2 fsec_hdr;
+ struct flash_section_entry fsec_entry[32];
+} __packed;
+
+/****************** Firmware Flash ******************/
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+#define FLASHROM_OPER_PHY_FLASH 9
+#define FLASHROM_OPER_PHY_SAVE 10
+
+struct flashrom_params {
+ u32 op_code;
+ u32 op_type;
+ u32 data_buf_size;
+ u32 offset;
+};
+
+struct be_cmd_write_flashrom {
+ struct be_cmd_req_hdr hdr;
+ struct flashrom_params params;
+ u8 data_buf[32768];
+ u8 rsvd[4];
+} __packed;
+
+/* cmd to read flash crc */
+struct be_cmd_read_flash_crc {
+ struct be_cmd_req_hdr hdr;
+ struct flashrom_params params;
+ u8 crc[4];
+ u8 rsvd[4];
+} __packed;
+
+/**************** Lancer Firmware Flash ************/
+#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
+#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
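+
+/* Informational sketch: Lancer firmware images are written to the "/prg"
+ * object in LANCER_FW_DOWNLOAD_CHUNK (32 KB) pieces.  lancer_fw_download()
+ * (declared at the end of this file) walks the image roughly as follows;
+ * this is a sketch of the flow, not the exact code in the driver:
+ *
+ *	for (offset = 0; offset < fw->size; offset += chunk) {
+ *		chunk = min_t(u32, fw->size - offset,
+ *			      LANCER_FW_DOWNLOAD_CHUNK);
+ *		issue a WRITE_OBJECT for chunk bytes at offset, setting
+ *		the eof bit on the final piece;
+ *	}
+ */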
+
+struct amap_lancer_write_obj_context {
+ u8 write_length[24];
+ u8 reserved1[7];
+ u8 eof;
+} __packed;
+
+struct lancer_cmd_req_write_object {
+ struct be_cmd_req_hdr hdr;
+ u8 context[sizeof(struct amap_lancer_write_obj_context) / 8];
+ u32 write_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+#define LANCER_NO_RESET_NEEDED 0x00
+#define LANCER_FW_RESET_NEEDED 0x02
+struct lancer_cmd_resp_write_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_write_len;
+ u8 change_status;
+ u8 rsvd3[3];
+};
+
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK (32*1024)
+#define LANCER_READ_FILE_EOF_MASK 0x80000000
+
+#define LANCER_FW_DUMP_FILE "/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+ struct be_cmd_req_hdr hdr;
+ u32 desired_read_len;
+ u32 read_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_read_len;
+ u32 eof;
+};
+
+struct lancer_cmd_req_delete_object {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd1;
+ u32 rsvd2;
+ u8 object_name[104];
+};
+
+/************************ WOL *******************************/
+struct be_cmd_req_acpi_wol_magic_config {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0[145];
+ u8 magic_mac[6];
+ u8 rsvd2[2];
+} __packed;
+
+struct be_cmd_req_acpi_wol_magic_config_v1 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[2];
+ u8 query_options;
+ u8 rsvd1[5];
+ u32 rsvd2[288];
+ u8 magic_mac[6];
+ u8 rsvd3[22];
+} __packed;
+
+struct be_cmd_resp_acpi_wol_magic_config_v1 {
+ struct be_cmd_resp_hdr hdr;
+ u8 rsvd0[2];
+ u8 wol_settings;
+ u8 rsvd1[5];
+ u32 rsvd2[288];
+ u8 magic_mac[6];
+ u8 rsvd3[22];
+} __packed;
+
+#define BE_GET_WOL_CAP 2
+
+#define BE_WOL_CAP 0x1
+#define BE_PME_D0_CAP 0x8
+#define BE_PME_D1_CAP 0x10
+#define BE_PME_D2_CAP 0x20
+#define BE_PME_D3HOT_CAP 0x40
+#define BE_PME_D3COLD_CAP 0x80
+
+/********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT 12000
+
+struct be_cmd_req_loopback_test {
+ struct be_cmd_req_hdr hdr;
+ u32 loopback_type;
+ u32 num_pkts;
+ u64 pattern;
+ u32 src_port;
+ u32 dest_port;
+ u32 pkt_size;
+};
+
+struct be_cmd_resp_loopback_test {
+ struct be_cmd_resp_hdr resp_hdr;
+ u32 status;
+ u32 num_txfer;
+ u32 num_rx;
+ u32 miscomp_off;
+ u32 ticks_compl;
+};
+
+struct be_cmd_req_set_lmode {
+ struct be_cmd_req_hdr hdr;
+ u8 src_port;
+ u8 dest_port;
+ u8 loopback_type;
+ u8 loopback_state;
+};
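+
+/* Self-test sketch (illustrative): the ethtool self-test first places the
+ * port in the requested loopback mode, runs the packet test and then clears
+ * the mode again, using the helpers declared at the end of this file:
+ *
+ *	be_cmd_set_loopback(adapter, port_num, loopback_type, 1);
+ *	status = be_cmd_loopback_test(adapter, port_num, loopback_type,
+ *				      pkt_size, num_pkts, pattern);
+ *	be_cmd_set_loopback(adapter, port_num, loopback_type, 0);
+ *
+ * pkt_size, num_pkts and pattern here are placeholders; see be_ethtool.c for
+ * the values actually used.
+ */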
+
+/********************** DDR DMA test *********************/
+struct be_cmd_req_ddrdma_test {
+ struct be_cmd_req_hdr hdr;
+ u64 pattern;
+ u32 byte_count;
+ u32 rsvd0;
+ u8 snd_buff[4096];
+ u8 rsvd1[4096];
+};
+
+struct be_cmd_resp_ddrdma_test {
+ struct be_cmd_resp_hdr hdr;
+ u64 pattern;
+ u32 byte_cnt;
+ u32 snd_err;
+ u8 rsvd0[4096];
+ u8 rcv_buff[4096];
+};
+
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
+enum {
+ PHY_TYPE_CX4_10GB = 0,
+ PHY_TYPE_XFP_10GB,
+ PHY_TYPE_SFP_1GB,
+ PHY_TYPE_SFP_PLUS_10GB,
+ PHY_TYPE_KR_10GB,
+ PHY_TYPE_KX4_10GB,
+ PHY_TYPE_BASET_10GB,
+ PHY_TYPE_BASET_1GB,
+ PHY_TYPE_BASEX_1GB,
+ PHY_TYPE_SGMII,
+ PHY_TYPE_QSFP,
+ PHY_TYPE_KR4_40GB,
+ PHY_TYPE_KR2_20GB,
+ PHY_TYPE_TN_8022,
+ PHY_TYPE_DISABLED = 255
+};
+
+#define BE_SUPPORTED_SPEED_NONE 0
+#define BE_SUPPORTED_SPEED_10MBPS 1
+#define BE_SUPPORTED_SPEED_100MBPS 2
+#define BE_SUPPORTED_SPEED_1GBPS 4
+#define BE_SUPPORTED_SPEED_10GBPS 8
+#define BE_SUPPORTED_SPEED_20GBPS 0x10
+#define BE_SUPPORTED_SPEED_40GBPS 0x20
+
+#define BE_AN_EN 0x2
+#define BE_PAUSE_SYM_EN 0x80
+
+/* MAC speed valid values */
+#define SPEED_DEFAULT 0x0
+#define SPEED_FORCED_10GB 0x1
+#define SPEED_FORCED_1GB 0x2
+#define SPEED_AUTONEG_10GB 0x3
+#define SPEED_AUTONEG_1GB 0x4
+#define SPEED_AUTONEG_100MB 0x5
+#define SPEED_AUTONEG_10GB_1GB 0x6
+#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
+#define SPEED_AUTONEG_1GB_100MB 0x8
+#define SPEED_AUTONEG_10MB 0x9
+#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
+#define SPEED_AUTONEG_100MB_10MB 0xb
+#define SPEED_FORCED_100MB 0xc
+#define SPEED_FORCED_10MB 0xd
+
+struct be_cmd_req_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[24];
+};
+
+struct be_phy_info {
+ u16 phy_type;
+ u16 interface_type;
+ u32 misc_params;
+ u16 ext_phy_details;
+ u16 rsvd;
+ u16 auto_speeds_supported;
+ u16 fixed_speeds_supported;
+ u32 future_use[2];
+};
+
+struct be_cmd_resp_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ struct be_phy_info phy_info;
+};
+
+/*********************** Set QOS ***********************/
+
+#define BE_QOS_BITS_NIC 1
+
+struct be_cmd_req_set_qos {
+ struct be_cmd_req_hdr hdr;
+ u32 valid_bits;
+ u32 max_bps_nic;
+ u32 rsvd[7];
+};
+
+/*********************** Controller Attributes ***********************/
+struct mgmt_hba_attribs {
+ u32 rsvd0[24];
+ u8 controller_model_number[32];
+ u32 rsvd1[16];
+ u32 controller_serial_number[8];
+ u32 rsvd2[55];
+ u8 rsvd3[3];
+ u8 phy_port;
+ u32 rsvd4[15];
+ u8 rsvd5[2];
+ u8 pci_funcnum;
+ u8 rsvd6;
+ u32 rsvd7[6];
+} __packed;
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u32 rsvd0[10];
+} __packed;
+
+struct be_cmd_req_cntl_attribs {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_controller_attrib attribs;
+};
+
+/*********************** Set driver function ***********************/
+#define CAPABILITY_SW_TIMESTAMPS 2
+#define CAPABILITY_BE3_NATIVE_ERX_API 4
+
+struct be_cmd_req_set_func_cap {
+ struct be_cmd_req_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
+struct be_cmd_resp_set_func_cap {
+ struct be_cmd_resp_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
+/*********************** Function Privileges ***********************/
+enum {
+ BE_PRIV_DEFAULT = 0x1,
+ BE_PRIV_LNKQUERY = 0x2,
+ BE_PRIV_LNKSTATS = 0x4,
+ BE_PRIV_LNKMGMT = 0x8,
+ BE_PRIV_LNKDIAG = 0x10,
+ BE_PRIV_UTILQUERY = 0x20,
+ BE_PRIV_FILTMGMT = 0x40,
+ BE_PRIV_IFACEMGMT = 0x80,
+ BE_PRIV_VHADM = 0x100,
+ BE_PRIV_DEVCFG = 0x200,
+ BE_PRIV_DEVSEC = 0x400
+};
+#define MAX_PRIVILEGES (BE_PRIV_VHADM | BE_PRIV_DEVCFG | \
+ BE_PRIV_DEVSEC)
+#define MIN_PRIVILEGES BE_PRIV_DEFAULT
+
+struct be_cmd_priv_map {
+ u8 opcode;
+ u8 subsystem;
+ u32 priv_mask;
+};
+
+struct be_cmd_req_get_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+struct be_cmd_resp_get_fn_privileges {
+ struct be_cmd_resp_hdr hdr;
+ u32 privilege_mask;
+};
+
+struct be_cmd_req_set_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 privileges; /* Used by BE3, SH-R */
+ u32 privileges_lancer; /* Used by Lancer */
+};
+
+/******************** GET/SET_MACLIST **************************/
+#define BE_MAX_MAC 64
+struct be_cmd_req_get_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u8 mac_type;
+ u8 perm_override;
+ u16 iface_id;
+ u32 mac_id;
+ u32 rsvd[3];
+} __packed;
+
+struct get_list_macaddr {
+ u16 mac_addr_size;
+ union {
+ u8 macaddr[6];
+ struct {
+ u8 rsvd[2];
+ u32 mac_id;
+ } __packed s_mac_id;
+ } __packed mac_addr_id;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+ struct be_cmd_resp_hdr hdr;
+ struct get_list_macaddr fd_macaddr; /* Factory default mac */
+ struct get_list_macaddr macid_macaddr; /* soft mac */
+ u8 true_mac_count;
+ u8 pseudo_mac_count;
+ u8 mac_list_size;
+ u8 rsvd;
+ /* perm override mac */
+ struct get_list_macaddr macaddr_list[BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
+/*********************** HSW Config ***********************/
+#define PORT_FWD_TYPE_VEPA 0x3
+#define PORT_FWD_TYPE_VEB 0x2
+#define PORT_FWD_TYPE_PASSTHRU 0x1
+
+#define ENABLE_MAC_SPOOFCHK 0x2
+#define DISABLE_MAC_SPOOFCHK 0x3
+
+struct amap_set_hsw_context {
+ u8 interface_id[16];
+ u8 rsvd0[8];
+ u8 mac_spoofchk[2];
+ u8 rsvd1[4];
+ u8 pvid_valid;
+ u8 pport;
+ u8 rsvd2[6];
+ u8 port_fwd_type[3];
+ u8 rsvd3[5];
+ u8 vlan_spoofchk[2];
+ u8 pvid[16];
+ u8 rsvd4[32];
+ u8 rsvd5[32];
+ u8 rsvd6[32];
+} __packed;
+
+struct be_cmd_req_set_hsw_config {
+ struct be_cmd_req_hdr hdr;
+ u8 context[sizeof(struct amap_set_hsw_context) / 8];
+} __packed;
+
+struct amap_get_hsw_req_context {
+ u8 interface_id[16];
+ u8 rsvd0[14];
+ u8 pvid_valid;
+ u8 pport;
+} __packed;
+
+struct amap_get_hsw_resp_context {
+ u8 rsvd0[6];
+ u8 port_fwd_type[3];
+ u8 rsvd1[5];
+ u8 spoofchk;
+ u8 rsvd2;
+ u8 pvid[16];
+ u8 rsvd3[32];
+ u8 rsvd4[32];
+ u8 rsvd5[32];
+} __packed;
+
+struct be_cmd_req_get_hsw_config {
+ struct be_cmd_req_hdr hdr;
+ u8 context[sizeof(struct amap_get_hsw_req_context) / 8];
+} __packed;
+
+struct be_cmd_resp_get_hsw_config {
+ struct be_cmd_resp_hdr hdr;
+ u8 context[sizeof(struct amap_get_hsw_resp_context) / 8];
+ u32 rsvd;
+};
+
+/******************* get port names ***************/
+struct be_cmd_req_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0;
+};
+
+struct be_cmd_resp_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u8 port_name[4];
+};
+
+/*************** HW Stats Get v1 **********************************/
+#define BE_TXP_SW_SZ 48
+struct be_port_rxf_stats_v1 {
+ u32 rsvd0[12];
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_filtered;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rsvd1[10];
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 rsvd2[7];
+ u32 rx_switched_unicast_packets;
+ u32 rx_switched_multicast_packets;
+ u32 rx_switched_broadcast_packets;
+ u32 rsvd3[3];
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rsvd4[10];
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rsvd5[3];
+};
+
+struct be_rxf_stats_v1 {
+ struct be_port_rxf_stats_v1 port[4];
+ u32 rsvd0[2];
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rsvd1[6];
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rsvd2[14];
+};
+
+struct be_erx_stats_v1 {
+ u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/
+ u32 rsvd[4];
+};
+
+struct be_port_rxf_stats_v2 {
+ u32 rsvd0[10];
+ u32 roce_bytes_received_lsd;
+ u32 roce_bytes_received_msd;
+ u32 rsvd1[5];
+ u32 roce_frames_received;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_filtered;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rsvd2[10];
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 rsvd3[7];
+ u32 rx_switched_unicast_packets;
+ u32 rx_switched_multicast_packets;
+ u32 rx_switched_broadcast_packets;
+ u32 rsvd4[3];
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rsvd5[10];
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rsvd6[3];
+ u32 rx_drops_payload_size;
+ u32 rx_drops_clipped_header;
+ u32 rx_drops_crc;
+ u32 roce_drops_payload_len;
+ u32 roce_drops_crc;
+ u32 rsvd7[19];
+};
+
+struct be_rxf_stats_v2 {
+ struct be_port_rxf_stats_v2 port[4];
+ u32 rsvd0[2];
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rsvd1[6];
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rsvd2[35];
+};
+
+struct be_hw_stats_v1 {
+ struct be_rxf_stats_v1 rxf;
+ u32 rsvd0[BE_TXP_SW_SZ];
+ struct be_erx_stats_v1 erx;
+ struct be_pmem_stats pmem;
+ u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v1 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v1)];
+};
+
+struct be_cmd_resp_get_stats_v1 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v1 hw_stats;
+};
+
+struct be_erx_stats_v2 {
+ u32 rx_drops_no_fragments[136]; /* dwordS 0 to 135*/
+ u32 rsvd[3];
+};
+
+struct be_hw_stats_v2 {
+ struct be_rxf_stats_v2 rxf;
+ u32 rsvd0[BE_TXP_SW_SZ];
+ struct be_erx_stats_v2 erx;
+ struct be_pmem_stats pmem;
+ u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v2 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v2)];
+};
+
+struct be_cmd_resp_get_stats_v2 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v2 hw_stats;
+};
+
+/************** get fat capabilities *******************/
+#define MAX_MODULES 27
+#define MAX_MODES 4
+#define MODE_UART 0
+#define FW_LOG_LEVEL_DEFAULT 48
+#define FW_LOG_LEVEL_FATAL 64
+
+struct ext_fat_mode {
+ u8 mode;
+ u8 rsvd0;
+ u16 port_mask;
+ u32 dbg_lvl;
+ u64 fun_mask;
+} __packed;
+
+struct ext_fat_modules {
+ u8 modules_str[32];
+ u32 modules_id;
+ u32 num_modes;
+ struct ext_fat_mode trace_lvl[MAX_MODES];
+} __packed;
+
+struct be_fat_conf_params {
+ u32 max_log_entries;
+ u32 log_entry_size;
+ u8 log_type;
+ u8 max_log_funs;
+ u8 max_log_ports;
+ u8 rsvd0;
+ u32 supp_modes;
+ u32 num_modules;
+ struct ext_fat_modules module[MAX_MODULES];
+} __packed;
+
+struct be_cmd_req_get_ext_fat_caps {
+ struct be_cmd_req_hdr hdr;
+ u32 parameter_type;
+};
+
+struct be_cmd_resp_get_ext_fat_caps {
+ struct be_cmd_resp_hdr hdr;
+ struct be_fat_conf_params get_params;
+};
+
+struct be_cmd_req_set_ext_fat_caps {
+ struct be_cmd_req_hdr hdr;
+ struct be_fat_conf_params set_params;
+};
+
+#define RESOURCE_DESC_SIZE_V0 72
+#define RESOURCE_DESC_SIZE_V1 88
+#define PCIE_RESOURCE_DESC_TYPE_V0 0x40
+#define NIC_RESOURCE_DESC_TYPE_V0 0x41
+#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
+#define NIC_RESOURCE_DESC_TYPE_V1 0x51
+#define PORT_RESOURCE_DESC_TYPE_V1 0x55
+#define MAX_RESOURCE_DESC 264
+
+#define IF_CAPS_FLAGS_VALID_SHIFT 0 /* IF caps valid */
+#define VFT_SHIFT 3 /* VF template */
+#define IMM_SHIFT 6 /* Immediate */
+#define NOSV_SHIFT 7 /* No save */
+
+#define MISSION_NIC 1
+#define MISSION_RDMA 8
+
+struct be_res_desc_hdr {
+ u8 desc_type;
+ u8 desc_len;
+} __packed;
+
+struct be_port_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u8 link_num;
+ u8 mc_type;
+ u16 rsvd1;
+
+#define NV_TYPE_MASK 0x3 /* bits 0-1 */
+#define NV_TYPE_DISABLED 1
+#define NV_TYPE_VXLAN 3
+#define SOCVID_SHIFT 2 /* Strip outer vlan */
+#define RCVID_SHIFT 4 /* Report vlan */
+#define PF_NUM_IGNORE 255
+ u8 nv_flags;
+ u8 rsvd2;
+ __le16 nv_port; /* vxlan/gre port */
+ u32 rsvd3[19];
+} __packed;
+
+struct be_pcie_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u16 rsvd1;
+ u8 pf_num;
+ u8 rsvd2;
+ u32 rsvd3;
+ u8 sriov_state;
+ u8 pf_state;
+ u8 pf_type;
+ u8 rsvd4;
+ u16 num_vfs;
+ u16 rsvd5;
+ u32 rsvd6[17];
+} __packed;
+
+struct be_nic_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd1;
+
+#define QUN_SHIFT 4 /* QoS is in absolute units */
+ u8 flags;
+ u8 vf_num;
+ u8 rsvd2;
+ u8 pf_num;
+ u8 rsvd3;
+ u16 unicast_mac_count;
+ u8 rsvd4[6];
+ u16 mcc_count;
+ u16 vlan_count;
+ u16 mcast_mac_count;
+ u16 txq_count;
+ u16 rq_count;
+ u16 rssq_count;
+ u16 lro_count;
+ u16 cq_count;
+ u16 toe_conn_count;
+ u16 eq_count;
+ u16 vlan_id;
+ u16 iface_count;
+ u32 cap_flags;
+ u8 link_param;
+ u8 rsvd6;
+ u16 channel_id_param;
+ u32 bw_min;
+ u32 bw_max;
+ u8 acpi_params;
+ u8 wol_param;
+ u16 rsvd7;
+ u16 tunnel_iface_count;
+ u16 direct_tenant_iface_count;
+ u32 rsvd8[6];
+} __packed;
+
+/************ Multi-Channel type ***********/
+enum mc_type {
+ MC_NONE = 0x01,
+ UMC = 0x02,
+ FLEX10 = 0x03,
+ vNIC1 = 0x04,
+ nPAR = 0x05,
+ UFP = 0x06,
+ vNIC2 = 0x07
+};
+
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->mc_type > MC_NONE;
+}
+
+struct be_cmd_req_get_func_config {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_func_config {
+ struct be_cmd_resp_hdr hdr;
+ u32 desc_count;
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
+};
+
+enum {
+ RESOURCE_LIMITS,
+ RESOURCE_MODIFIABLE
+};
+
+struct be_cmd_req_get_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd;
+#define ACTIVE_PROFILE_TYPE 0x2
+#define SAVED_PROFILE_TYPE 0x0
+#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3)
+ u8 type;
+ u16 rsvd1;
+};
+
+struct be_cmd_resp_get_profile_config {
+ struct be_cmd_resp_hdr hdr;
+ __le16 desc_count;
+ u16 rsvd;
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
+};
+
+#define FIELD_MODIFIABLE 0xFFFF
+struct be_cmd_req_set_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+ u32 desc_count;
+ u8 desc[2 * RESOURCE_DESC_SIZE_V1];
+} __packed;
+
+struct be_cmd_req_get_active_profile {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_active_profile {
+ struct be_cmd_resp_hdr hdr;
+ u16 active_profile_id;
+ u16 next_profile_id;
+} __packed;
+
+struct be_cmd_enable_disable_vf {
+ struct be_cmd_req_hdr hdr;
+ u8 enable;
+ u8 rsvd[3];
+};
+
+struct be_cmd_req_intr_set {
+ struct be_cmd_req_hdr hdr;
+ u8 intr_enabled;
+ u8 rsvd[3];
+};
+
+static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
+{
+ return flags & adapter->cmd_privileges ? true : false;
+}
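+
+/* Example (illustrative): commands that need a particular privilege are
+ * gated on the mask reported by the firmware, e.g.:
+ *
+ *	if (!check_privilege(adapter, BE_PRIV_FILTMGMT))
+ *		return -EPERM;
+ *
+ * The privilege bit checked depends on the command being issued.
+ */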
+
+/************** Get IFACE LIST *******************/
+struct be_if_desc {
+ u32 if_id;
+ u32 cap_flags;
+ u32 en_flags;
+};
+
+struct be_cmd_req_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+ u32 if_cnt;
+ struct be_if_desc if_desc;
+};
+
+/************** Set Features *******************/
+#define BE_FEATURE_UE_RECOVERY 0x10
+#define BE_UE_RECOVERY_UER_MASK 0x1
+
+struct be_req_ue_recovery {
+ u32 uer;
+ u32 rsvd;
+};
+
+struct be_cmd_req_set_features {
+ struct be_cmd_req_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_req_ue_recovery req;
+ u32 rsvd[2];
+ } parameter;
+};
+
+struct be_resp_ue_recovery {
+ u32 uer;
+ u16 ue2rp;
+ u16 ue2sr;
+};
+
+struct be_cmd_resp_set_features {
+ struct be_cmd_resp_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_resp_ue_recovery resp;
+ u32 rsvd[2];
+ } parameter;
+};
+
+/*************** Set logical link ********************/
+#define PLINK_ENABLE BIT(0)
+#define PLINK_TRACK BIT(8)
+struct be_cmd_req_set_ll_link {
+ struct be_cmd_req_hdr hdr;
+ u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */
+};
+
+/************** Manage IFACE Filters *******************/
+#define OP_CONVERT_NORMAL_TO_TUNNEL 0
+#define OP_CONVERT_TUNNEL_TO_NORMAL 1
+
+struct be_cmd_req_manage_iface_filters {
+ struct be_cmd_req_hdr hdr;
+ u8 op;
+ u8 rsvd0;
+ u8 flags;
+ u8 rsvd1;
+ u32 tunnel_iface_id;
+ u32 target_iface_id;
+ u8 mac[6];
+ u16 vlan_tag;
+ u32 tenant_id;
+ u32 filter_id;
+ u32 cap_flags;
+ u32 cap_control_flags;
+} __packed;
+
+u16 be_POST_stage_get(struct be_adapter *adapter);
+int be_pci_fnum_get(struct be_adapter *adapter);
+int be_fw_wait_ready(struct be_adapter *adapter);
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ bool permanent, u32 if_handle, u32 pmac_id);
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id,
+ u32 *pmac_id, u32 domain);
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
+ u32 domain);
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u32 *if_handle, u32 domain);
+int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+ struct be_queue_info *eq, bool no_delay,
+ int num_cqe_dma_coalesce);
+int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
+ struct be_queue_info *cq);
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
+int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
+ u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int type);
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ u8 *link_status, u32 dom);
+int be_cmd_reset(struct be_adapter *adapter);
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_get_fw_ver(struct be_adapter *adapter);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ u32 num, u32 domain);
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter);
+int be_cmd_reset_function(struct be_adapter *adapter);
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
+int be_process_mcc(struct be_adapter *adapter);
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+ u8 status, u8 state);
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+ u32 *state);
+int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ u8 page_num, u32 off, u32 len, u8 *data);
+int be_cmd_query_cable_type(struct be_adapter *adapter);
+int be_cmd_query_sfp_info(struct be_adapter *adapter);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
+int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw);
+int be_fw_download(struct be_adapter *adapter, const struct firmware *fw);
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_fw_init(struct be_adapter *adapter);
+int be_cmd_fw_clean(struct be_adapter *adapter);
+void be_async_mcc_enable(struct be_adapter *adapter);
+void be_async_mcc_disable(struct be_adapter *adapter);
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern);
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
+ struct be_dma_mem *cmd);
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+int be_cmd_get_phy_info(struct be_adapter *adapter);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
+ u16 link_speed, u8 domain);
+void be_detect_error(struct be_adapter *adapter);
+int be_cmd_get_die_temperature(struct be_adapter *adapter);
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size);
+int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_req_native_mode(struct be_adapter *adapter);
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ u32 domain);
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 vf_num);
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id,
+ u32 if_handle, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac,
+ u32 if_handle, bool active, u32 domain);
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
+ u32 domain);
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
+ u16 intf_id, u16 hsw_mode, u8 spoofchk);
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
+ u16 intf_id, u8 *mode, bool *spoofchk);
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
+int be_cmd_get_fw_log_level(struct be_adapter *adapter);
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd);
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+ struct be_dma_mem *cmd,
+ struct be_fat_conf_params *cfgs);
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+int lancer_initiate_dump(struct be_adapter *adapter);
+int lancer_delete_dump(struct be_adapter *adapter);
+bool dump_present(struct be_adapter *adapter);
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+int be_cmd_query_port_name(struct be_adapter *adapter);
+int be_cmd_get_func_config(struct be_adapter *adapter,
+ struct be_resources *res);
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain);
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num);
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, u8 domain);
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+ struct be_resources res, u16 num_vfs,
+ struct be_resources *vft_res);
+int be_cmd_set_features(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
new file mode 100644
index 0000000000..a29de29bdf
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -0,0 +1,1462 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005 - 2016 Broadcom
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include "be.h"
+#include "be_cmds.h"
+#include <linux/ethtool.h>
+
+struct be_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int type;
+ int size;
+ int offset;
+};
+
+enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
+#define FIELDINFO(_struct, field) sizeof_field(_struct, field), \
+ offsetof(_struct, field)
+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
+ FIELDINFO(struct be_tx_stats, field)
+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
+ FIELDINFO(struct be_rx_stats, field)
+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
+ FIELDINFO(struct be_drv_stats, field)
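+
+/* For illustration, an entry such as {DRVSTAT_INFO(rx_crc_errors)} expands to
+ *	{"rx_crc_errors", DRVSTAT,
+ *	 sizeof_field(struct be_drv_stats, rx_crc_errors),
+ *	 offsetof(struct be_drv_stats, rx_crc_errors)}
+ * i.e. each table entry carries the stat name, group, field size and field
+ * offset needed to fetch the value generically in be_get_ethtool_stats().
+ */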
+
+static const struct be_ethtool_stat et_stats[] = {
+ {DRVSTAT_INFO(rx_crc_errors)},
+ {DRVSTAT_INFO(rx_alignment_symbol_errors)},
+ {DRVSTAT_INFO(rx_pause_frames)},
+ {DRVSTAT_INFO(rx_control_frames)},
+ /* Received packets dropped when the Ethernet length field
+ * is not equal to the actual Ethernet data length.
+ */
+ {DRVSTAT_INFO(rx_in_range_errors)},
+ /* Received packets dropped when their length field is >= 1501 bytes
+ * and <= 1535 bytes.
+ */
+ {DRVSTAT_INFO(rx_out_range_errors)},
+ /* Received packets dropped when they are longer than 9216 bytes */
+ {DRVSTAT_INFO(rx_frame_too_long)},
+ /* Received packets dropped when they don't pass the unicast or
+ * multicast address filtering.
+ */
+ {DRVSTAT_INFO(rx_address_filtered)},
+ /* Received packets dropped when IP packet length field is less than
+ * the IP header length field.
+ */
+ {DRVSTAT_INFO(rx_dropped_too_small)},
+ /* Received packets dropped when IP length field is greater than
+ * the actual packet length.
+ */
+ {DRVSTAT_INFO(rx_dropped_too_short)},
+ /* Received packets dropped when the IP header length field is less
+ * than 5.
+ */
+ {DRVSTAT_INFO(rx_dropped_header_too_small)},
+ /* Received packets dropped when the TCP header length field is less
+ * than 5 or the TCP header length + IP header length is more
+ * than IP packet length.
+ */
+ {DRVSTAT_INFO(rx_dropped_tcp_length)},
+ {DRVSTAT_INFO(rx_dropped_runt)},
+ /* Number of received packets dropped when a fifo for descriptors going
+ * into the packet demux block overflows. In normal operation, this
+ * fifo must never overflow.
+ */
+ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
+ /* Received packets dropped when the RX block runs out of space in
+ * one of its input FIFOs. This could happen due to a long burst of
+ * minimum-sized (64-byte) frames in the receive path.
+ * This counter may also be erroneously incremented rarely.
+ */
+ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
+ {DRVSTAT_INFO(rx_ip_checksum_errs)},
+ {DRVSTAT_INFO(rx_tcp_checksum_errs)},
+ {DRVSTAT_INFO(rx_udp_checksum_errs)},
+ {DRVSTAT_INFO(tx_pauseframes)},
+ {DRVSTAT_INFO(tx_controlframes)},
+ {DRVSTAT_INFO(rx_priority_pause_frames)},
+ {DRVSTAT_INFO(tx_priority_pauseframes)},
+ /* Received packets dropped when an internal fifo going into
+ * main packet buffer tank (PMEM) overflows.
+ */
+ {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
+ {DRVSTAT_INFO(jabber_events)},
+ /* Received packets dropped due to lack of available HW packet buffers
+ * used to temporarily hold the received packets.
+ */
+ {DRVSTAT_INFO(rx_drops_no_pbuf)},
+ /* Received packets dropped due to input receive buffer
+ * descriptor fifo overflowing.
+ */
+ {DRVSTAT_INFO(rx_drops_no_erx_descr)},
+ /* Packets dropped because the internal FIFO to the offloaded TCP
+ * receive processing block is full. This could happen only for
+ * offloaded iSCSI or FCoE traffic.
+ */
+ {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
+ /* Received packets dropped when they need more than 8
+ * receive buffers. This cannot happen as the driver configures
+ * 2048 byte receive buffers.
+ */
+ {DRVSTAT_INFO(rx_drops_too_many_frags)},
+ {DRVSTAT_INFO(forwarded_packets)},
+ /* Received packets dropped when the frame length
+ * is more than 9018 bytes
+ */
+ {DRVSTAT_INFO(rx_drops_mtu)},
+ /* Number of dma mapping errors */
+ {DRVSTAT_INFO(dma_map_errors)},
+ /* Number of packets dropped due to random early drop function */
+ {DRVSTAT_INFO(eth_red_drops)},
+ {DRVSTAT_INFO(rx_roce_bytes_lsd)},
+ {DRVSTAT_INFO(rx_roce_bytes_msd)},
+ {DRVSTAT_INFO(rx_roce_frames)},
+ {DRVSTAT_INFO(roce_drops_payload_len)},
+ {DRVSTAT_INFO(roce_drops_crc)}
+};
+
+#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
+
+/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
+ * are first and second members respectively.
+ */
+static const struct be_ethtool_stat et_rx_stats[] = {
+ {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
+ {DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_compl_err)},
+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
+ /* Number of page allocation failures while posting receive buffers
+ * to HW.
+ */
+ {DRVSTAT_RX_INFO(rx_post_fail)},
+ /* Received packets dropped due to skb allocation failure */
+ {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
+ /* Received packets dropped due to lack of available fetched buffers
+ * posted by the driver.
+ */
+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
+};
+
+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
+
+/* Stats related to multi TX queues: get_stats routine assumes compl is the
+ * first member
+ */
+static const struct be_ethtool_stat et_tx_stats[] = {
+ {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
+ /* This counter is incremented when the HW encounters an error while
+ * parsing the packet header of an outgoing TX request. This counter is
+ * applicable only for BE2, BE3 and Skyhawk based adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_hdr_parse_err)},
+ /* This counter is incremented when an error occurs in the DMA
+ * operation associated with the TX request from the host to the device.
+ */
+ {DRVSTAT_TX_INFO(tx_dma_err)},
+ /* This counter is incremented when MAC or VLAN spoof checking is
+ * enabled on the interface and the TX request fails the spoof check
+ * in HW.
+ */
+ {DRVSTAT_TX_INFO(tx_spoof_check_err)},
+ /* This counter is incremented when the HW encounters an error while
+ * performing TSO offload. This counter is applicable only for Lancer
+ * adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_tso_err)},
+ /* This counter is incremented when the HW detects Q-in-Q style VLAN
+ * tagging in a packet and such tagging is not expected on the outgoing
+ * interface. This counter is applicable only for Lancer adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_qinq_err)},
+ /* This counter is incremented when the HW detects parity errors in the
+ * packet data. This counter is applicable only for Lancer adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_internal_parity_err)},
+ {DRVSTAT_TX_INFO(tx_sge_err)},
+ {DRVSTAT_TX_INFO(tx_bytes)},
+ {DRVSTAT_TX_INFO(tx_pkts)},
+ {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
+ /* Number of skbs queued for transmission by the driver */
+ {DRVSTAT_TX_INFO(tx_reqs)},
+ /* Number of times the TX queue was stopped due to lack
+ * of space in the TXQ.
+ */
+ {DRVSTAT_TX_INFO(tx_stops)},
+ /* Pkts dropped in the driver's transmit path */
+ {DRVSTAT_TX_INFO(tx_drv_drops)}
+};
+
+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
+
+static const char et_self_tests[][ETH_GSTRING_LEN] = {
+ "MAC Loopback test",
+ "PHY Loopback test",
+ "External Loopback test",
+ "DDR DMA test",
+ "Link test"
+};
+
+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
+#define BE_MAC_LOOPBACK 0x0
+#define BE_PHY_LOOPBACK 0x1
+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
+
+static void be_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
+ strscpy(drvinfo->fw_version, adapter->fw_ver,
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
+
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+ u32 data_read = 0, eof;
+ u8 addn_status;
+ struct be_dma_mem data_len_cmd;
+
+ memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+ /* data_offset and data_size should be 0 to get reg len */
+ lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name,
+ &data_read, &eof, &addn_status);
+
+ return data_read;
+}
+
+static int be_get_dump_len(struct be_adapter *adapter)
+{
+ u32 dump_size = 0;
+
+ if (lancer_chip(adapter))
+ dump_size = lancer_cmd_get_file_len(adapter,
+ LANCER_FW_DUMP_FILE);
+ else
+ dump_size = adapter->fat_dump_len;
+
+ return dump_size;
+}
+
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
+{
+ struct be_dma_mem read_cmd;
+ u32 read_len = 0, total_read_len = 0, chunk_size;
+ u32 eof = 0;
+ u8 addn_status;
+ int status = 0;
+
+ read_cmd.size = LANCER_READ_FILE_CHUNK;
+ read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
+ &read_cmd.dma, GFP_ATOMIC);
+
+ if (!read_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while reading dump\n");
+ return -ENOMEM;
+ }
+
+ while ((total_read_len < buf_len) && !eof) {
+ chunk_size = min_t(u32, (buf_len - total_read_len),
+ LANCER_READ_FILE_CHUNK);
+ chunk_size = ALIGN(chunk_size, 4);
+ status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+ total_read_len, file_name,
+ &read_len, &eof, &addn_status);
+ if (!status) {
+ memcpy(buf + total_read_len, read_cmd.va, read_len);
+ total_read_len += read_len;
+ eof &= LANCER_READ_FILE_EOF_MASK;
+ } else {
+ status = -EIO;
+ break;
+ }
+ }
+ dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
+ read_cmd.dma);
+
+ return status;
+}
+
+static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len,
+ void *buf)
+{
+ int status = 0;
+
+ if (lancer_chip(adapter))
+ status = lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+ dump_len, buf);
+ else
+ status = be_cmd_get_fat_dump(adapter, dump_len, buf);
+
+ return status;
+}
+
+static int be_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
+
+ et->rx_coalesce_usecs = aic->prev_eqd;
+ et->rx_coalesce_usecs_high = aic->max_eqd;
+ et->rx_coalesce_usecs_low = aic->min_eqd;
+
+ et->tx_coalesce_usecs = aic->prev_eqd;
+ et->tx_coalesce_usecs_high = aic->max_eqd;
+ et->tx_coalesce_usecs_low = aic->min_eqd;
+
+ et->use_adaptive_rx_coalesce = adapter->aic_enabled;
+ et->use_adaptive_tx_coalesce = adapter->aic_enabled;
+
+ return 0;
+}
+
+/* TX attributes are ignored. Only RX attributes are considered.
+ * The eqd cmd is issued in the worker thread.
+ */
+static int be_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_aic_obj *aic = &adapter->aic_obj[0];
+ struct be_eq_obj *eqo;
+ int i;
+
+ adapter->aic_enabled = et->use_adaptive_rx_coalesce;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+ aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+ aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+ aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+ aic++;
+ }
+
+ /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
+ * When AIC is disabled, persistently force set EQD value via the
+ * FW cmd, so that we don't have to calculate the delay multiplier
+ * encode value each time EQ_DB is rung
+ */
+ if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
+ be_eqd_update(adapter, true);
+
+ return 0;
+}
+
+static void be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ void *p;
+ unsigned int i, j, base = 0, start;
+
+ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
+ p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
+ data[i] = *(u32 *)p;
+ }
+ base += ETHTOOL_STATS_NUM;
+
+ for_all_rx_queues(adapter, rxo, j) {
+ struct be_rx_stats *stats = rx_stats(rxo);
+
+ do {
+ start = u64_stats_fetch_begin(&stats->sync);
+ data[base] = stats->rx_bytes;
+ data[base + 1] = stats->rx_pkts;
+ } while (u64_stats_fetch_retry(&stats->sync, start));
+
+ for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_rx_stats[i].offset;
+ data[base + i] = *(u32 *)p;
+ }
+ base += ETHTOOL_RXSTATS_NUM;
+ }
+
+ for_all_tx_queues(adapter, txo, j) {
+ struct be_tx_stats *stats = tx_stats(txo);
+
+ do {
+ start = u64_stats_fetch_begin(&stats->sync_compl);
+ data[base] = stats->tx_compl;
+ } while (u64_stats_fetch_retry(&stats->sync_compl, start));
+
+ do {
+ start = u64_stats_fetch_begin(&stats->sync);
+ for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_tx_stats[i].offset;
+ data[base + i] =
+ (et_tx_stats[i].size == sizeof(u64)) ?
+ *(u64 *)p : *(u32 *)p;
+ }
+ } while (u64_stats_fetch_retry(&stats->sync, start));
+ base += ETHTOOL_TXSTATS_NUM;
+ }
+}
+
+static const char be_priv_flags[][ETH_GSTRING_LEN] = {
+ "disable-tpe-recovery"
+};
+
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
+ memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_qs; i++) {
+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
+ sprintf(data, "rxq%d: %s", i,
+ et_rx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < adapter->num_tx_qs; i++) {
+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
+ sprintf(data, "txq%d: %s", i,
+ et_tx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
+ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(be_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, be_priv_flags[i]);
+ break;
+ }
+}
+
+static int be_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return ETHTOOL_TESTS_NUM;
+ case ETH_SS_STATS:
+ return ETHTOOL_STATS_NUM +
+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(be_priv_flags);
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 be_get_port_type(struct be_adapter *adapter)
+{
+ u32 port;
+
+ switch (adapter->phy.interface_type) {
+ case PHY_TYPE_BASET_1GB:
+ case PHY_TYPE_BASEX_1GB:
+ case PHY_TYPE_SGMII:
+ port = PORT_TP;
+ break;
+ case PHY_TYPE_SFP_PLUS_10GB:
+ if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE)
+ port = PORT_DA;
+ else
+ port = PORT_FIBRE;
+ break;
+ case PHY_TYPE_QSFP:
+ if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE)
+ port = PORT_DA;
+ else
+ port = PORT_FIBRE;
+ break;
+ case PHY_TYPE_XFP_10GB:
+ case PHY_TYPE_SFP_1GB:
+ port = PORT_FIBRE;
+ break;
+ case PHY_TYPE_BASET_10GB:
+ port = PORT_TP;
+ break;
+ default:
+ port = PORT_OTHER;
+ }
+
+ return port;
+}
+
+static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
+{
+ u32 val = 0;
+
+ switch (adapter->phy.interface_type) {
+ case PHY_TYPE_BASET_1GB:
+ case PHY_TYPE_BASEX_1GB:
+ case PHY_TYPE_SGMII:
+ val |= SUPPORTED_TP;
+ if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+ val |= SUPPORTED_1000baseT_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
+ val |= SUPPORTED_100baseT_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
+ val |= SUPPORTED_10baseT_Full;
+ break;
+ case PHY_TYPE_KX4_10GB:
+ val |= SUPPORTED_Backplane;
+ if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+ val |= SUPPORTED_1000baseKX_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+ val |= SUPPORTED_10000baseKX4_Full;
+ break;
+ case PHY_TYPE_KR2_20GB:
+ val |= SUPPORTED_Backplane;
+ if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+ val |= SUPPORTED_10000baseKR_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_20GBPS)
+ val |= SUPPORTED_20000baseKR2_Full;
+ break;
+ case PHY_TYPE_KR_10GB:
+ val |= SUPPORTED_Backplane |
+ SUPPORTED_10000baseKR_Full;
+ break;
+ case PHY_TYPE_KR4_40GB:
+ val |= SUPPORTED_Backplane;
+ if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+ val |= SUPPORTED_10000baseKR_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_40GBPS)
+ val |= SUPPORTED_40000baseKR4_Full;
+ break;
+ case PHY_TYPE_QSFP:
+ if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) {
+ switch (adapter->phy.cable_type) {
+ case QSFP_PLUS_CR4_CABLE:
+ val |= SUPPORTED_40000baseCR4_Full;
+ break;
+ case QSFP_PLUS_LR4_CABLE:
+ val |= SUPPORTED_40000baseLR4_Full;
+ break;
+ default:
+ val |= SUPPORTED_40000baseSR4_Full;
+ break;
+ }
+ }
+ fallthrough;
+ case PHY_TYPE_SFP_PLUS_10GB:
+ case PHY_TYPE_XFP_10GB:
+ case PHY_TYPE_SFP_1GB:
+ val |= SUPPORTED_FIBRE;
+ if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+ val |= SUPPORTED_10000baseT_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+ val |= SUPPORTED_1000baseT_Full;
+ break;
+ case PHY_TYPE_BASET_10GB:
+ val |= SUPPORTED_TP;
+ if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+ val |= SUPPORTED_10000baseT_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+ val |= SUPPORTED_1000baseT_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
+ val |= SUPPORTED_100baseT_Full;
+ break;
+ default:
+ val |= SUPPORTED_TP;
+ }
+
+ return val;
+}
+
+bool be_pause_supported(struct be_adapter *adapter)
+{
+ return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
+ adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
+ false : true;
+}
+
+static int be_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 link_status;
+ u16 link_speed = 0;
+ int status;
+ u32 auto_speeds;
+ u32 fixed_speeds;
+ u32 supported = 0, advertising = 0;
+
+ if (adapter->phy.link_speed < 0) {
+ status = be_cmd_link_status_query(adapter, &link_speed,
+ &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
+ cmd->base.speed = link_speed;
+
+ status = be_cmd_get_phy_info(adapter);
+ if (!status) {
+ auto_speeds = adapter->phy.auto_speeds_supported;
+ fixed_speeds = adapter->phy.fixed_speeds_supported;
+
+ be_cmd_query_cable_type(adapter);
+
+ supported =
+ convert_to_et_setting(adapter,
+ auto_speeds |
+ fixed_speeds);
+ advertising =
+ convert_to_et_setting(adapter, auto_speeds);
+
+ cmd->base.port = be_get_port_type(adapter);
+
+ if (adapter->phy.auto_speeds_supported) {
+ supported |= SUPPORTED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ advertising |= ADVERTISED_Autoneg;
+ }
+
+ supported |= SUPPORTED_Pause;
+ if (be_pause_supported(adapter))
+ advertising |= ADVERTISED_Pause;
+ } else {
+ cmd->base.port = PORT_OTHER;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ /* Save for future use */
+ adapter->phy.link_speed = cmd->base.speed;
+ adapter->phy.port_type = cmd->base.port;
+ adapter->phy.autoneg = cmd->base.autoneg;
+ adapter->phy.advertising = advertising;
+ adapter->phy.supported = supported;
+ } else {
+ cmd->base.speed = adapter->phy.link_speed;
+ cmd->base.port = adapter->phy.port_type;
+ cmd->base.autoneg = adapter->phy.autoneg;
+ advertising = adapter->phy.advertising;
+ supported = adapter->phy.supported;
+ }
+
+ cmd->base.duplex = netif_carrier_ok(netdev) ?
+ DUPLEX_FULL : DUPLEX_UNKNOWN;
+ cmd->base.phy_address = adapter->port_num;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static void be_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
+ ring->rx_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
+ ring->tx_pending = adapter->tx_obj[0].q.len;
+}
+
+static void
+be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
+ ecmd->autoneg = adapter->phy.fc_autoneg;
+}
+
+static int
+be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (ecmd->autoneg != adapter->phy.fc_autoneg)
+ return -EINVAL;
+
+ status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
+ ecmd->rx_pause);
+ if (status) {
+ dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
+ return be_cmd_status(status);
+ }
+
+ adapter->tx_fc = ecmd->tx_pause;
+ adapter->rx_fc = ecmd->rx_pause;
+ return 0;
+}
+
+static int be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
+ &adapter->beacon_state);
+ if (status)
+ return be_cmd_status(status);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+ 0, 0, BEACON_STATE_ENABLED);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+ 0, 0, BEACON_STATE_DISABLED);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+ 0, 0, adapter->beacon_state);
+ }
+
+ return be_cmd_status(status);
+}
+
+static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ if (!lancer_chip(adapter) ||
+ !check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ switch (dump->flag) {
+ case LANCER_INITIATE_FW_DUMP:
+ status = lancer_initiate_dump(adapter);
+ if (!status)
+ dev_info(dev, "FW dump initiated successfully\n");
+ break;
+ case LANCER_DELETE_FW_DUMP:
+ status = lancer_delete_dump(adapter);
+ if (!status)
+ dev_info(dev, "FW dump deleted successfully\n");
+ break;
+ default:
+ dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
+ return -EINVAL;
+ }
+ return status;
+}
+
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->wol_cap & BE_WOL_CAP) {
+ wol->supported |= WAKE_MAGIC;
+ if (adapter->wol_en)
+ wol->wolopts |= WAKE_MAGIC;
+ } else {
+ wol->wolopts = 0;
+ }
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_dma_mem cmd;
+ u8 mac[ETH_ALEN];
+ bool enable;
+ int status;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ if (!(adapter->wol_cap & BE_WOL_CAP)) {
+ dev_warn(&adapter->pdev->dev, "WOL not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+ cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ if (!cmd.va)
+ return -ENOMEM;
+
+ eth_zero_addr(mac);
+
+ enable = wol->wolopts & WAKE_MAGIC;
+ if (enable)
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
+
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ if (status) {
+ dev_err(dev, "Could not set Wake-on-lan mac address\n");
+ status = be_cmd_status(status);
+ goto err;
+ }
+
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+
+ adapter->wol_en = enable ? true : false;
+
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
+ return status;
+}
+
+static int be_test_ddr_dma(struct be_adapter *adapter)
+{
+ int ret, i;
+ struct be_dma_mem ddrdma_cmd;
+ static const u64 pattern[2] = {
+ 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
+ };
+
+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ ddrdma_cmd.size, &ddrdma_cmd.dma,
+ GFP_KERNEL);
+ if (!ddrdma_cmd.va)
+ return -ENOMEM;
+
+ for (i = 0; i < 2; i++) {
+ ret = be_cmd_ddr_dma_test(adapter, pattern[i],
+ 4096, &ddrdma_cmd);
+ if (ret != 0)
+ goto err;
+ }
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+ ddrdma_cmd.dma);
+ return be_cmd_status(ret);
+}
+
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+ u64 *status)
+{
+ int ret;
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ loopback_type, 1);
+ if (ret)
+ return ret;
+
+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
+ loopback_type, 1500, 2, 0xabc);
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ BE_NO_LOOPBACK, 1);
+ if (ret)
+ return ret;
+
+ return *status;
+}
+
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ u64 *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status, cnt;
+ u8 link_status = 0;
+
+ if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+ dev_err(&adapter->pdev->dev, "Self test not supported\n");
+ test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+
+ /* check link status before offline tests */
+ link_status = netif_carrier_ok(netdev);
+
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
+ test->flags |= ETH_TEST_FL_FAILED;
+
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
+ test->flags |= ETH_TEST_FL_FAILED;
+
+ if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0)
+ test->flags |= ETH_TEST_FL_FAILED;
+ test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+ }
+
+ if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
+ data[3] = 1;
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ /* link status was down prior to test */
+ if (!link_status) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = 1;
+ return;
+ }
+
+ for (cnt = 10; cnt; cnt--) {
+ status = be_cmd_link_status_query(adapter, NULL, &link_status,
+ 0);
+ if (status) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ break;
+ }
+
+ if (link_status)
+ break;
+
+ msleep_interruptible(500);
+ }
+}
+
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return be_load_fw(adapter, efl->data);
+}
+
+static int
+be_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ dump->len = be_get_dump_len(adapter);
+ dump->version = 1;
+ dump->flag = 0x1; /* FW dump is enabled */
+ return 0;
+}
+
+static int
+be_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buf)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ status = be_read_dump_data(adapter, dump->len, buf);
+ return be_cmd_status(status);
+}
+
+static int be_get_eeprom_len(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return 0;
+
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_PF_FILE);
+ else
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_VF_FILE);
+ } else {
+ return BE_READ_SEEPROM_LEN;
+ }
+}
+
+static int be_read_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+ eeprom->len, data);
+ else
+ return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+ eeprom->len, data);
+ }
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ eeprom_cmd.size, &eeprom_cmd.dma,
+ GFP_KERNEL);
+
+ if (!eeprom_cmd.va)
+ return -ENOMEM;
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
+ }
+ dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return be_cmd_status(status);
+}
+
+static u32 be_get_msg_level(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void be_set_msg_level(struct net_device *netdev, u32 level)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->msg_enable == level)
+ return;
+
+ if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
+ if (BEx_chip(adapter))
+ be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
+ FW_LOG_LEVEL_DEFAULT :
+ FW_LOG_LEVEL_FATAL);
+ adapter->msg_enable = level;
+}
+
+static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+{
+ u64 data = 0;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case TCP_V6_FLOW:
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ }
+
+ return data;
+}
+
+static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxnfc: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
+ break;
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_qs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int be_set_rss_hash_opts(struct be_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ int status;
+ u32 rss_flags = adapter->rss_info.rss_flags;
+
+ if (cmd->data != L3_RSS_FLAGS &&
+ cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ return -EINVAL;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_TCP_IPV4;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_TCP_IPV6;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV6 |
+ RSS_ENABLE_TCP_IPV6;
+ break;
+ case UDP_V4_FLOW:
+ if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+ BEx_chip(adapter))
+ return -EINVAL;
+
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_UDP_IPV4;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV4 |
+ RSS_ENABLE_UDP_IPV4;
+ break;
+ case UDP_V6_FLOW:
+ if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+ BEx_chip(adapter))
+ return -EINVAL;
+
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_UDP_IPV6;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV6 |
+ RSS_ENABLE_UDP_IPV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rss_flags == adapter->rss_info.rss_flags)
+ return 0;
+
+ status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+ rss_flags, RSS_INDIR_TABLE_LEN,
+ adapter->rss_info.rss_hkey);
+ if (!status)
+ adapter->rss_info.rss_flags = rss_flags;
+
+ return be_cmd_status(status);
+}
+
+static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxnfc: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ status = be_set_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return status;
+}
+
+static void be_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
+
+ /* num_tx_qs is always the same as the number of irqs used for TX */
+ ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
+ ch->rx_count = num_rx_irqs - ch->combined_count;
+ ch->tx_count = adapter->num_tx_qs - ch->combined_count;
+
+ ch->max_combined = be_max_qp_irqs(adapter);
+ /* The user must create at least one combined channel */
+ ch->max_rx = be_max_rx_irqs(adapter) - 1;
+ ch->max_tx = be_max_tx_irqs(adapter) - 1;
+}
+
+static int be_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ /* we support either only combined channels or a combination of
+ * combined and either RX-only or TX-only channels.
+ */
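+ /* For example (assuming standard ethtool -L syntax): "combined 4" or
+ * "combined 2 rx 2" are accepted, while a request with no combined
+ * channels, or with both extra rx and tx channels, is rejected below.
+ */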
+ if (ch->other_count || !ch->combined_count ||
+ (ch->rx_count && ch->tx_count))
+ return -EINVAL;
+
+ if (ch->combined_count > be_max_qp_irqs(adapter) ||
+ (ch->rx_count &&
+ (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
+ (ch->tx_count &&
+ (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
+ return -EINVAL;
+
+ adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
+ adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
+
+ status = be_update_queues(adapter);
+ return be_cmd_status(status);
+}
+
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct rss_info *rss = &adapter->rss_info;
+
+ if (indir) {
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+ indir[i] = rss->rss_queue[i];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ int rc = 0, i, j;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ struct be_rx_obj *rxo;
+
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+ j = indir[i];
+ rxo = &adapter->rx_obj[j];
+ rsstable[i] = rxo->rss_id;
+ adapter->rss_info.rss_queue[i] = j;
+ }
+ } else {
+ memcpy(rsstable, adapter->rss_info.rsstable,
+ RSS_INDIR_TABLE_LEN);
+ }
+
+ if (!hkey)
+ hkey = adapter->rss_info.rss_hkey;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->rss_info.rss_flags,
+ RSS_INDIR_TABLE_LEN, hkey);
+ if (rc) {
+ adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+ return -EIO;
+ }
+ memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+ memcpy(adapter->rss_info.rsstable, rsstable,
+ RSS_INDIR_TABLE_LEN);
+ return 0;
+}
+
+static int be_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 page_data[PAGE_DATA_LEN];
+ int status;
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+ 0, PAGE_DATA_LEN, page_data);
+ if (!status) {
+ if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = PAGE_DATA_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = 2 * PAGE_DATA_LEN;
+ }
+ }
+ return be_cmd_status(status);
+}
+
+static int be_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+ u32 begin, end;
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ begin = eeprom->offset;
+ end = eeprom->offset + eeprom->len;
+
+ if (begin < PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+ min_t(u32, end, PAGE_DATA_LEN) - begin,
+ data);
+ if (status)
+ goto err;
+
+ data += PAGE_DATA_LEN - begin;
+ begin = PAGE_DATA_LEN;
+ }
+
+ if (end > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+ begin - PAGE_DATA_LEN,
+ end - begin, data);
+ if (status)
+ goto err;
+ }
+err:
+ return be_cmd_status(status);
+}
+
+static u32 be_get_priv_flags(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->priv_flags;
+}
+
+static int be_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ bool tpe_old = !!(adapter->priv_flags & BE_DISABLE_TPE_RECOVERY);
+ bool tpe_new = !!(flags & BE_DISABLE_TPE_RECOVERY);
+
+ if (tpe_old != tpe_new) {
+ if (tpe_new) {
+ adapter->priv_flags |= BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is disabled\n");
+ } else {
+ adapter->priv_flags &= ~BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is enabled\n");
+ }
+ }
+
+ return 0;
+}
+
+const struct ethtool_ops be_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH,
+ .get_drvinfo = be_get_drvinfo,
+ .get_wol = be_get_wol,
+ .set_wol = be_set_wol,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
+ .get_coalesce = be_get_coalesce,
+ .set_coalesce = be_set_coalesce,
+ .get_ringparam = be_get_ringparam,
+ .get_pauseparam = be_get_pauseparam,
+ .set_pauseparam = be_set_pauseparam,
+ .set_priv_flags = be_set_priv_flags,
+ .get_priv_flags = be_get_priv_flags,
+ .get_strings = be_get_stat_strings,
+ .set_phys_id = be_set_phys_id,
+ .set_dump = be_set_dump,
+ .get_msglevel = be_get_msg_level,
+ .set_msglevel = be_set_msg_level,
+ .get_sset_count = be_get_sset_count,
+ .get_ethtool_stats = be_get_ethtool_stats,
+ .flash_device = be_do_flash,
+ .self_test = be_self_test,
+ .get_rxnfc = be_get_rxnfc,
+ .set_rxnfc = be_set_rxnfc,
+ .get_rxfh_indir_size = be_get_rxfh_indir_size,
+ .get_rxfh_key_size = be_get_rxfh_key_size,
+ .get_rxfh = be_get_rxfh,
+ .set_rxfh = be_set_rxfh,
+ .get_dump_flag = be_get_dump_flag,
+ .get_dump_data = be_get_dump_data,
+ .get_channels = be_get_channels,
+ .set_channels = be_set_channels,
+ .get_module_info = be_get_module_info,
+ .get_module_eeprom = be_get_module_eeprom,
+ .get_link_ksettings = be_get_link_ksettings,
+};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
new file mode 100644
index 0000000000..3476194f08
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005-2016 Broadcom.
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/********* Mailbox door bell *************/
+/* Used for driver communication with the FW.
+ * The software must write this register twice to post any command. First,
+ * it writes the register with hi=1 and the upper bits of the physical address
+ * for the MAILBOX structure. Software must poll the ready bit until this
+ * is acknowledged. Then, software writes the register with hi=0 and the lower
+ * bits of the address. It must poll the ready bit until the command is
+ * complete. Upon completion, the MAILBOX will contain a valid completion
+ * queue entry.
+ */
+#define MPU_MAILBOX_DB_OFFSET 0x160
+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
+#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
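+
+/* A minimal sketch of the two-phase post described above (illustrative only;
+ * the real sequence, with timeouts, lives in be_cmds.c). Here "db" is the
+ * mapped doorbell register and "dma" the mailbox physical address:
+ *
+ *	iowrite32(MPU_MAILBOX_DB_HI_MASK | <upper address bits of dma>, db);
+ *	poll until (ioread32(db) & MPU_MAILBOX_DB_RDY_MASK) is set
+ *	iowrite32(<lower address bits of dma>, db);	hi bit is now clear
+ *	poll the ready bit again; the mailbox then holds a valid completion
+ *	queue entry
+ */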
+
+#define MPU_EP_CONTROL 0
+
+/********** MPU semaphore: used for SH & BE *************/
+#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
+#define POST_STAGE_MASK 0x0000FFFF
+#define POST_ERR_MASK 0x1
+#define POST_ERR_SHIFT 31
+#define POST_ERR_RECOVERY_CODE_MASK 0xFFF
+
+/* Soft Reset register masks */
+#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
+
+/* MPU semaphore POST stage values */
+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
+#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
+#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
+#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
+/* FW has detected a UE and is dumping FAT log data */
+#define POST_STAGE_FAT_LOG_START 0x0D00
+#define POST_STAGE_ARMFW_UE 0xF000 /* FW has asserted a UE */
+
+/* Lancer SLIPORT registers */
+#define SLIPORT_STATUS_OFFSET 0x404
+#define SLIPORT_CONTROL_OFFSET 0x408
+#define SLIPORT_ERROR1_OFFSET 0x40C
+#define SLIPORT_ERROR2_OFFSET 0x410
+#define PHYSDEV_CONTROL_OFFSET 0x414
+
+#define SLIPORT_STATUS_ERR_MASK 0x80000000
+#define SLIPORT_STATUS_DIP_MASK 0x02000000
+#define SLIPORT_STATUS_RN_MASK 0x01000000
+#define SLIPORT_STATUS_RDY_MASK 0x00800000
+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
+#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
+#define PHYSDEV_CONTROL_DD_MASK 0x00000004
+#define PHYSDEV_CONTROL_INP_MASK 0x40000000
+
+#define SLIPORT_ERROR_NO_RESOURCE1 0x2
+#define SLIPORT_ERROR_NO_RESOURCE2 0x9
+
+#define SLIPORT_ERROR_FW_RESET1 0x2
+#define SLIPORT_ERROR_FW_RESET2 0x0
+
+/********* Memory BAR register ************/
+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
+/* Host Interrupt Enable: if set, interrupts are enabled, although "PCI
+ * Interrupt Disable" may still globally block interrupts in addition to the
+ * individual interrupt masks. This gives the device driver a mechanism to
+ * block all interrupts atomically without having to arbitrate for the PCI
+ * Interrupt Disable bit with the OS.
+ */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK BIT(29) /* bit 29 */
+
+/********* PCI Function Capability *********/
+#define BE_FUNCTION_CAPS_RSS 0x2
+#define BE_FUNCTION_CAPS_SUPER_NIC 0x40
+
+/********* Power management (WOL) **********/
+#define PCICFG_PM_CONTROL_OFFSET 0x44
+#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
+
+/********* Online Control Registers *******/
+#define PCICFG_ONLINE0 0xB0
+#define PCICFG_ONLINE1 0xB4
+
+/********* UE Status and Mask Registers ***/
+#define PCICFG_UE_STATUS_LOW 0xA0
+#define PCICFG_UE_STATUS_HIGH 0xA4
+#define PCICFG_UE_STATUS_LOW_MASK 0xA8
+#define PCICFG_UE_STATUS_HI_MASK 0xAC
+
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET 0x58
+#define SLI_INTF_VALID_MASK 0xE0000000
+#define SLI_INTF_VALID 0xC0000000
+#define SLI_INTF_HINT2_MASK 0x1F000000
+#define SLI_INTF_HINT2_SHIFT 24
+#define SLI_INTF_HINT1_MASK 0x00FF0000
+#define SLI_INTF_HINT1_SHIFT 16
+#define SLI_INTF_FAMILY_MASK 0x00000F00
+#define SLI_INTF_FAMILY_SHIFT 8
+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT 12
+#define SLI_INTF_REV_MASK 0x000000F0
+#define SLI_INTF_REV_SHIFT 4
+#define SLI_INTF_FT_MASK 0x00000001
+
+#define SLI_INTF_TYPE_2 2
+#define SLI_INTF_TYPE_3 3
+
+/********* ISR0 Register offset **********/
+#define CEV_ISR0_OFFSET 0xC18
+#define CEV_ISR_SIZE 4
+
+/********* Event Q door bell *************/
+#define DB_EQ_OFFSET DB_CQ_OFFSET
+#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
+
+/* Clear the interrupt for this eq */
+#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
+/* Must be 1 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+/* Number of event entries processed */
+#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
+/* Rearm to interrupt delay encoding */
+#define DB_EQ_R2I_DLY_SHIFT (30) /* bits 30 - 31 */
+
+/* Rearm to interrupt (R2I) delay multiplier encoding represents 3 different
+ * values configured in CEV_REARM2IRPT_DLY_MULT_CSR register. This value is
+ * programmed by the host driver while ringing an EQ doorbell (EQ_DB) if a delay
+ * between rearming the EQ and next interrupt on this EQ is desired.
+ */
+#define R2I_DLY_ENC_0 0 /* No delay */
+#define R2I_DLY_ENC_1 1 /* maps to 160us EQ delay */
+#define R2I_DLY_ENC_2 2 /* maps to 96us EQ delay */
+#define R2I_DLY_ENC_3 3 /* maps to 48us EQ delay */
+
+/********* Compl Q door bell *************/
+#define DB_CQ_OFFSET 0x120
+#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14 placing at 11-15 */
+
+/* Number of event entries processed */
+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
+
+/********** TX ULP door bell *************/
+#define DB_TXULP1_OFFSET 0x60
+#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of tx entries posted */
+#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
+
+/********** RQ(erx) door bell ************/
+#define DB_RQ_OFFSET 0x100
+#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+/* Number of rx frags posted */
+#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
+
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+
+/********** SRIOV VF PCICFG OFFSET ********/
+#define SRIOV_VF_PCICFG_OFFSET (4096)
+
+/********** FAT TABLE ********/
+#define RETRIEVE_FAT 0
+#define QUERY_FAT 1
+
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
+
+/*
+ * BE descriptors: host memory data structures whose formats
+ * are hardwired in BE silicon.
+ */
+/* Event Queue Descriptor */
+#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
+#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
+#define EQ_ENTRY_RES_ID_SHIFT 16
+
+struct be_eq_entry {
+ u32 evt;
+};
+
+/* TX Queue Descriptor */
+#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
+struct be_eth_wrb {
+ __le32 frag_pa_hi; /* dword 0 */
+ __le32 frag_pa_lo; /* dword 1 */
+ u32 rsvd0; /* dword 2 */
+ __le32 frag_len; /* dword 3: bits 0 - 15 */
+} __packed;
+
+/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
+ * actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_hdr_wrb {
+ u8 rsvd0[32]; /* dword 0 */
+ u8 rsvd1[32]; /* dword 1 */
+ u8 complete; /* dword 2 */
+ u8 event;
+ u8 crc;
+ u8 forward;
+ u8 lso6;
+ u8 mgmt;
+ u8 ipcs;
+ u8 udpcs;
+ u8 tcpcs;
+ u8 lso;
+ u8 vlan;
+ u8 gso[2];
+ u8 num_wrb[5];
+ u8 lso_mss[14];
+ u8 len[16]; /* dword 3 */
+ u8 vlan_tag[16];
+} __packed;
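+
+/* Because each bit is declared as a byte above, offsetof()/sizeof() on this
+ * pseudo struct yield the bit offset and bit width of a field. For example,
+ * num_wrb starts at byte offset 77, i.e. bit 77 of the real header = bit 13
+ * of dword 2, and is 5 "bytes" (bits) wide - which matches
+ * TX_HDR_WRB_NUM_SHIFT (13) and TX_HDR_WRB_NUM_MASK (0x1F) below.
+ */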
+
+#define TX_HDR_WRB_COMPL 1 /* word 2 */
+#define TX_HDR_WRB_EVT BIT(1) /* word 2 */
+#define TX_HDR_WRB_NUM_SHIFT 13 /* word 2: bits 13:17 */
+#define TX_HDR_WRB_NUM_MASK 0x1F /* word 2: bits 13:17 */
+
+struct be_eth_hdr_wrb {
+ __le32 dw[4];
+};
+
+/********* Tx Compl Status Encoding *********/
+#define BE_TX_COMP_HDR_PARSE_ERR 0x2
+#define BE_TX_COMP_NDMA_ERR 0x3
+#define BE_TX_COMP_ACL_ERR 0x5
+
+#define LANCER_TX_COMP_LSO_ERR 0x1
+#define LANCER_TX_COMP_HSW_DROP_MAC_ERR 0x3
+#define LANCER_TX_COMP_HSW_DROP_VLAN_ERR 0x5
+#define LANCER_TX_COMP_QINQ_ERR 0x7
+#define LANCER_TX_COMP_SGE_ERR 0x9
+#define LANCER_TX_COMP_PARITY_ERR 0xb
+#define LANCER_TX_COMP_DMA_ERR 0xd
+
+/* TX Compl Queue Descriptor */
+
+/* Pseudo amap definition for eth_tx_compl in which each bit of the
+ * actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_tx_compl {
+ u8 wrb_index[16]; /* dword 0 */
+ u8 ct[2]; /* dword 0 */
+ u8 port[2]; /* dword 0 */
+ u8 rsvd0[8]; /* dword 0 */
+ u8 status[4]; /* dword 0 */
+ u8 user_bytes[16]; /* dword 1 */
+ u8 nwh_bytes[8]; /* dword 1 */
+ u8 lso; /* dword 1 */
+ u8 cast_enc[2]; /* dword 1 */
+ u8 rsvd1[5]; /* dword 1 */
+ u8 rsvd2[32]; /* dword 2 */
+ u8 pkts[16]; /* dword 3 */
+ u8 ringid[11]; /* dword 3 */
+ u8 hash_val[4]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct be_eth_tx_compl {
+ u32 dw[4];
+};
+
+/* RX Queue Descriptor */
+struct be_eth_rx_d {
+ u32 fragpa_hi;
+ u32 fragpa_lo;
+};
+
+/* RX Compl Queue Descriptor */
+
+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v0 {
+ u8 vlan_tag[16]; /* dword 0 */
+ u8 pktsize[14]; /* dword 0 */
+ u8 port; /* dword 0 */
+ u8 ip_opt; /* dword 0 */
+ u8 err; /* dword 1 */
+ u8 rsshp; /* dword 1 */
+ u8 ipf; /* dword 1 */
+ u8 tcpf; /* dword 1 */
+ u8 udpf; /* dword 1 */
+ u8 ipcksm; /* dword 1 */
+ u8 l4_cksm; /* dword 1 */
+ u8 ip_version; /* dword 1 */
+ u8 macdst[6]; /* dword 1 */
+ u8 vtp; /* dword 1 */
+ u8 ip_frag; /* dword 1 */
+ u8 fragndx[10]; /* dword 1 */
+ u8 ct[2]; /* dword 1 */
+ u8 sw; /* dword 1 */
+ u8 numfrags[3]; /* dword 1 */
+ u8 rss_flush; /* dword 2 */
+ u8 cast_enc[2]; /* dword 2 */
+ u8 qnq; /* dword 2 */
+ u8 rss_bank; /* dword 2 */
+ u8 rsvd1[23]; /* dword 2 */
+ u8 lro_pkt; /* dword 2 */
+ u8 rsvd2[2]; /* dword 2 */
+ u8 valid; /* dword 2 */
+ u8 rsshash[32]; /* dword 3 */
+} __packed;
+
+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v1 {
+ u8 vlan_tag[16]; /* dword 0 */
+ u8 pktsize[14]; /* dword 0 */
+ u8 vtp; /* dword 0 */
+ u8 ip_opt; /* dword 0 */
+ u8 err; /* dword 1 */
+ u8 rsshp; /* dword 1 */
+ u8 ipf; /* dword 1 */
+ u8 tcpf; /* dword 1 */
+ u8 udpf; /* dword 1 */
+ u8 ipcksm; /* dword 1 */
+ u8 l4_cksm; /* dword 1 */
+ u8 ip_version; /* dword 1 */
+ u8 macdst[7]; /* dword 1 */
+ u8 rsvd0; /* dword 1 */
+ u8 fragndx[10]; /* dword 1 */
+ u8 ct[2]; /* dword 1 */
+ u8 sw; /* dword 1 */
+ u8 numfrags[3]; /* dword 1 */
+ u8 rss_flush; /* dword 2 */
+ u8 cast_enc[2]; /* dword 2 */
+ u8 qnq; /* dword 2 */
+ u8 rss_bank; /* dword 2 */
+ u8 port[2]; /* dword 2 */
+ u8 vntagp; /* dword 2 */
+ u8 header_len[8]; /* dword 2 */
+ u8 header_split[2]; /* dword 2 */
+ u8 rsvd1[12]; /* dword 2 */
+ u8 tunneled;
+ u8 valid; /* dword 2 */
+ u8 rsshash[32]; /* dword 3 */
+} __packed;
+
+struct be_eth_rx_compl {
+ u32 dw[4];
+};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
new file mode 100644
index 0000000000..ad862ed788
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -0,0 +1,6146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005 - 2016 Broadcom
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/prefetch.h>
+#include <linux/module.h>
+#include "be.h"
+#include "be_cmds.h"
+#include <asm/div64.h>
+#include <linux/if_bridge.h>
+#include <net/busy_poll.h>
+#include <net/vxlan.h>
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Emulex Corporation");
+MODULE_LICENSE("GPL");
+
+/* num_vfs module param is obsolete.
+ * Use sysfs method to enable/disable VFs.
+ */
+static unsigned int num_vfs;
+module_param(num_vfs, uint, 0444);
+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, 0444);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
+/* Per-module error detection/recovery workq shared across all functions.
+ * Each function schedules its own work request on this shared workq.
+ */
+static struct workqueue_struct *be_err_recovery_workq;
+
+static const struct pci_device_id be_dev_ids[] = {
+#ifdef CONFIG_BE2NET_BE2
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+#endif /* CONFIG_BE2NET_BE2 */
+#ifdef CONFIG_BE2NET_BE3
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+#endif /* CONFIG_BE2NET_BE3 */
+#ifdef CONFIG_BE2NET_LANCER
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+#endif /* CONFIG_BE2NET_LANCER */
+#ifdef CONFIG_BE2NET_SKYHAWK
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
+#endif /* CONFIG_BE2NET_SKYHAWK */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, be_dev_ids);
+
+/* Workqueue used by all functions for deferring cmd calls to the adapter */
+static struct workqueue_struct *be_wq;
+
+/* UE Status Low CSR */
+static const char * const ue_status_low_desc[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "ERX2 ",
+ "SPARE ",
+ "JTAG ",
+ "MPU_INTPEND "
+};
+
+/* UE Status High CSR */
+static const char * const ue_status_hi_desc[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "ECRC",
+ "Poison TLP",
+ "NETC",
+ "PERIPH",
+ "LLTXULP",
+ "D2P",
+ "RCON",
+ "LDMA",
+ "LLTXP",
+ "LLTXPB",
+ "Unknown"
+};
+
+#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
+ BE_IF_FLAGS_BROADCAST | \
+ BE_IF_FLAGS_MULTICAST | \
+ BE_IF_FLAGS_PASS_L3L4_ERRORS)
+
+static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+
+ if (mem->va) {
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
+ mem->va = NULL;
+ }
+}
+
+static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
+ u16 len, u16 entry_size)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+
+ memset(q, 0, sizeof(*q));
+ q->len = len;
+ q->entry_size = entry_size;
+ mem->size = len * entry_size;
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ &mem->dma, GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+ return 0;
+}
+
+static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
+{
+ u32 reg, enabled;
+
+ pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
+ &reg);
+ enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+
+ if (!enabled && enable)
+ reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ else if (enabled && !enable)
+ reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ else
+ return;
+
+ pci_write_config_dword(adapter->pdev,
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+}
+
+static void be_intr_set(struct be_adapter *adapter, bool enable)
+{
+ int status = 0;
+
+ /* On Lancer, interrupts can't be controlled via this register */
+ if (lancer_chip(adapter))
+ return;
+
+ if (be_check_error(adapter, BE_ERROR_EEH))
+ return;
+
+ status = be_cmd_intr_set(adapter, enable);
+ if (status)
+ be_reg_intr_set(adapter, enable);
+}
+
+static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
+{
+ u32 val = 0;
+
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
+ val |= qid & DB_RQ_RING_ID_MASK;
+ val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + DB_RQ_OFFSET);
+}
+
+static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
+ u16 posted)
+{
+ u32 val = 0;
+
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
+ val |= txo->q.id & DB_TXULP_RING_ID_MASK;
+ val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + txo->db_offset);
+}
+
+static void be_eq_notify(struct be_adapter *adapter, u16 qid,
+ bool arm, bool clear_int, u16 num_popped,
+ u32 eq_delay_mult_enc)
+{
+ u32 val = 0;
+
+ val |= qid & DB_EQ_RING_ID_MASK;
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
+
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
+ if (arm)
+ val |= 1 << DB_EQ_REARM_SHIFT;
+ if (clear_int)
+ val |= 1 << DB_EQ_CLR_SHIFT;
+ val |= 1 << DB_EQ_EVNT_SHIFT;
+ val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+ val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
+ iowrite32(val, adapter->db + DB_EQ_OFFSET);
+}
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
+{
+ u32 val = 0;
+
+ val |= qid & DB_CQ_RING_ID_MASK;
+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
+
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
+ if (arm)
+ val |= 1 << DB_CQ_REARM_SHIFT;
+ val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
+ iowrite32(val, adapter->db + DB_CQ_OFFSET);
+}
+
+static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
+{
+ int i;
+
+ /* Check if mac has already been added as part of uc-list */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
+ /* mac already added, skip addition */
+ adapter->pmac_id[0] = adapter->pmac_id[i + 1];
+ return 0;
+ }
+ }
+
+ return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+}
+
+static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ int i;
+
+ /* Skip deletion if the programmed mac is
+ * being used in uc-list
+ */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (adapter->pmac_id[i + 1] == pmac_id)
+ return;
+ }
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
+}
+
+static int be_mac_addr_set(struct net_device *netdev, void *p)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct sockaddr *addr = p;
+ int status;
+ u8 mac[ETH_ALEN];
+ u32 old_pmac_id = adapter->pmac_id[0];
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* Proceed further only if the user-provided MAC is different
+ * from the active MAC
+ */
+ if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
+ return 0;
+
+ /* BE3 VFs without FILTMGMT privilege are not allowed to change
+ * the MAC address
+ */
+ if (BEx_chip(adapter) && be_virtfn(adapter) &&
+ !check_privilege(adapter, BE_PRIV_FILTMGMT))
+ return -EPERM;
+
+ /* if device is not running, copy MAC to netdev->dev_addr */
+ if (!netif_running(netdev))
+ goto done;
+
+ /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
+ * privilege or if the PF did not provision the new MAC address.
+ * On BE3, this cmd will always fail if the VF doesn't have the
+ * FILTMGMT privilege. Such a failure is OK only if the PF has
+ * already programmed the MAC for the VF.
+ */
+ mutex_lock(&adapter->rx_filter_lock);
+ status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
+ if (!status) {
+ /* Delete the old programmed MAC. This call may fail if the
+ * old MAC was already deleted by the PF driver.
+ */
+ if (adapter->pmac_id[0] != old_pmac_id)
+ be_dev_mac_del(adapter, old_pmac_id);
+ }
+
+ mutex_unlock(&adapter->rx_filter_lock);
+ /* Decide if the new MAC is successfully activated only after
+ * querying the FW
+ */
+ status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
+ adapter->if_handle, true, 0);
+ if (status)
+ goto err;
+
+ /* The MAC change did not happen, either due to lack of privilege
+ * or PF didn't pre-provision.
+ */
+ if (!ether_addr_equal(addr->sa_data, mac)) {
+ status = -EPERM;
+ goto err;
+ }
+
+ /* Remember currently programmed MAC */
+ ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
+ eth_hw_addr_set(netdev, addr->sa_data);
+ dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
+ return 0;
+err:
+ dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
+ return status;
+}
+
+/* BE2 supports only v0 cmd */
+static void *hw_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (BE2_chip(adapter)) {
+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ } else if (BE3_chip(adapter)) {
+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ }
+}
+
+/* BE2 supports only v0 cmd */
+static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (BE2_chip(adapter)) {
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ } else if (BE3_chip(adapter)) {
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ }
+}
+
+static void populate_be_v0_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v0 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_address_filtered =
+ port_stats->rx_address_filtered +
+ port_stats->rx_vlan_filtered;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+
+ if (adapter->port_num)
+ drvs->jabber_events = rxf_stats->port1_jabber_events;
+ else
+ drvs->jabber_events = rxf_stats->port0_jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_be_v1_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v1 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ drvs->rx_address_filtered = port_stats->rx_address_filtered;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+ drvs->jabber_events = port_stats->jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_be_v2_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v2 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ drvs->rx_address_filtered = port_stats->rx_address_filtered;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+ drvs->jabber_events = port_stats->jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+ if (be_roce_supported(adapter)) {
+ drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
+ drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
+ drvs->rx_roce_frames = port_stats->roce_frames_received;
+ drvs->roce_drops_crc = port_stats->roce_drops_crc;
+ drvs->roce_drops_payload_len =
+ port_stats->roce_drops_payload_len;
+ }
+}
+
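+/* Lancer reports port stats via the PPORT_STATS cmd; map the fields of
+ * interest into the common driver stats.
+ */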
+static void populate_lancer_stats(struct be_adapter *adapter)
+{
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+ struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
+
+ be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
+ drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
+ drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
+ drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
+ drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
+ drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
+ drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
+ drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
+ drvs->rx_dropped_tcp_length =
+ pport_stats->rx_dropped_invalid_tcp_length;
+ drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
+ drvs->rx_dropped_header_too_small =
+ pport_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+ drvs->rx_address_filtered =
+ pport_stats->rx_address_filtered +
+ pport_stats->rx_vlan_filtered;
+ drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
+ drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+ drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
+ drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
+ drvs->jabber_events = pport_stats->rx_jabbers;
+ drvs->forwarded_packets = pport_stats->num_forwards_lo;
+ drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
+ drvs->rx_drops_too_many_frags =
+ pport_stats->rx_drops_too_many_frags_lo;
+}
+
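+/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit software
+ * accumulator: the low 16 bits track the last HW value and the high
+ * 16 bits count the wraps observed so far.
+ */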
+static void accumulate_16bit_val(u32 *acc, u16 val)
+{
+#define lo(x) (x & 0xFFFF)
+#define hi(x) (x & 0xFFFF0000)
+ bool wrapped = val < lo(*acc);
+ u32 newacc = hi(*acc) + val;
+
+ if (wrapped)
+ newacc += 65536;
+ WRITE_ONCE(*acc, newacc);
+}
+
+static void populate_erx_stats(struct be_adapter *adapter,
+ struct be_rx_obj *rxo, u32 erx_stat)
+{
+ if (!BEx_chip(adapter))
+ rx_stats(rxo)->rx_drops_no_frags = erx_stat;
+ else
+ /* The erx HW counter below can wrap around after 65535;
+ * the driver accumulates it into a 32-bit value
+ */
+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+ (u16)erx_stat);
+}
+
+void be_parse_stats(struct be_adapter *adapter)
+{
+ struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
+ struct be_rx_obj *rxo;
+ int i;
+ u32 erx_stat;
+
+ if (lancer_chip(adapter)) {
+ populate_lancer_stats(adapter);
+ } else {
+ if (BE2_chip(adapter))
+ populate_be_v0_stats(adapter);
+ else if (BE3_chip(adapter))
+ /* for BE3 */
+ populate_be_v1_stats(adapter);
+ else
+ populate_be_v2_stats(adapter);
+
+ /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
+ for_all_rx_queues(adapter, rxo, i) {
+ erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
+ populate_erx_stats(adapter, rxo, erx_stat);
+ }
+ }
+}
+
+static void be_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u64 pkts, bytes;
+ unsigned int start;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ const struct be_rx_stats *rx_stats = rx_stats(rxo);
+
+ do {
+ start = u64_stats_fetch_begin(&rx_stats->sync);
+ pkts = rx_stats(rxo)->rx_pkts;
+ bytes = rx_stats(rxo)->rx_bytes;
+ } while (u64_stats_fetch_retry(&rx_stats->sync, start));
+ stats->rx_packets += pkts;
+ stats->rx_bytes += bytes;
+ stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
+ stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
+ rx_stats(rxo)->rx_drops_no_frags;
+ }
+
+ for_all_tx_queues(adapter, txo, i) {
+ const struct be_tx_stats *tx_stats = tx_stats(txo);
+
+ do {
+ start = u64_stats_fetch_begin(&tx_stats->sync);
+ pkts = tx_stats(txo)->tx_pkts;
+ bytes = tx_stats(txo)->tx_bytes;
+ } while (u64_stats_fetch_retry(&tx_stats->sync, start));
+ stats->tx_packets += pkts;
+ stats->tx_bytes += bytes;
+ }
+
+ /* bad pkts received */
+ stats->rx_errors = drvs->rx_crc_errors +
+ drvs->rx_alignment_symbol_errors +
+ drvs->rx_in_range_errors +
+ drvs->rx_out_range_errors +
+ drvs->rx_frame_too_long +
+ drvs->rx_dropped_too_small +
+ drvs->rx_dropped_too_short +
+ drvs->rx_dropped_header_too_small +
+ drvs->rx_dropped_tcp_length +
+ drvs->rx_dropped_runt;
+
+ /* detailed rx errors */
+ stats->rx_length_errors = drvs->rx_in_range_errors +
+ drvs->rx_out_range_errors +
+ drvs->rx_frame_too_long;
+
+ stats->rx_crc_errors = drvs->rx_crc_errors;
+
+ /* frame alignment errors */
+ stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
+
+ /* receiver fifo overrun */
+ /* drops_no_pbuf is not per i/f, it's per BE card */
+ stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
+ drvs->rx_input_fifo_overflow_drop +
+ drvs->rx_drops_no_pbuf;
+}
+
+void be_link_status_update(struct be_adapter *adapter, u8 link_status)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
+ netif_carrier_off(netdev);
+ adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
+ }
+
+ if (link_status)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
+
+ netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
+}
+
+static int be_gso_hdr_len(struct sk_buff *skb)
+{
+ if (skb->encapsulation)
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
+}
+
+static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
+{
+ struct be_tx_stats *stats = tx_stats(txo);
+ u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
+ /* Account for headers which get duplicated in TSO pkt */
+ u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
+
+ u64_stats_update_begin(&stats->sync);
+ stats->tx_reqs++;
+ stats->tx_bytes += skb->len + dup_hdr_len;
+ stats->tx_pkts += tx_pkts;
+ if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+ stats->tx_vxlan_offload_pkts += tx_pkts;
+ u64_stats_update_end(&stats->sync);
+}
+
+/* Returns number of WRBs needed for the skb */
+static u32 skb_wrb_cnt(struct sk_buff *skb)
+{
+ /* +1 for the header wrb */
+ return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
+}
+
+static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
+{
+ wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
+ wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
+ wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
+ wrb->rsvd0 = 0;
+}
+
+/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
+ * to avoid the swap and shift/mask operations in wrb_fill().
+ */
+static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
+{
+ wrb->frag_pa_hi = 0;
+ wrb->frag_pa_lo = 0;
+ wrb->frag_len = 0;
+ wrb->rsvd0 = 0;
+}
+
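+/* Return the VLAN tag to use in the TX WRB. If the priority supplied by
+ * the stack is not present in the adapter's available priority bitmap,
+ * it is replaced with the FW-recommended priority bits.
+ */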
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u8 vlan_prio;
+ u16 vlan_tag;
+
+ vlan_tag = skb_vlan_tag_get(skb);
+ vlan_prio = skb_vlan_tag_get_prio(skb);
+ /* If vlan priority provided by OS is NOT in available bmap */
+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+ adapter->recommended_prio_bits;
+
+ return vlan_tag;
+}
+
+/* Used only for IP tunnel packets */
+static u16 skb_inner_ip_proto(struct sk_buff *skb)
+{
+ return (inner_ip_hdr(skb)->version == 4) ?
+ inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
+}
+
+static u16 skb_ip_proto(struct sk_buff *skb)
+{
+ return (ip_hdr(skb)->version == 4) ?
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
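+/* The TXQ is treated as full when there is no longer room for a
+ * maximally fragmented skb (BE_MAX_TX_FRAG_COUNT WRBs).
+ */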
+static inline bool be_is_txq_full(struct be_tx_obj *txo)
+{
+ return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
+}
+
+static inline bool be_can_txq_wake(struct be_tx_obj *txo)
+{
+ return atomic_read(&txo->q.used) < txo->q.len / 2;
+}
+
+static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
+{
+ return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
+}
+
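+/* Translate the skb's offload requests (GSO, checksum, VLAN) into the
+ * feature bits and fields carried in be_wrb_params.
+ */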
+static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ struct be_wrb_params *wrb_params)
+{
+ u16 proto;
+
+ if (skb_is_gso(skb)) {
+ BE_WRB_F_SET(wrb_params->features, LSO, 1);
+ wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
+ BE_WRB_F_SET(wrb_params->features, LSO6, 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->encapsulation) {
+ BE_WRB_F_SET(wrb_params->features, IPCS, 1);
+ proto = skb_inner_ip_proto(skb);
+ } else {
+ proto = skb_ip_proto(skb);
+ }
+ if (proto == IPPROTO_TCP)
+ BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
+ else if (proto == IPPROTO_UDP)
+ BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ BE_WRB_F_SET(wrb_params->features, VLAN, 1);
+ wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ }
+
+ BE_WRB_F_SET(wrb_params->features, CRC, 1);
+}
+
+static void wrb_fill_hdr(struct be_adapter *adapter,
+ struct be_eth_hdr_wrb *hdr,
+ struct be_wrb_params *wrb_params,
+ struct sk_buff *skb)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ SET_TX_WRB_HDR_BITS(crc, hdr,
+ BE_WRB_F_GET(wrb_params->features, CRC));
+ SET_TX_WRB_HDR_BITS(ipcs, hdr,
+ BE_WRB_F_GET(wrb_params->features, IPCS));
+ SET_TX_WRB_HDR_BITS(tcpcs, hdr,
+ BE_WRB_F_GET(wrb_params->features, TCPCS));
+ SET_TX_WRB_HDR_BITS(udpcs, hdr,
+ BE_WRB_F_GET(wrb_params->features, UDPCS));
+
+ SET_TX_WRB_HDR_BITS(lso, hdr,
+ BE_WRB_F_GET(wrb_params->features, LSO));
+ SET_TX_WRB_HDR_BITS(lso6, hdr,
+ BE_WRB_F_GET(wrb_params->features, LSO6));
+ SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
+
+ /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
+ * hack is not needed, the evt bit is set while ringing DB.
+ */
+ SET_TX_WRB_HDR_BITS(event, hdr,
+ BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
+ SET_TX_WRB_HDR_BITS(vlan, hdr,
+ BE_WRB_F_GET(wrb_params->features, VLAN));
+ SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
+
+ SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
+ SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
+ SET_TX_WRB_HDR_BITS(mgmt, hdr,
+ BE_WRB_F_GET(wrb_params->features, OS2BMC));
+}
+
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
+ bool unmap_single)
+{
+ dma_addr_t dma;
+ u32 frag_len = le32_to_cpu(wrb->frag_len);
+
+ dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
+ (u64)le32_to_cpu(wrb->frag_pa_lo);
+ if (frag_len) {
+ if (unmap_single)
+ dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
+ }
+}
+
+/* Grab a WRB header for xmit */
+static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
+{
+ u32 head = txo->q.head;
+
+ queue_head_inc(&txo->q);
+ return head;
+}
+
+/* Set up the WRB header for xmit */
+static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
+ struct be_tx_obj *txo,
+ struct be_wrb_params *wrb_params,
+ struct sk_buff *skb, u16 head)
+{
+ u32 num_frags = skb_wrb_cnt(skb);
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
+
+ wrb_fill_hdr(adapter, hdr, wrb_params, skb);
+ be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
+ BUG_ON(txo->sent_skb_list[head]);
+ txo->sent_skb_list[head] = skb;
+ txo->last_req_hdr = head;
+ atomic_add(num_frags, &txq->used);
+ txo->last_req_wrb_cnt = num_frags;
+ txo->pend_wrb_cnt += num_frags;
+}
+
+/* Setup a WRB fragment (buffer descriptor) for xmit */
+static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
+ int len)
+{
+ struct be_eth_wrb *wrb;
+ struct be_queue_info *txq = &txo->q;
+
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, len);
+ queue_head_inc(txq);
+}
+
+/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
+ * was invoked. The producer index is restored to the previous packet and the
+ * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
+ */
+static void be_xmit_restore(struct be_adapter *adapter,
+ struct be_tx_obj *txo, u32 head, bool map_single,
+ u32 copied)
+{
+ struct device *dev;
+ struct be_eth_wrb *wrb;
+ struct be_queue_info *txq = &txo->q;
+
+ dev = &adapter->pdev->dev;
+ txq->head = head;
+
+ /* skip the first wrb (hdr); it's not mapped */
+ queue_head_inc(txq);
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(dev, wrb, map_single);
+ map_single = false;
+ copied -= le32_to_cpu(wrb->frag_len);
+ queue_head_inc(txq);
+ }
+
+ txq->head = head;
+}
+
+/* Enqueue the given packet for transmit. This routine allocates WRBs for the
+ * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
+ * of WRBs used up by the packet.
+ */
+static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
+ struct sk_buff *skb,
+ struct be_wrb_params *wrb_params)
+{
+ u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
+ struct device *dev = &adapter->pdev->dev;
+ bool map_single = false;
+ u32 head;
+ dma_addr_t busaddr;
+ int len;
+
+ head = be_tx_get_wrb_hdr(txo);
+
+ if (skb->len > skb->data_len) {
+ len = skb_headlen(skb);
+
+ busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
+ goto dma_err;
+ map_single = true;
+ be_tx_setup_wrb_frag(txo, busaddr, len);
+ copied += len;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ len = skb_frag_size(frag);
+
+ busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
+ goto dma_err;
+ be_tx_setup_wrb_frag(txo, busaddr, len);
+ copied += len;
+ }
+
+ be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
+
+ be_tx_stats_update(txo, skb);
+ return wrb_cnt;
+
+dma_err:
+ adapter->drv_stats.dma_map_errors++;
+ be_xmit_restore(adapter, txo, head, map_single, copied);
+ return 0;
+}
+
+static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
+{
+ return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+}
+
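+/* Insert the VLAN tag (and the outer QnQ tag, if any) into the packet
+ * data itself, for cases where HW VLAN insertion must be skipped.
+ */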
+static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ struct be_wrb_params
+ *wrb_params)
+{
+ bool insert_vlan = false;
+ u16 vlan_tag = 0;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return skb;
+
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ insert_vlan = true;
+ }
+
+ if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+ if (!insert_vlan) {
+ vlan_tag = adapter->pvid;
+ insert_vlan = true;
+ }
+ /* As a f/w workaround, set skip_hw_vlan = 1 to inform the F/W
+ * to skip VLAN insertion
+ */
+ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
+ }
+
+ if (insert_vlan) {
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+ __vlan_hwaccel_clear_tag(skb);
+ }
+
+ /* Insert the outer VLAN, if any */
+ if (adapter->qnq_vid) {
+ vlan_tag = adapter->qnq_vid;
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
+ }
+
+ return skb;
+}
+
+static bool be_ipv6_exthdr_check(struct sk_buff *skb)
+{
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+ u16 offset = ETH_HLEN;
+
+ if (eh->h_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
+
+ offset += sizeof(struct ipv6hdr);
+ if (ip6h->nexthdr != NEXTHDR_TCP &&
+ ip6h->nexthdr != NEXTHDR_UDP) {
+ struct ipv6_opt_hdr *ehdr =
+ (struct ipv6_opt_hdr *)(skb->data + offset);
+
+ /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
+ if (ehdr->hdrlen == 0xff)
+ return true;
+ }
+ }
+ return false;
+}
+
+static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+}
+
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
+}
+
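+/* Despite the name, these workarounds are applied to BEx chips as well
+ * as Lancer (see be_xmit_workarounds()): short padded packets, pvid
+ * tagging and HW VLAN tagging of certain ipv6 packets.
+ */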
+static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ struct be_wrb_params
+ *wrb_params)
+{
+ struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
+ unsigned int eth_hdr_len;
+ struct iphdr *ip;
+
+ /* For padded packets, BE HW modifies the tot_len field in the IP
+ * header incorrectly when the VLAN tag is inserted by HW.
+ * For padded packets, Lancer computes an incorrect checksum.
+ */
+ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ VLAN_ETH_HLEN : ETH_HLEN;
+ if (skb->len <= 60 &&
+ (lancer_chip(adapter) || BE3_chip(adapter) ||
+ skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
+ ip = (struct iphdr *)ip_hdr(skb);
+ if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
+ goto tx_drop;
+ }
+
+ /* If vlan tag is already inlined in the packet, skip HW VLAN
+ * tagging in pvid-tagging mode
+ */
+ if (be_pvid_tagging_enabled(adapter) &&
+ veh->h_vlan_proto == htons(ETH_P_8021Q))
+ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
+
+ /* HW has a bug wherein it will calculate CSUM for VLAN
+ * pkts even though it is disabled.
+ * Manually insert VLAN in pkt.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL &&
+ skb_vlan_tag_present(skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
+ if (unlikely(!skb))
+ goto err;
+ }
+
+ /* The HW may lock up when VLAN HW tagging is requested on
+ * certain ipv6 packets. Drop such pkts if the HW workaround to
+ * skip HW tagging is not enabled by FW.
+ */
+ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
+ (adapter->pvid || adapter->qnq_vid) &&
+ !qnq_async_evt_rcvd(adapter)))
+ goto tx_drop;
+
+ /* Manual VLAN tag insertion prevents an ASIC lockup that occurs
+ * when the ASIC inserts a VLAN tag into certain ipv6 packets.
+ * Insert the VLAN tag in the driver and set the event, completion
+ * and vlan bits accordingly in the Tx WRB.
+ */
+ if (be_ipv6_tx_stall_chk(adapter, skb) &&
+ be_vlan_tag_tx_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
+ if (unlikely(!skb))
+ goto err;
+ }
+
+ return skb;
+tx_drop:
+ dev_kfree_skb_any(skb);
+err:
+ return NULL;
+}
+
+static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ struct be_wrb_params *wrb_params)
+{
+ int err;
+
+ /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
+ * packets that are 32b or less may cause a transmit stall
+ * on that port. The workaround is to pad such packets
+ * (len <= 32 bytes) to a minimum length of 36b.
+ */
+ if (skb->len <= 32) {
+ if (skb_put_padto(skb, 36))
+ return NULL;
+ }
+
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
+ if (!skb)
+ return NULL;
+ }
+
+ /* The stack can send us skbs with length greater than
+ * what the HW can handle. Trim the extra bytes.
+ */
+ WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
+ err = pskb_trim(skb, BE_MAX_GSO_SIZE);
+ WARN_ON(err);
+
+ return skb;
+}
+
+static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
+{
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
+
+ /* Mark the last request eventable if it hasn't been marked already */
+ if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
+ hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
+
+ /* compose a dummy wrb if there is an odd number of wrbs to notify */
+ if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
+ wrb_fill_dummy(queue_head_node(txq));
+ queue_head_inc(txq);
+ atomic_inc(&txq->used);
+ txo->pend_wrb_cnt++;
+ hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
+ TX_HDR_WRB_NUM_SHIFT);
+ hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
+ TX_HDR_WRB_NUM_SHIFT);
+ }
+ be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
+ txo->pend_wrb_cnt = 0;
+}
+
+/* OS2BMC related */
+
+#define DHCP_CLIENT_PORT 68
+#define DHCP_SERVER_PORT 67
+#define NET_BIOS_PORT1 137
+#define NET_BIOS_PORT2 138
+#define DHCPV6_RAS_PORT 547
+
+#define is_mc_allowed_on_bmc(adapter, eh) \
+ (!is_multicast_filt_enabled(adapter) && \
+ is_multicast_ether_addr(eh->h_dest) && \
+ !is_broadcast_ether_addr(eh->h_dest))
+
+#define is_bc_allowed_on_bmc(adapter, eh) \
+ (!is_broadcast_filt_enabled(adapter) && \
+ is_broadcast_ether_addr(eh->h_dest))
+
+#define is_arp_allowed_on_bmc(adapter, skb) \
+ (is_arp(skb) && is_arp_filt_enabled(adapter))
+
+#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
+
+#define is_arp_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
+
+#define is_dhcp_client_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
+
+#define is_dhcp_srvr_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
+
+#define is_nbios_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
+
+#define is_ipv6_na_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & \
+ BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
+
+#define is_ipv6_ra_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
+
+#define is_ipv6_ras_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
+
+#define is_broadcast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
+
+#define is_multicast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
+
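+/* Decide whether a copy of this TX packet must also be sent to the BMC
+ * (OS2BMC), based on the packet type (broadcast, multicast, ARP, DHCP,
+ * NetBIOS, IPv6 ND/RA) and the BMC filter mask programmed by the FW.
+ */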
+static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
+ struct sk_buff **skb)
+{
+ struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
+ bool os2bmc = false;
+
+ if (!be_is_os2bmc_enabled(adapter))
+ goto done;
+
+ if (!is_multicast_ether_addr(eh->h_dest))
+ goto done;
+
+ if (is_mc_allowed_on_bmc(adapter, eh) ||
+ is_bc_allowed_on_bmc(adapter, eh) ||
+ is_arp_allowed_on_bmc(adapter, (*skb))) {
+ os2bmc = true;
+ goto done;
+ }
+
+ if ((*skb)->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *hdr = ipv6_hdr((*skb));
+ u8 nexthdr = hdr->nexthdr;
+
+ if (nexthdr == IPPROTO_ICMPV6) {
+ struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
+
+ switch (icmp6->icmp6_type) {
+ case NDISC_ROUTER_ADVERTISEMENT:
+ os2bmc = is_ipv6_ra_filt_enabled(adapter);
+ goto done;
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ os2bmc = is_ipv6_na_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (is_udp_pkt((*skb))) {
+ struct udphdr *udp = udp_hdr((*skb));
+
+ switch (ntohs(udp->dest)) {
+ case DHCP_CLIENT_PORT:
+ os2bmc = is_dhcp_client_filt_enabled(adapter);
+ goto done;
+ case DHCP_SERVER_PORT:
+ os2bmc = is_dhcp_srvr_filt_enabled(adapter);
+ goto done;
+ case NET_BIOS_PORT1:
+ case NET_BIOS_PORT2:
+ os2bmc = is_nbios_filt_enabled(adapter);
+ goto done;
+ case DHCPV6_RAS_PORT:
+ os2bmc = is_ipv6_ras_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+done:
+ /* For VLAN packets destined to the BMC, the ASIC expects the
+ * VLAN tag to be inline in the packet.
+ */
+ if (os2bmc)
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+
+ return os2bmc;
+}
+
+static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u16 q_idx = skb_get_queue_mapping(skb);
+ struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
+ struct be_wrb_params wrb_params = { 0 };
+ bool flush = !netdev_xmit_more();
+ u16 wrb_cnt;
+
+ skb = be_xmit_workarounds(adapter, skb, &wrb_params);
+ if (unlikely(!skb))
+ goto drop;
+
+ be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
+
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+ if (unlikely(!wrb_cnt)) {
+ dev_kfree_skb_any(skb);
+ goto drop;
+ }
+
+ /* if os2bmc is enabled and if the pkt is destined to bmc,
+ * enqueue the pkt a 2nd time with mgmt bit set.
+ */
+ if (be_send_pkt_to_bmc(adapter, &skb)) {
+ BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+ if (unlikely(!wrb_cnt))
+ goto drop;
+ else
+ skb_get(skb);
+ }
+
+ if (be_is_txq_full(txo)) {
+ netif_stop_subqueue(netdev, q_idx);
+ tx_stats(txo)->tx_stops++;
+ }
+
+ if (flush || __netif_subqueue_stopped(netdev, q_idx))
+ be_xmit_flush(adapter, txo);
+
+ return NETDEV_TX_OK;
+drop:
+ tx_stats(txo)->tx_drv_drops++;
+ /* Flush the already enqueued tx requests */
+ if (flush && txo->pend_wrb_cnt)
+ be_xmit_flush(adapter, txo);
+
+ return NETDEV_TX_OK;
+}
+
+static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_tx_obj *txo;
+ struct sk_buff *skb;
+ struct tcphdr *tcphdr;
+ struct udphdr *udphdr;
+ u32 *entry;
+ int status;
+ int i, j;
+
+ for_all_tx_queues(adapter, txo, i) {
+ dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
+ i, txo->q.head, txo->q.tail,
+ atomic_read(&txo->q.used), txo->q.id);
+
+ entry = txo->q.dma_mem.va;
+ for (j = 0; j < TX_Q_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ entry = txo->cq.dma_mem.va;
+ dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
+ i, txo->cq.head, txo->cq.tail,
+ atomic_read(&txo->cq.used));
+ for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ for (j = 0; j < TX_Q_LEN; j++) {
+ if (txo->sent_skb_list[j]) {
+ skb = txo->sent_skb_list[j];
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
+ tcphdr = tcp_hdr(skb);
+ dev_info(dev, "TCP source port %d\n",
+ ntohs(tcphdr->source));
+ dev_info(dev, "TCP dest port %d\n",
+ ntohs(tcphdr->dest));
+ dev_info(dev, "TCP sequence num %d\n",
+ ntohs(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %d\n",
+ ntohs(tcphdr->ack_seq));
+ } else if (ip_hdr(skb)->protocol ==
+ IPPROTO_UDP) {
+ udphdr = udp_hdr(skb);
+ dev_info(dev, "UDP source port %d\n",
+ ntohs(udphdr->source));
+ dev_info(dev, "UDP dest port %d\n",
+ ntohs(udphdr->dest));
+ }
+ dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
+ j, skb, skb->len, skb->protocol);
+ }
+ }
+ }
+
+ if (lancer_chip(adapter)) {
+ dev_info(dev, "Initiating reset due to tx timeout\n");
+ dev_info(dev, "Resetting adapter\n");
+ status = lancer_physdev_ctrl(adapter,
+ PHYSDEV_CONTROL_FW_RESET_MASK);
+ if (status)
+ dev_err(dev, "Reset failed .. Reboot server\n");
+ }
+}
+
+static inline bool be_in_all_promisc(struct be_adapter *adapter)
+{
+ return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
+ BE_IF_FLAGS_ALL_PROMISCUOUS;
+}
+
+static int be_set_vlan_promisc(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
+ return 0;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
+ if (!status) {
+ dev_info(dev, "Enabled VLAN promiscuous mode\n");
+ adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ } else {
+ dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
+ }
+ return status;
+}
+
+static int be_clear_vlan_promisc(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
+ if (!status) {
+ dev_info(dev, "Disabling VLAN promiscuous mode\n");
+ adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+ return status;
+}
+
+/*
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
+ */
+static int be_vid_config(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ u16 num = 0, i = 0;
+ int status = 0;
+
+ /* No need to change the VLAN state if the I/F is in promiscuous mode */
+ if (adapter->netdev->flags & IFF_PROMISC)
+ return 0;
+
+ if (adapter->vlans_added > be_max_vlans(adapter))
+ return be_set_vlan_promisc(adapter);
+
+ if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ status = be_clear_vlan_promisc(adapter);
+ if (status)
+ return status;
+ }
+ /* Construct VLAN Table to give to HW */
+ for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+ vids[num++] = cpu_to_le16(i);
+
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
+ if (status) {
+ dev_err(dev, "Setting HW VLAN filtering failed\n");
+ /* Set to VLAN promisc mode as setting VLAN filter failed */
+ if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
+ addl_status(status) ==
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
+ return be_set_vlan_promisc(adapter);
+ }
+ return status;
+}
+
+static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ mutex_lock(&adapter->rx_filter_lock);
+
+ /* Packets with VID 0 are always received by Lancer by default */
+ if (lancer_chip(adapter) && vid == 0)
+ goto done;
+
+ if (test_bit(vid, adapter->vids))
+ goto done;
+
+ set_bit(vid, adapter->vids);
+ adapter->vlans_added++;
+
+ status = be_vid_config(adapter);
+done:
+ mutex_unlock(&adapter->rx_filter_lock);
+ return status;
+}
+
+static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ mutex_lock(&adapter->rx_filter_lock);
+
+ /* Packets with VID 0 are always received by Lancer by default */
+ if (lancer_chip(adapter) && vid == 0)
+ goto done;
+
+ if (!test_bit(vid, adapter->vids))
+ goto done;
+
+ clear_bit(vid, adapter->vids);
+ adapter->vlans_added--;
+
+ status = be_vid_config(adapter);
+done:
+ mutex_unlock(&adapter->rx_filter_lock);
+ return status;
+}
+
+static void be_set_all_promisc(struct be_adapter *adapter)
+{
+ be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
+ adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
+}
+
+static void be_set_mc_promisc(struct be_adapter *adapter)
+{
+ int status;
+
+ if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
+ return;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
+ if (!status)
+ adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
+}
+
+static void be_set_uc_promisc(struct be_adapter *adapter)
+{
+ int status;
+
+ if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
+ return;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
+ if (!status)
+ adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
+}
+
+static void be_clear_uc_promisc(struct be_adapter *adapter)
+{
+ int status;
+
+ if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
+ return;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
+ if (!status)
+ adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
+}
+
+/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
+ * We use a single callback function for both sync and unsync. We really don't
+ * add/remove addresses through this callback. But, we use it to detect changes
+ * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
+ */
+static int be_uc_list_update(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->update_uc_list = true;
+ return 0;
+}
+
+static int be_mc_list_update(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->update_mc_list = true;
+ return 0;
+}
+
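+/* Sync the netdev multicast list to the adapter. Falls back to
+ * multicast-promiscuous mode when IFF_ALLMULTI is set or the list
+ * exceeds what the adapter supports.
+ */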
+static void be_set_mc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netdev_hw_addr *ha;
+ bool mc_promisc = false;
+ int status;
+
+ netif_addr_lock_bh(netdev);
+ __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
+
+ if (netdev->flags & IFF_PROMISC) {
+ adapter->update_mc_list = false;
+ } else if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > be_max_mc(adapter)) {
+ /* Enable multicast promisc if num configured exceeds
+ * what we support
+ */
+ mc_promisc = true;
+ adapter->update_mc_list = false;
+ } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
+ /* Update mc-list unconditionally if the iface was previously
+ * in mc-promisc mode and now is out of that mode.
+ */
+ adapter->update_mc_list = true;
+ }
+
+ if (adapter->update_mc_list) {
+ int i = 0;
+
+ /* cache the mc-list in adapter */
+ netdev_for_each_mc_addr(ha, netdev) {
+ ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
+ i++;
+ }
+ adapter->mc_count = netdev_mc_count(netdev);
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (mc_promisc) {
+ be_set_mc_promisc(adapter);
+ } else if (adapter->update_mc_list) {
+ status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
+ if (!status)
+ adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
+ else
+ be_set_mc_promisc(adapter);
+
+ adapter->update_mc_list = false;
+ }
+}
+
+static void be_clear_mc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ __dev_mc_unsync(netdev, NULL);
+ be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
+ adapter->mc_count = 0;
+}
+
+static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
+{
+ if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
+ adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
+ return 0;
+ }
+
+ return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
+ adapter->if_handle,
+ &adapter->pmac_id[uc_idx + 1], 0);
+}
+
+static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ if (pmac_id == adapter->pmac_id[0])
+ return;
+
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
+}
+
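+/* Sync the netdev unicast list to the adapter. Falls back to
+ * unicast-promiscuous mode when the list exceeds the adapter's limit.
+ */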
+static void be_set_uc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netdev_hw_addr *ha;
+ bool uc_promisc = false;
+ int curr_uc_macs = 0, i;
+
+ netif_addr_lock_bh(netdev);
+ __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
+
+ if (netdev->flags & IFF_PROMISC) {
+ adapter->update_uc_list = false;
+ } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
+ uc_promisc = true;
+ adapter->update_uc_list = false;
+ } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
+ /* Update uc-list unconditionally if the iface was previously
+ * in uc-promisc mode and now is out of that mode.
+ */
+ adapter->update_uc_list = true;
+ }
+
+ if (adapter->update_uc_list) {
+ /* cache the uc-list in adapter array */
+ i = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
+ i++;
+ }
+ curr_uc_macs = netdev_uc_count(netdev);
+ }
+ netif_addr_unlock_bh(netdev);
+
+ if (uc_promisc) {
+ be_set_uc_promisc(adapter);
+ } else if (adapter->update_uc_list) {
+ be_clear_uc_promisc(adapter);
+
+ for (i = 0; i < adapter->uc_macs; i++)
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
+
+ for (i = 0; i < curr_uc_macs; i++)
+ be_uc_mac_add(adapter, i);
+ adapter->uc_macs = curr_uc_macs;
+ adapter->update_uc_list = false;
+ }
+}
+
+static void be_clear_uc_list(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ __dev_uc_unsync(netdev, NULL);
+ for (i = 0; i < adapter->uc_macs; i++)
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
+
+ adapter->uc_macs = 0;
+}
+
+static void __be_set_rx_mode(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ mutex_lock(&adapter->rx_filter_lock);
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!be_in_all_promisc(adapter))
+ be_set_all_promisc(adapter);
+ } else if (be_in_all_promisc(adapter)) {
+ /* We need to re-program the vlan-list or clear
+ * vlan-promisc mode (if needed) when the interface
+ * comes out of promisc mode.
+ */
+ be_vid_config(adapter);
+ }
+
+ be_set_uc_list(adapter);
+ be_set_mc_list(adapter);
+
+ mutex_unlock(&adapter->rx_filter_lock);
+}
+
+static void be_work_set_rx_mode(struct work_struct *work)
+{
+ struct be_cmd_work *cmd_work =
+ container_of(work, struct be_cmd_work, work);
+
+ __be_set_rx_mode(cmd_work->adapter);
+ kfree(cmd_work);
+}
+
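+/* ndo_set_vf_mac handler: program the admin-supplied MAC address on the
+ * given VF.
+ */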
+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ /* Proceed further only if user provided MAC is different
+ * from active MAC
+ */
+ if (ether_addr_equal(mac, vf_cfg->mac_addr))
+ return 0;
+
+ if (BEx_chip(adapter)) {
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
+ vf + 1);
+
+ status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ } else {
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
+ }
+
+ if (status) {
+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
+ mac, vf, status);
+ return be_cmd_status(status);
+ }
+
+ ether_addr_copy(vf_cfg->mac_addr, mac);
+
+ return 0;
+}
+
+static int be_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *vi)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ vi->vf = vf;
+ vi->max_tx_rate = vf_cfg->tx_rate;
+ vi->min_tx_rate = 0;
+ vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
+ vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
+ memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
+ vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
+ vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
+
+ return 0;
+}
+
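+/* Enable Transparent VLAN Tagging (TVT) on a VF: the HW tags all VF
+ * traffic with the given VLAN. Any guest-programmed VLAN filters are
+ * cleared and the VF's FILTMGMT privilege is revoked so that it cannot
+ * override the tag.
+ */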
+static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ int vf_if_id = vf_cfg->if_handle;
+ int status;
+
+ /* Enable Transparent VLAN Tagging */
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
+ if (status)
+ return status;
+
+ /* If TVT is enabled, clear any pre-programmed VLAN filters on the VF */
+ vids[0] = 0;
+ status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Cleared guest VLANs on VF%d", vf);
+
+ /* After TVT is enabled, disallow the VF from programming VLAN filters */
+ if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
+ ~BE_PRIV_FILTMGMT, vf + 1);
+ if (!status)
+ vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
+ }
+ return 0;
+}
+
+static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ /* Reset Transparent VLAN Tagging. */
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
+ vf_cfg->if_handle, 0, 0);
+ if (status)
+ return status;
+
+ /* Allow VFs to program VLAN filtering */
+ if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
+ BE_PRIV_FILTMGMT, vf + 1);
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
+ dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
+ }
+ }
+
+ dev_info(dev,
+ "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
+ return 0;
+}
+
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (vlan || qos) {
+ vlan |= qos << VLAN_PRIO_SHIFT;
+ status = be_set_vf_tvt(adapter, vf, vlan);
+ } else {
+ status = be_clear_vf_tvt(adapter, vf);
+ }
+
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
+ status);
+ return be_cmd_status(status);
+ }
+
+ vf_cfg->vlan_tag = vlan;
+ return 0;
+}
+
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ int percent_rate, status = 0;
+ u16 link_speed = 0;
+ u8 link_status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ if (min_tx_rate)
+ return -EINVAL;
+
+ if (!max_tx_rate)
+ goto config_qos;
+
+ status = be_cmd_link_status_query(adapter, &link_speed,
+ &link_status, 0);
+ if (status)
+ goto err;
+
+ if (!link_status) {
+ dev_err(dev, "TX-rate setting not allowed when link is down\n");
+ status = -ENETDOWN;
+ goto err;
+ }
+
+ if (max_tx_rate < 100 || max_tx_rate > link_speed) {
+ dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
+ link_speed);
+ status = -EINVAL;
+ goto err;
+ }
+
+ /* On Skyhawk the QOS setting must be done only as a % value */
+ percent_rate = link_speed / 100;
+ if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
+ dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
+ percent_rate);
+ status = -EINVAL;
+ goto err;
+ }
+
+config_qos:
+ status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
+ if (status)
+ goto err;
+
+ adapter->vf_cfg[vf].tx_rate = max_tx_rate;
+ return 0;
+
+err:
+ dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
+ max_tx_rate, vf);
+ return be_cmd_status(status);
+}
+
+static int be_set_vf_link_state(struct net_device *netdev, int vf,
+ int link_state)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Link state change on VF %d failed: %#x\n", vf, status);
+ return be_cmd_status(status);
+ }
+
+ adapter->vf_cfg[vf].plink_tracking = link_state;
+
+ return 0;
+}
+
+static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u8 spoofchk;
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ if (BEx_chip(adapter))
+ return -EOPNOTSUPP;
+
+ if (enable == vf_cfg->spoofchk)
+ return 0;
+
+ spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
+
+ status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
+ 0, spoofchk);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Spoofchk change on VF %d failed: %#x\n", vf, status);
+ return be_cmd_status(status);
+ }
+
+ vf_cfg->spoofchk = enable;
+ return 0;
+}
+
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+ ulong now)
+{
+ aic->rx_pkts_prev = rx_pkts;
+ aic->tx_reqs_prev = tx_pkts;
+ aic->jiffies = now;
+}
+
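+/* Adaptive interrupt coalescing: compute a new EQ delay from the RX/TX
+ * packet rate observed on this EQ since the last sample.
+ */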
+static int be_get_new_eqd(struct be_eq_obj *eqo)
+{
+ struct be_adapter *adapter = eqo->adapter;
+ int eqd, start;
+ struct be_aic_obj *aic;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u64 rx_pkts = 0, tx_pkts = 0;
+ ulong now;
+ u32 pps, delta;
+ int i;
+
+ aic = &adapter->aic_obj[eqo->idx];
+ if (!adapter->aic_enabled) {
+ if (aic->jiffies)
+ aic->jiffies = 0;
+ eqd = aic->et_eqd;
+ return eqd;
+ }
+
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ do {
+ start = u64_stats_fetch_begin(&rxo->stats.sync);
+ rx_pkts += rxo->stats.rx_pkts;
+ } while (u64_stats_fetch_retry(&rxo->stats.sync, start));
+ }
+
+ for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
+ do {
+ start = u64_stats_fetch_begin(&txo->stats.sync);
+ tx_pkts += txo->stats.tx_reqs;
+ } while (u64_stats_fetch_retry(&txo->stats.sync, start));
+ }
+
+ /* Skip if wrapped around or this is the first calculation */
+ now = jiffies;
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
+ rx_pkts < aic->rx_pkts_prev ||
+ tx_pkts < aic->tx_reqs_prev) {
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+ return aic->prev_eqd;
+ }
+
+ delta = jiffies_to_msecs(now - aic->jiffies);
+ if (delta == 0)
+ return aic->prev_eqd;
+
+ pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+ (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
+
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, aic->max_eqd);
+ eqd = max_t(u32, eqd, aic->min_eqd);
+
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+
+ return eqd;
+}
+
+/* For Skyhawk-R only */
+static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
+{
+ struct be_adapter *adapter = eqo->adapter;
+ struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
+ ulong now = jiffies;
+ int eqd;
+ u32 mult_enc;
+
+ if (!adapter->aic_enabled)
+ return 0;
+
+ if (jiffies_to_msecs(now - aic->jiffies) < 1)
+ eqd = aic->prev_eqd;
+ else
+ eqd = be_get_new_eqd(eqo);
+
+ if (eqd > 100)
+ mult_enc = R2I_DLY_ENC_1;
+ else if (eqd > 60)
+ mult_enc = R2I_DLY_ENC_2;
+ else if (eqd > 20)
+ mult_enc = R2I_DLY_ENC_3;
+ else
+ mult_enc = R2I_DLY_ENC_0;
+
+ aic->prev_eqd = eqd;
+
+ return mult_enc;
+}
+
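+/* Recompute the delay for each EQ and issue be_cmd_modify_eqd() for the EQs
+ * whose delay changed (or for all of them when force_update is set).
+ */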
+void be_eqd_update(struct be_adapter *adapter, bool force_update)
+{
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+ struct be_aic_obj *aic;
+ struct be_eq_obj *eqo;
+ int i, num = 0, eqd;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ aic = &adapter->aic_obj[eqo->idx];
+ eqd = be_get_new_eqd(eqo);
+ if (force_update || eqd != aic->prev_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = eqo->q.id;
+ aic->prev_eqd = eqd;
+ num++;
+ }
+ }
+
+ if (num)
+ be_cmd_modify_eqd(adapter, set_eqd, num);
+}
+
+static void be_rx_stats_update(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_rx_stats *stats = rx_stats(rxo);
+
+ u64_stats_update_begin(&stats->sync);
+ stats->rx_compl++;
+ stats->rx_bytes += rxcp->pkt_size;
+ stats->rx_pkts++;
+ if (rxcp->tunneled)
+ stats->rx_vxlan_offload_pkts++;
+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
+ stats->rx_mcast_pkts++;
+ if (rxcp->err)
+ stats->rx_compl_err++;
+ u64_stats_update_end(&stats->sync);
+}
+
+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
+{
+ /* L4 checksum is not reliable for non-TCP/UDP packets.
+ * Also ignore the ipcksm bit for IPv6 packets.
+ */
+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
+ (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
+}
+
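+/* Pop the page fragment at the RXQ tail. The page is DMA-unmapped when this
+ * is its last fragment; otherwise only the fragment is synced for CPU access.
+ */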
+static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
+{
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_rx_page_info *rx_page_info;
+ struct be_queue_info *rxq = &rxo->q;
+ u32 frag_idx = rxq->tail;
+
+ rx_page_info = &rxo->page_info_tbl[frag_idx];
+ BUG_ON(!rx_page_info->page);
+
+ if (rx_page_info->last_frag) {
+ dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ adapter->big_page_size, DMA_FROM_DEVICE);
+ rx_page_info->last_frag = false;
+ } else {
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ rx_frag_size, DMA_FROM_DEVICE);
+ }
+
+ queue_tail_inc(rxq);
+ atomic_dec(&rxq->used);
+ return rx_page_info;
+}
+
+/* Throw away the data in the Rx completion */
+static void be_rx_compl_discard(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_rx_page_info *page_info;
+ u16 i, num_rcvd = rxcp->num_rcvd;
+
+ for (i = 0; i < num_rcvd; i++) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+}
+
+/*
+ * skb_fill_rx_data forms a complete skb for an ether frame
+ * indicated by rxcp.
+ */
+static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_rx_page_info *page_info;
+ u16 i, j;
+ u16 hdr_len, curr_frag_len, remaining;
+ u8 *start;
+
+ page_info = get_rx_page_info(rxo);
+ start = page_address(page_info->page) + page_info->page_offset;
+ prefetch(start);
+
+ /* Copy data in the first descriptor of this completion */
+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
+
+ skb->len = curr_frag_len;
+ if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
+ memcpy(skb->data, start, curr_frag_len);
+ /* Complete packet has now been moved to data */
+ put_page(page_info->page);
+ skb->data_len = 0;
+ skb->tail += curr_frag_len;
+ } else {
+ hdr_len = ETH_HLEN;
+ memcpy(skb->data, start, hdr_len);
+ skb_shinfo(skb)->nr_frags = 1;
+ skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0],
+ page_info->page,
+ page_info->page_offset + hdr_len,
+ curr_frag_len - hdr_len);
+ skb->data_len = curr_frag_len - hdr_len;
+ skb->truesize += rx_frag_size;
+ skb->tail += hdr_len;
+ }
+ page_info->page = NULL;
+
+ if (rxcp->pkt_size <= rx_frag_size) {
+ BUG_ON(rxcp->num_rcvd != 1);
+ return;
+ }
+
+ /* More frags present for this completion */
+ remaining = rxcp->pkt_size - curr_frag_len;
+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(rxo);
+ curr_frag_len = min(remaining, rx_frag_size);
+
+ /* Coalesce all frags from the same physical page in one slot */
+ if (page_info->page_offset == 0) {
+ /* Fresh page */
+ j++;
+ skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
+ page_info->page,
+ page_info->page_offset,
+ curr_frag_len);
+ skb_shinfo(skb)->nr_frags++;
+ } else {
+ put_page(page_info->page);
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j],
+ curr_frag_len);
+ }
+
+ skb->len += curr_frag_len;
+ skb->data_len += curr_frag_len;
+ skb->truesize += rx_frag_size;
+ remaining -= curr_frag_len;
+ page_info->page = NULL;
+ }
+ BUG_ON(j > MAX_SKB_FRAGS);
+}
+
+/* Process the RX completion indicated by rxcp when GRO is disabled */
+static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_adapter *adapter = rxo->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
+ if (unlikely(!skb)) {
+ rx_stats(rxo)->rx_drops_no_skbs++;
+ be_rx_compl_discard(rxo, rxcp);
+ return;
+ }
+
+ skb_fill_rx_data(rxo, skb, rxcp);
+
+ if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
+ if (netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+ skb->csum_level = rxcp->tunneled;
+ skb_mark_napi_id(skb, napi);
+
+ if (rxcp->vlanf)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
+
+ netif_receive_skb(skb);
+}
+
+/* Process the RX completion indicated by rxcp when GRO is enabled */
+static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
+ struct napi_struct *napi,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_rx_page_info *page_info;
+ struct sk_buff *skb = NULL;
+ u16 remaining, curr_frag_len;
+ u16 i, j;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ be_rx_compl_discard(rxo, rxcp);
+ return;
+ }
+
+ remaining = rxcp->pkt_size;
+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(rxo);
+
+ curr_frag_len = min(remaining, rx_frag_size);
+
+ /* Coalesce all frags from the same physical page in one slot */
+ if (i == 0 || page_info->page_offset == 0) {
+ /* First frag or Fresh page */
+ j++;
+ skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
+ page_info->page,
+ page_info->page_offset,
+ curr_frag_len);
+ } else {
+ put_page(page_info->page);
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j],
+ curr_frag_len);
+ }
+
+ skb->truesize += rx_frag_size;
+ remaining -= curr_frag_len;
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(j > MAX_SKB_FRAGS);
+
+ skb_shinfo(skb)->nr_frags = j + 1;
+ skb->len = rxcp->pkt_size;
+ skb->data_len = rxcp->pkt_size;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+ skb->csum_level = rxcp->tunneled;
+
+ if (rxcp->vlanf)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
+
+ napi_gro_frags(napi);
+}
+
+static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
+ rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
+ rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
+ rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
+ rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
+ rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
+ rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
+ rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
+ rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
+ rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
+ rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
+ if (rxcp->vlanf) {
+ rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
+ rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
+ }
+ rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
+ rxcp->tunneled =
+ GET_RX_COMPL_V1_BITS(tunneled, compl);
+}
+
+static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
+ rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
+ rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
+ rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
+ rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
+ rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
+ rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
+ rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
+ rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
+ rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
+ rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
+ if (rxcp->vlanf) {
+ rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
+ rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
+ }
+ rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
+ rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
+}
+
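+/* Fetch the next valid RX completion from the CQ, parse it (v0 or v1 format)
+ * into rxo->rxcp and apply the VLAN-tag fixups. Returns NULL when no valid
+ * completion is pending.
+ */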
+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
+{
+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
+ struct be_adapter *adapter = rxo->adapter;
+
+ /* For checking the valid bit it is OK to use either definition as the
+ * valid bit is at the same position in both v0 and v1 Rx compl
+ */
+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
+ return NULL;
+
+ rmb();
+ be_dws_le_to_cpu(compl, sizeof(*compl));
+
+ if (adapter->be3_native)
+ be_parse_rx_compl_v1(compl, rxcp);
+ else
+ be_parse_rx_compl_v0(compl, rxcp);
+
+ if (rxcp->ip_frag)
+ rxcp->l4_csum = 0;
+
+ if (rxcp->vlanf) {
+ /* In QNQ modes, if qnq bit is not set, then the packet was
+ * tagged only with the transparent outer vlan-tag and must
+ * not be treated as a vlan packet by the host
+ */
+ if (be_is_qnq_mode(adapter) && !rxcp->qnq)
+ rxcp->vlanf = 0;
+
+ if (!lancer_chip(adapter))
+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
+
+ if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
+ !test_bit(rxcp->vlan_tag, adapter->vids))
+ rxcp->vlanf = 0;
+ }
+
+ /* As the compl has been parsed, reset it; we won't touch it again */
+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
+
+ queue_tail_inc(&rxo->cq);
+ return rxcp;
+}
+
+static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
+{
+ u32 order = get_order(size);
+
+ if (order > 0)
+ gfp |= __GFP_COMP;
+ return alloc_pages(gfp, order);
+}
+
+/*
+ * Allocate a page, split it into fragments of size rx_frag_size and post
+ * them as receive buffers to BE
+ */
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
+{
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
+ struct be_queue_info *rxq = &rxo->q;
+ struct page *pagep = NULL;
+ struct device *dev = &adapter->pdev->dev;
+ struct be_eth_rx_d *rxd;
+ u64 page_dmaaddr = 0, frag_dmaaddr;
+ u32 posted, page_offset = 0, notify = 0;
+
+ page_info = &rxo->page_info_tbl[rxq->head];
+ for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
+ if (!pagep) {
+ pagep = be_alloc_pages(adapter->big_page_size, gfp);
+ if (unlikely(!pagep)) {
+ rx_stats(rxo)->rx_post_fail++;
+ break;
+ }
+ page_dmaaddr = dma_map_page(dev, pagep, 0,
+ adapter->big_page_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, page_dmaaddr)) {
+ put_page(pagep);
+ pagep = NULL;
+ adapter->drv_stats.dma_map_errors++;
+ break;
+ }
+ page_offset = 0;
+ } else {
+ get_page(pagep);
+ page_offset += rx_frag_size;
+ }
+ page_info->page_offset = page_offset;
+ page_info->page = pagep;
+
+ rxd = queue_head_node(rxq);
+ frag_dmaaddr = page_dmaaddr + page_info->page_offset;
+ rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
+ rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
+
+ /* Any space left in the current big page for another frag? */
+ if ((page_offset + rx_frag_size + rx_frag_size) >
+ adapter->big_page_size) {
+ pagep = NULL;
+ page_info->last_frag = true;
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+ } else {
+ dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
+ }
+
+ prev_page_info = page_info;
+ queue_head_inc(rxq);
+ page_info = &rxo->page_info_tbl[rxq->head];
+ }
+
+ /* Mark the last frag of a page when we break out of the above loop
+ * with no more slots available in the RXQ
+ */
+ if (pagep) {
+ prev_page_info->last_frag = true;
+ dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
+ }
+
+ if (posted) {
+ atomic_add(posted, &rxq->used);
+ if (rxo->rx_post_starved)
+ rxo->rx_post_starved = false;
+ do {
+ notify = min(MAX_NUM_POST_ERX_DB, posted);
+ be_rxq_notify(adapter, rxq->id, notify);
+ posted -= notify;
+ } while (posted);
+ } else if (atomic_read(&rxq->used) == 0) {
+ /* Let be_worker replenish when memory is available */
+ rxo->rx_post_starved = true;
+ }
+}
+
+static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
+{
+ switch (status) {
+ case BE_TX_COMP_HDR_PARSE_ERR:
+ tx_stats(txo)->tx_hdr_parse_err++;
+ break;
+ case BE_TX_COMP_NDMA_ERR:
+ tx_stats(txo)->tx_dma_err++;
+ break;
+ case BE_TX_COMP_ACL_ERR:
+ tx_stats(txo)->tx_spoof_check_err++;
+ break;
+ }
+}
+
+static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
+{
+ switch (status) {
+ case LANCER_TX_COMP_LSO_ERR:
+ tx_stats(txo)->tx_tso_err++;
+ break;
+ case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
+ case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
+ tx_stats(txo)->tx_spoof_check_err++;
+ break;
+ case LANCER_TX_COMP_QINQ_ERR:
+ tx_stats(txo)->tx_qinq_err++;
+ break;
+ case LANCER_TX_COMP_PARITY_ERR:
+ tx_stats(txo)->tx_internal_parity_err++;
+ break;
+ case LANCER_TX_COMP_DMA_ERR:
+ tx_stats(txo)->tx_dma_err++;
+ break;
+ case LANCER_TX_COMP_SGE_ERR:
+ tx_stats(txo)->tx_sge_err++;
+ break;
+ }
+}
+
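+/* Fetch the next valid TX completion from the CQ. TX errors are accounted in
+ * the per-queue stats; fatal Lancer errors (LSO/parity/SGE) also flag
+ * BE_ERROR_TX on the adapter.
+ */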
+static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
+ struct be_tx_obj *txo)
+{
+ struct be_queue_info *tx_cq = &txo->cq;
+ struct be_tx_compl_info *txcp = &txo->txcp;
+ struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
+
+ if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
+ return NULL;
+
+ /* Ensure load ordering of valid bit dword and other dwords below */
+ rmb();
+ be_dws_le_to_cpu(compl, sizeof(*compl));
+
+ txcp->status = GET_TX_COMPL_BITS(status, compl);
+ txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
+
+ if (txcp->status) {
+ if (lancer_chip(adapter)) {
+ lancer_update_tx_err(txo, txcp->status);
+ /* Reset the adapter in case of TSO,
+ * SGE or Parity error
+ */
+ if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
+ txcp->status == LANCER_TX_COMP_PARITY_ERR ||
+ txcp->status == LANCER_TX_COMP_SGE_ERR)
+ be_set_error(adapter, BE_ERROR_TX);
+ } else {
+ be_update_tx_err(txo, txcp->status);
+ }
+ }
+
+ if (be_check_error(adapter, BE_ERROR_TX))
+ return NULL;
+
+ compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
+ queue_tail_inc(tx_cq);
+ return txcp;
+}
+
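+/* Unmap the WRBs and free the skb(s) of the TX request(s) completed up to
+ * last_index; returns the number of WRBs processed.
+ */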
+static u16 be_tx_compl_process(struct be_adapter *adapter,
+ struct be_tx_obj *txo, u16 last_index)
+{
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
+ struct be_queue_info *txq = &txo->q;
+ struct sk_buff *skb = NULL;
+ bool unmap_skb_hdr = false;
+ struct be_eth_wrb *wrb;
+ u16 num_wrbs = 0;
+ u32 frag_index;
+
+ do {
+ if (sent_skbs[txq->tail]) {
+ /* Free skb from prev req */
+ if (skb)
+ dev_consume_skb_any(skb);
+ skb = sent_skbs[txq->tail];
+ sent_skbs[txq->tail] = NULL;
+ queue_tail_inc(txq); /* skip hdr wrb */
+ num_wrbs++;
+ unmap_skb_hdr = true;
+ }
+ wrb = queue_tail_node(txq);
+ frag_index = txq->tail;
+ unmap_tx_frag(&adapter->pdev->dev, wrb,
+ (unmap_skb_hdr && skb_headlen(skb)));
+ unmap_skb_hdr = false;
+ queue_tail_inc(txq);
+ num_wrbs++;
+ } while (frag_index != last_index);
+ dev_consume_skb_any(skb);
+
+ return num_wrbs;
+}
+
+/* Return the number of events in the event queue */
+static inline int events_get(struct be_eq_obj *eqo)
+{
+ struct be_eq_entry *eqe;
+ int num = 0;
+
+ do {
+ eqe = queue_tail_node(&eqo->q);
+ if (eqe->evt == 0)
+ break;
+
+ rmb();
+ eqe->evt = 0;
+ num++;
+ queue_tail_inc(&eqo->q);
+ } while (true);
+
+ return num;
+}
+
+/* Leaves the EQ in disarmed state */
+static void be_eq_clean(struct be_eq_obj *eqo)
+{
+ int num = events_get(eqo);
+
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
+}
+
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
+{
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = 0;
+ rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
+ struct be_queue_info *rx_cq = &rxo->cq;
+ struct be_rx_compl_info *rxcp;
+ struct be_adapter *adapter = rxo->adapter;
+ int flush_wait = 0;
+
+ /* Consume pending rx completions.
+ * Wait for the flush completion (identified by zero num_rcvd)
+ * to arrive. Notify CQ even when there are no more CQ entries
+ * for HW to flush partially coalesced CQ entries.
+ * In Lancer, there is no need to wait for flush compl.
+ */
+ for (;;) {
+ rxcp = be_rx_compl_get(rxo);
+ if (!rxcp) {
+ if (lancer_chip(adapter))
+ break;
+
+ if (flush_wait++ > 50 ||
+ be_check_error(adapter,
+ BE_ERROR_HW)) {
+ dev_warn(&adapter->pdev->dev,
+ "did not receive flush compl\n");
+ break;
+ }
+ be_cq_notify(adapter, rx_cq->id, true, 0);
+ mdelay(1);
+ } else {
+ be_rx_compl_discard(rxo, rxcp);
+ be_cq_notify(adapter, rx_cq->id, false, 1);
+ if (rxcp->num_rcvd == 0)
+ break;
+ }
+ }
+
+ /* After cleanup, leave the CQ in unarmed state */
+ be_cq_notify(adapter, rx_cq->id, false, 0);
+}
+
+static void be_tx_compl_clean(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u16 cmpl = 0, timeo = 0, num_wrbs = 0;
+ struct be_tx_compl_info *txcp;
+ struct be_queue_info *txq;
+ u32 end_idx, notified_idx;
+ struct be_tx_obj *txo;
+ int i, pending_txqs;
+
+ /* Stop polling for compls when HW has been silent for 10ms */
+ do {
+ pending_txqs = adapter->num_tx_qs;
+
+ for_all_tx_queues(adapter, txo, i) {
+ cmpl = 0;
+ num_wrbs = 0;
+ txq = &txo->q;
+ while ((txcp = be_tx_compl_get(adapter, txo))) {
+ num_wrbs +=
+ be_tx_compl_process(adapter, txo,
+ txcp->end_index);
+ cmpl++;
+ }
+ if (cmpl) {
+ be_cq_notify(adapter, txo->cq.id, false, cmpl);
+ atomic_sub(num_wrbs, &txq->used);
+ timeo = 0;
+ }
+ if (!be_is_tx_compl_pending(txo))
+ pending_txqs--;
+ }
+
+ if (pending_txqs == 0 || ++timeo > 10 ||
+ be_check_error(adapter, BE_ERROR_HW))
+ break;
+
+ mdelay(1);
+ } while (true);
+
+ /* Free enqueued TX that was never notified to HW */
+ for_all_tx_queues(adapter, txo, i) {
+ txq = &txo->q;
+
+ if (atomic_read(&txq->used)) {
+ dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
+ i, atomic_read(&txq->used));
+ notified_idx = txq->tail;
+ end_idx = txq->tail;
+ index_adv(&end_idx, atomic_read(&txq->used) - 1,
+ txq->len);
+ /* Use the tx-compl process logic to handle requests
+ * that were not sent to the HW.
+ */
+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
+ atomic_sub(num_wrbs, &txq->used);
+ BUG_ON(atomic_read(&txq->used));
+ txo->pend_wrb_cnt = 0;
+ /* Since hw was never notified of these requests,
+ * reset TXQ indices
+ */
+ txq->head = notified_idx;
+ txq->tail = notified_idx;
+ }
+ }
+}
+
+static void be_evt_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_eq_obj *eqo;
+ int i;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ if (eqo->q.created) {
+ be_eq_clean(eqo);
+ be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ netif_napi_del(&eqo->napi);
+ free_cpumask_var(eqo->affinity_mask);
+ }
+ be_queue_free(adapter, &eqo->q);
+ }
+}
+
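+/* Allocate and create the event queues (one per RX/TX IRQ needed) along with
+ * their NAPI contexts and CPU affinity masks.
+ */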
+static int be_evt_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq;
+ struct be_eq_obj *eqo;
+ struct be_aic_obj *aic;
+ int i, rc;
+
+ /* need enough EQs to service both RX and TX queues */
+ adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
+ max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs));
+
+ adapter->aic_enabled = true;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ int numa_node = dev_to_node(&adapter->pdev->dev);
+
+ aic = &adapter->aic_obj[i];
+ eqo->adapter = adapter;
+ eqo->idx = i;
+ aic->max_eqd = BE_MAX_EQD;
+
+ eq = &eqo->q;
+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry));
+ if (rc)
+ return rc;
+
+ rc = be_cmd_eq_create(adapter, eqo);
+ if (rc)
+ return rc;
+
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
+ }
+ return 0;
+}
+
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+
+ q = &adapter->mcc_obj.q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
+ be_queue_free(adapter, q);
+
+ q = &adapter->mcc_obj.cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *q, *cq;
+
+ cq = &adapter->mcc_obj.cq;
+ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_compl)))
+ goto err;
+
+ /* Use the default EQ for MCC completions */
+ if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
+ goto mcc_cq_free;
+
+ q = &adapter->mcc_obj.q;
+ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ if (be_cmd_mccq_create(adapter, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(adapter, q);
+mcc_cq_destroy:
+ be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(adapter, cq);
+err:
+ return -1;
+}
+
+static void be_tx_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_tx_obj *txo;
+ u8 i;
+
+ for_all_tx_queues(adapter, txo, i) {
+ q = &txo->q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
+ be_queue_free(adapter, q);
+
+ q = &txo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+ }
+}
+
+static int be_tx_qs_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *cq;
+ struct be_tx_obj *txo;
+ struct be_eq_obj *eqo;
+ int status, i;
+
+ adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
+
+ for_all_tx_queues(adapter, txo, i) {
+ cq = &txo->cq;
+ status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
+ sizeof(struct be_eth_tx_compl));
+ if (status)
+ return status;
+
+ u64_stats_init(&txo->stats.sync);
+ u64_stats_init(&txo->stats.sync_compl);
+
+ /* If num_evt_qs is less than num_tx_qs, then more than
+ * one txq shares an eq
+ */
+ eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
+ status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
+ if (status)
+ return status;
+
+ status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
+ sizeof(struct be_eth_wrb));
+ if (status)
+ return status;
+
+ status = be_cmd_txq_create(adapter, txo);
+ if (status)
+ return status;
+
+ netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
+ eqo->idx);
+ }
+
+ dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
+ adapter->num_tx_qs);
+ return 0;
+}
+
+static void be_rx_cqs_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ q = &rxo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+ }
+}
+
+static int be_rx_cqs_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq, *cq;
+ struct be_rx_obj *rxo;
+ int rc, i;
+
+ adapter->num_rss_qs =
+ min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
+
+ /* We'll use RSS only if at least 2 RSS rings are supported. */
+ if (adapter->num_rss_qs < 2)
+ adapter->num_rss_qs = 0;
+
+ adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
+
+ /* When the interface is not capable of RSS rings (and there is no
+ * need to create a default RXQ) we'll still need one RXQ
+ */
+ if (adapter->num_rx_qs == 0)
+ adapter->num_rx_qs = 1;
+
+ adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
+ for_all_rx_queues(adapter, rxo, i) {
+ rxo->adapter = adapter;
+ cq = &rxo->cq;
+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
+ sizeof(struct be_eth_rx_compl));
+ if (rc)
+ return rc;
+
+ u64_stats_init(&rxo->stats.sync);
+ eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
+ rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
+ if (rc)
+ return rc;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "created %d RX queue(s)\n", adapter->num_rx_qs);
+ return 0;
+}
+
+static irqreturn_t be_intx(int irq, void *dev)
+{
+ struct be_eq_obj *eqo = dev;
+ struct be_adapter *adapter = eqo->adapter;
+ int num_evts = 0;
+
+ /* IRQ is not expected when NAPI is scheduled as the EQ
+ * will not be armed.
+ * But, this can happen on Lancer INTx where it takes
+ * a while to de-assert INTx or in BE2 where occasionally
+ * an interrupt may be raised even when EQ is unarmed.
+ * If NAPI is already scheduled, then counting & notifying
+ * events will orphan them.
+ */
+ if (napi_schedule_prep(&eqo->napi)) {
+ num_evts = events_get(eqo);
+ __napi_schedule(&eqo->napi);
+ if (num_evts)
+ eqo->spurious_intr = 0;
+ }
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
+
+ /* Return IRQ_HANDLED only for the first spurious intr
+ * after a valid intr to stop the kernel from branding
+ * this irq as a bad one!
+ */
+ if (num_evts || eqo->spurious_intr++ == 0)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static irqreturn_t be_msix(int irq, void *dev)
+{
+ struct be_eq_obj *eqo = dev;
+
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
+ napi_schedule(&eqo->napi);
+ return IRQ_HANDLED;
+}
+
+static inline bool do_gro(struct be_rx_compl_info *rxcp)
+{
+ return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
+}
+
+static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
+ int budget)
+{
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_queue_info *rx_cq = &rxo->cq;
+ struct be_rx_compl_info *rxcp;
+ u32 work_done;
+ u32 frags_consumed = 0;
+
+ for (work_done = 0; work_done < budget; work_done++) {
+ rxcp = be_rx_compl_get(rxo);
+ if (!rxcp)
+ break;
+
+ /* Is it a flush compl that has no data */
+ if (unlikely(rxcp->num_rcvd == 0))
+ goto loop_continue;
+
+ /* Discard compl with partial DMA Lancer B0 */
+ if (unlikely(!rxcp->pkt_size)) {
+ be_rx_compl_discard(rxo, rxcp);
+ goto loop_continue;
+ }
+
+ /* On BE drop pkts that arrive due to imperfect filtering in
+ * promiscuous mode on some SKUs
+ */
+ if (unlikely(rxcp->port != adapter->port_num &&
+ !lancer_chip(adapter))) {
+ be_rx_compl_discard(rxo, rxcp);
+ goto loop_continue;
+ }
+
+ if (do_gro(rxcp))
+ be_rx_compl_process_gro(rxo, napi, rxcp);
+ else
+ be_rx_compl_process(rxo, napi, rxcp);
+
+loop_continue:
+ frags_consumed += rxcp->num_rcvd;
+ be_rx_stats_update(rxo, rxcp);
+ }
+
+ if (work_done) {
+ be_cq_notify(adapter, rx_cq->id, true, work_done);
+
+ /* When an rx-obj gets into post_starved state, just
+ * let be_worker do the posting.
+ */
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
+ !rxo->rx_post_starved)
+ be_post_rx_frags(rxo, GFP_ATOMIC,
+ max_t(u32, MAX_RX_POST,
+ frags_consumed));
+ }
+
+ return work_done;
+}
+
+static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
+ int idx)
+{
+ int num_wrbs = 0, work_done = 0;
+ struct be_tx_compl_info *txcp;
+
+ while ((txcp = be_tx_compl_get(adapter, txo))) {
+ num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
+ work_done++;
+ }
+
+ if (work_done) {
+ be_cq_notify(adapter, txo->cq.id, true, work_done);
+ atomic_sub(num_wrbs, &txo->q.used);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue
+ * if it was stopped due to lack of tx wrbs. */
+ if (__netif_subqueue_stopped(adapter->netdev, idx) &&
+ be_can_txq_wake(txo)) {
+ netif_wake_subqueue(adapter->netdev, idx);
+ }
+
+ u64_stats_update_begin(&tx_stats(txo)->sync_compl);
+ tx_stats(txo)->tx_compl += work_done;
+ u64_stats_update_end(&tx_stats(txo)->sync_compl);
+ }
+}
+
+int be_poll(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter = eqo->adapter;
+ int max_work = 0, work, i, num_evts;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u32 mult_enc = 0;
+
+ num_evts = events_get(eqo);
+
+ for_all_tx_queues_on_eq(adapter, eqo, txo, i)
+ be_process_tx(adapter, txo, i);
+
+ /* This loop will iterate twice for EQ0 in which
+ * completions of the last RXQ (default one) are also processed.
+ * For other EQs the loop iterates only once.
+ */
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, budget);
+ max_work = max(work, max_work);
+ }
+
+ if (is_mcc_eqo(eqo))
+ be_process_mcc(adapter);
+
+ if (max_work < budget) {
+ napi_complete_done(napi, max_work);
+
+ /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
+ * delay via a delay multiplier encoding value
+ */
+ if (skyhawk_chip(adapter))
+ mult_enc = be_get_eq_delay_mult_enc(eqo);
+
+ be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
+ mult_enc);
+ } else {
+ /* As we'll continue in polling mode, count and clear events */
+ be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
+ }
+ return max_work;
+}
+
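+/* Check for unrecoverable adapter errors: the SLIPORT status registers on
+ * Lancer, or the UE status registers on BEx/Skyhawk. On error, the adapter
+ * error state is set and the failing bits are logged.
+ */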
+void be_detect_error(struct be_adapter *adapter)
+{
+ u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
+ u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
+ struct device *dev = &adapter->pdev->dev;
+ u16 val;
+ u32 i;
+
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
+ if (lancer_chip(adapter)) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ be_set_error(adapter, BE_ERROR_UE);
+ sliport_err1 = ioread32(adapter->db +
+ SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db +
+ SLIPORT_ERROR2_OFFSET);
+ /* Do not log error messages if it's a FW reset */
+ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+ sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+ dev_info(dev, "Reset is in progress\n");
+ } else {
+ dev_err(dev, "Error detected in the card\n");
+ dev_err(dev, "ERR: sliport status 0x%x\n",
+ sliport_status);
+ dev_err(dev, "ERR: sliport error1 0x%x\n",
+ sliport_err1);
+ dev_err(dev, "ERR: sliport error2 0x%x\n",
+ sliport_err2);
+ }
+ }
+ } else {
+ ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
+ ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
+ ue_lo_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_LOW_MASK);
+ ue_hi_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_HI_MASK);
+
+ ue_lo = (ue_lo & ~ue_lo_mask);
+ ue_hi = (ue_hi & ~ue_hi_mask);
+
+ if (ue_lo || ue_hi) {
+ /* On certain platforms BE3 hardware can indicate
+ * spurious UEs. In case of a UE in the chip,
+ * the POST register correctly reports either a
+ * FAT_LOG_START state (FW is currently dumping
+ * FAT log data) or an ARMFW_UE state. Check for the
+ * above states to ascertain if the UE is valid or not.
+ */
+ if (BE3_chip(adapter)) {
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_FAT_LOG_START)
+ != POST_STAGE_FAT_LOG_START &&
+ (val & POST_STAGE_ARMFW_UE)
+ != POST_STAGE_ARMFW_UE &&
+ (val & POST_STAGE_RECOVERABLE_ERR)
+ != POST_STAGE_RECOVERABLE_ERR)
+ return;
+ }
+
+ dev_err(dev, "Error detected in the adapter");
+ be_set_error(adapter, BE_ERROR_UE);
+
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_low_desc[i]);
+ }
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_hi_desc[i]);
+ }
+ }
+ }
+}
+
+static void be_msix_disable(struct be_adapter *adapter)
+{
+ if (msix_enabled(adapter)) {
+ pci_disable_msix(adapter->pdev);
+ adapter->num_msix_vec = 0;
+ adapter->num_msix_roce_vec = 0;
+ }
+}
+
+static int be_msix_enable(struct be_adapter *adapter)
+{
+ unsigned int i, max_roce_eqs;
+ struct device *dev = &adapter->pdev->dev;
+ int num_vec;
+
+ /* If RoCE is supported, program the max number of vectors that
+ * could be used for NIC and RoCE; otherwise, just program the number
+ * we'll use initially.
+ */
+ if (be_roce_supported(adapter)) {
+ max_roce_eqs =
+ be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
+ max_roce_eqs = min(max_roce_eqs, num_online_cpus());
+ num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
+ } else {
+ num_vec = max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs);
+ }
+
+ for (i = 0; i < num_vec; i++)
+ adapter->msix_entries[i].entry = i;
+
+ num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ MIN_MSIX_VECTORS, num_vec);
+ if (num_vec < 0)
+ goto fail;
+
+ if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
+ adapter->num_msix_roce_vec = num_vec / 2;
+ dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
+ adapter->num_msix_roce_vec);
+ }
+
+ adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
+
+ dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
+ adapter->num_msix_vec);
+ return 0;
+
+fail:
+ dev_warn(dev, "MSIx enable failed\n");
+
+ /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+ if (be_virtfn(adapter))
+ return num_vec;
+ return 0;
+}
+
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+ struct be_eq_obj *eqo)
+{
+ return adapter->msix_entries[eqo->msix_idx].vector;
+}
+
+static int be_msix_register(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct be_eq_obj *eqo;
+ int status, i, vec;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+ vec = be_msix_vec_get(adapter, eqo);
+ status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
+ if (status)
+ goto err_msix;
+
+ irq_update_affinity_hint(vec, eqo->affinity_mask);
+ }
+
+ return 0;
+err_msix:
+ for (i--; i >= 0; i--) {
+ eqo = &adapter->eq_obj[i];
+ free_irq(be_msix_vec_get(adapter, eqo), eqo);
+ }
+ dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
+ status);
+ be_msix_disable(adapter);
+ return status;
+}
+
+static int be_irq_register(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (msix_enabled(adapter)) {
+ status = be_msix_register(adapter);
+ if (status == 0)
+ goto done;
+ /* INTx is not supported for VF */
+ if (be_virtfn(adapter))
+ return status;
+ }
+
+ /* INTx: only the first EQ is used */
+ netdev->irq = adapter->pdev->irq;
+ status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
+ &adapter->eq_obj[0]);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "INTx request IRQ failed - err %d\n", status);
+ return status;
+ }
+done:
+ adapter->isr_registered = true;
+ return 0;
+}
+
+static void be_irq_unregister(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct be_eq_obj *eqo;
+ int i, vec;
+
+ if (!adapter->isr_registered)
+ return;
+
+ /* INTx */
+ if (!msix_enabled(adapter)) {
+ free_irq(netdev->irq, &adapter->eq_obj[0]);
+ goto done;
+ }
+
+ /* MSIx */
+ for_all_evt_queues(adapter, eqo, i) {
+ vec = be_msix_vec_get(adapter, eqo);
+ irq_update_affinity_hint(vec, NULL);
+ free_irq(vec, eqo);
+ }
+
+done:
+ adapter->isr_registered = false;
+}
+
+static void be_rx_qs_destroy(struct be_adapter *adapter)
+{
+ struct rss_info *rss = &adapter->rss_info;
+ struct be_queue_info *q;
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ q = &rxo->q;
+ if (q->created) {
+ /* If RXQs are destroyed while in an "out of buffer"
+ * state, there is a possibility of an HW stall on
+ * Lancer. So, post 64 buffers to each queue to relieve
+ * the "out of buffer" condition.
+ * Make sure there's space in the RXQ before posting.
+ */
+ if (lancer_chip(adapter)) {
+ be_rx_cq_clean(rxo);
+ if (atomic_read(&q->used) == 0)
+ be_post_rx_frags(rxo, GFP_KERNEL,
+ MAX_RX_POST);
+ }
+
+ be_cmd_rxq_destroy(adapter, q);
+ be_rx_cq_clean(rxo);
+ be_rxq_clean(rxo);
+ }
+ be_queue_free(adapter, q);
+ }
+
+ if (rss->rss_flags) {
+ rss->rss_flags = RSS_ENABLE_NONE;
+ be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 128, rss->rss_hkey);
+ }
+}
+
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+ /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ eth_zero_addr(adapter->dev_mac);
+ }
+
+ be_clear_uc_list(adapter);
+ be_clear_mc_list(adapter);
+
+ /* The IFACE flags are enabled in the open path and cleared
+ * in the close path. When a VF gets detached from the host and
+ * assigned to a VM the following happens:
+ * - VF's IFACE flags get cleared in the detach path
+ * - IFACE create is issued by the VF in the attach path
+ * Due to a bug in the BE3/Skyhawk-R FW
+ * (Lancer FW doesn't have the bug), the IFACE capability flags
+ * specified along with the IFACE create cmd issued by a VF are not
+ * honoured by FW. As a consequence, if a *new* driver
+ * (that enables/disables IFACE flags in open/close)
+ * is loaded in the host and an *old* driver is used by a VM/VF,
+ * the IFACE gets created *without* the needed flags.
+ * To avoid this, disable RX-filter flags only for Lancer.
+ */
+ if (lancer_chip(adapter)) {
+ be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+ adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+ }
+}
+
+static int be_close(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *eqo;
+ int i;
+
+ /* This protection is needed as be_close() may be called even when the
+ * adapter is in cleared state (after eeh perm failure)
+ */
+ if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
+ return 0;
+
+ /* Before attempting cleanup ensure all the pending cmds in the
+ * config_wq have finished execution
+ */
+ flush_workqueue(be_wq);
+
+ be_disable_if_filters(adapter);
+
+ if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_disable(&eqo->napi);
+ }
+ adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
+ }
+
+ be_async_mcc_disable(adapter);
+
+ /* Wait for all pending tx completions to arrive so that
+ * all tx skbs are freed.
+ */
+ netif_tx_disable(netdev);
+ be_tx_compl_clean(adapter);
+
+ be_rx_qs_destroy(adapter);
+
+ for_all_evt_queues(adapter, eqo, i) {
+ if (msix_enabled(adapter))
+ synchronize_irq(be_msix_vec_get(adapter, eqo));
+ else
+ synchronize_irq(netdev->irq);
+ be_eq_clean(eqo);
+ }
+
+ be_irq_unregister(adapter);
+
+ return 0;
+}
+
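+/* Create the RX rings, program the RSS indirection table and hash key (when
+ * more than one RSS ring exists) and post the initial receive buffers.
+ */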
+static int be_rx_qs_create(struct be_adapter *adapter)
+{
+ struct rss_info *rss = &adapter->rss_info;
+ u8 rss_key[RSS_HASH_KEY_LEN];
+ struct be_rx_obj *rxo;
+ int rc, i, j;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
+ sizeof(struct be_eth_rx_d));
+ if (rc)
+ return rc;
+ }
+
+ if (adapter->need_def_rxq || !adapter->num_rss_qs) {
+ rxo = default_rxo(adapter);
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+ rx_frag_size, adapter->if_handle,
+ false, &rxo->rss_id);
+ if (rc)
+ return rc;
+ }
+
+ for_all_rss_queues(adapter, rxo, i) {
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+ rx_frag_size, adapter->if_handle,
+ true, &rxo->rss_id);
+ if (rc)
+ return rc;
+ }
+
+ if (be_multi_rxq(adapter)) {
+ for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
+ for_all_rss_queues(adapter, rxo, i) {
+ if ((j + i) >= RSS_INDIR_TABLE_LEN)
+ break;
+ rss->rsstable[j + i] = rxo->rss_id;
+ rss->rss_queue[j + i] = i;
+ }
+ }
+ rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+
+ if (!BEx_chip(adapter))
+ rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
+
+ netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
+ rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ RSS_INDIR_TABLE_LEN, rss_key);
+ if (rc) {
+ rss->rss_flags = RSS_ENABLE_NONE;
+ return rc;
+ }
+
+ memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
+ } else {
+ /* Disable RSS, if only default RX Q is created */
+ rss->rss_flags = RSS_ENABLE_NONE;
+ }
+
+ /* Post 1 less than RXQ-len to avoid head being equal to tail,
+ * which is a queue empty condition
+ */
+ for_all_rx_queues(adapter, rxo, i)
+ be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
+
+ return 0;
+}
+
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
+ if (status)
+ return status;
+
+ /* Normally this condition is true as ->dev_mac is zeroed.
+ * But on BE3 VFs the initial MAC is pre-programmed by the PF and
+ * a subsequent be_dev_mac_add() can fail (after a fresh boot)
+ */
+ if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+ int old_pmac_id = -1;
+
+ /* Remember old programmed MAC if any - can happen on BE3 VF */
+ if (!is_zero_ether_addr(adapter->dev_mac))
+ old_pmac_id = adapter->pmac_id[0];
+
+ status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
+ if (status)
+ return status;
+
+ /* Delete the old programmed MAC as we successfully programmed
+ * a new MAC
+ */
+ if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+ be_dev_mac_del(adapter, old_pmac_id);
+
+ ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
+ }
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
+
+ __be_set_rx_mode(adapter);
+
+ return 0;
+}
+
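+/* Bring-up path: create RX rings, enable the interface filters, register
+ * IRQs, arm the CQs/EQs, enable NAPI and start the TX queues.
+ */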
+static int be_open(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *eqo;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u8 link_status;
+ int status, i;
+
+ status = be_rx_qs_create(adapter);
+ if (status)
+ goto err;
+
+ status = be_enable_if_filters(adapter);
+ if (status)
+ goto err;
+
+ status = be_irq_register(adapter);
+ if (status)
+ goto err;
+
+ for_all_rx_queues(adapter, rxo, i)
+ be_cq_notify(adapter, rxo->cq.id, true, 0);
+
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, true, 0);
+
+ be_async_mcc_enable(adapter);
+
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_enable(&eqo->napi);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
+ }
+ adapter->flags |= BE_FLAGS_NAPI_ENABLED;
+
+ status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
+
+ netif_tx_start_all_queues(netdev);
+
+ udp_tunnel_nic_reset_ntf(netdev);
+
+ return 0;
+err:
+ be_close(adapter->netdev);
+ return -EIO;
+}
+
+static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
+{
+ u32 addr;
+
+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
+
+ mac[5] = (u8)(addr & 0xFF);
+ mac[4] = (u8)((addr >> 8) & 0xFF);
+ mac[3] = (u8)((addr >> 16) & 0xFF);
+ /* Use the OUI from the current MAC address */
+ memcpy(mac, adapter->netdev->dev_addr, 3);
+}
+
+/*
+ * Generate a seed MAC address from the PF MAC Address using jhash.
+ * MAC addresses for VFs are assigned incrementally starting from the seed.
+ * These addresses are programmed in the ASIC by the PF and the VF driver
+ * queries for the MAC address during its probe.
+ */
+static int be_vf_eth_addr_config(struct be_adapter *adapter)
+{
+ u32 vf;
+ int status = 0;
+ u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
+
+ be_vf_eth_addr_generate(adapter, mac);
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (BEx_chip(adapter))
+ status = be_cmd_pmac_add(adapter, mac,
+ vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ else
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
+
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Mac address assignment failed for VF %d\n",
+ vf);
+ else
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
+
+ mac[5] += 1;
+ }
+ return status;
+}
+
+static int be_vfs_mac_query(struct be_adapter *adapter)
+{
+ int status, vf;
+ u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
+ mac, vf_cfg->if_handle,
+ false, vf+1);
+ if (status)
+ return status;
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
+ }
+ return 0;
+}
+
+static void be_vf_clear(struct be_adapter *adapter)
+{
+ struct be_vf_cfg *vf_cfg;
+ u32 vf;
+
+ if (pci_vfs_assigned(adapter->pdev)) {
+ dev_warn(&adapter->pdev->dev,
+ "VFs are assigned to VMs: not disabling VFs\n");
+ goto done;
+ }
+
+ pci_disable_sriov(adapter->pdev);
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (BEx_chip(adapter))
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
+ else
+ be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
+ vf + 1);
+
+ be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+ }
+
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
+done:
+ kfree(adapter->vf_cfg);
+ adapter->num_vfs = 0;
+ adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
+}
+
+static void be_clear_queues(struct be_adapter *adapter)
+{
+ be_mcc_queues_destroy(adapter);
+ be_rx_cqs_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ be_evt_queues_destroy(adapter);
+}
+
+static void be_cancel_worker(struct be_adapter *adapter)
+{
+ if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
+ cancel_delayed_work_sync(&adapter->work);
+ adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
+ }
+}
+
+static void be_cancel_err_detection(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
+ if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
+ cancel_delayed_work_sync(&err_rec->err_detection_work);
+ adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
+ }
+}
+
+/* VxLAN offload Notes:
+ *
+ * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
+ * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
+ * is expected to work across all types of IP tunnels once exported. Skyhawk
+ * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
+ */
+static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ status = be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_NORMAL_TO_TUNNEL);
+ if (status) {
+ dev_warn(dev, "Failed to convert normal interface to tunnel\n");
+ return status;
+ }
+ adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
+
+ status = be_cmd_set_vxlan_port(adapter, ti->port);
+ if (status) {
+ dev_warn(dev, "Failed to add VxLAN port\n");
+ return status;
+ }
+ adapter->vxlan_port = ti->port;
+
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL;
+
+ dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(ti->port));
+ return 0;
+}
+
+static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
+ be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_TUNNEL_TO_NORMAL);
+
+ if (adapter->vxlan_port)
+ be_cmd_set_vxlan_port(adapter, 0);
+
+ adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
+ adapter->vxlan_port = 0;
+
+ netdev->hw_enc_features = 0;
+ return 0;
+}
+
+static const struct udp_tunnel_nic_info be_udp_tunnels = {
+ .set_port = be_vxlan_set_port,
+ .unset_port = be_vxlan_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
+
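+/* Compute the per-VF share of queue, MAC, VLAN and interface resources that
+ * is handed to be_cmd_set_sriov_config() when provisioning VFs.
+ */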
+static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
+ struct be_resources *vft_res)
+{
+ struct be_resources res = adapter->pool_res;
+ u32 vf_if_cap_flags = res.vf_if_cap_flags;
+ struct be_resources res_mod = {0};
+ u16 num_vf_qs = 1;
+
+ /* Distribute the queue resources among the PF and its VFs */
+ if (num_vfs) {
+ /* Divide the rx queues evenly among the VFs and the PF, capped
+ * at VF-EQ-count. Any remainder queues belong to the PF.
+ */
+ num_vf_qs = min(SH_VF_MAX_NIC_EQS,
+ res.max_rss_qs / (num_vfs + 1));
+
+ /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
+ * RSS Tables per port. Provide RSS on VFs only if the number of
+ * VFs requested is less than its PF pool's RSS Tables limit.
+ */
+ if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
+ num_vf_qs = 1;
+ }
+
+ /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+ * which are modifiable using SET_PROFILE_CONFIG cmd.
+ */
+ be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_MODIFIABLE, 0);
+
+ /* If RSS IFACE capability flags are modifiable for a VF, set the
+ * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+ * more than 1 RSSQ is available for a VF.
+ * Otherwise, provision only 1 queue pair for VF.
+ */
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ if (num_vf_qs > 1) {
+ vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+ if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+ vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+ } else {
+ vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+ BE_IF_FLAGS_DEFQ_RSS);
+ }
+ } else {
+ num_vf_qs = 1;
+ }
+
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+
+ vft_res->vf_if_cap_flags = vf_if_cap_flags;
+ vft_res->max_rx_qs = num_vf_qs;
+ vft_res->max_rss_qs = num_vf_qs;
+ vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
+ vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
+
+ /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+ * among the PF and its VFs, if the fields are changeable
+ */
+ if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+ vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
+
+ if (res_mod.max_vlans == FIELD_MODIFIABLE)
+ vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
+
+ if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+ vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
+
+ if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+ vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
+}
+
+static void be_if_destroy(struct be_adapter *adapter)
+{
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
+
+ kfree(adapter->mc_list);
+ adapter->mc_list = NULL;
+
+ kfree(adapter->uc_list);
+ adapter->uc_list = NULL;
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct be_resources vft_res = {0};
+
+ be_cancel_worker(adapter);
+
+ flush_workqueue(be_wq);
+
+ if (sriov_enabled(adapter))
+ be_vf_clear(adapter);
+
+ /* Re-configure FW to distribute resources evenly across max-supported
+ * number of VFs, only when VFs are not already enabled.
+ */
+ if (skyhawk_chip(adapter) && be_physfn(adapter) &&
+ !pci_vfs_assigned(pdev)) {
+ be_calculate_vf_res(adapter,
+ pci_sriov_get_totalvfs(pdev),
+ &vft_res);
+ be_cmd_set_sriov_config(adapter, adapter->pool_res,
+ pci_sriov_get_totalvfs(pdev),
+ &vft_res);
+ }
+
+ be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
+
+ be_if_destroy(adapter);
+
+ be_clear_queues(adapter);
+
+ be_msix_disable(adapter);
+ adapter->flags &= ~BE_FLAGS_SETUP_DONE;
+ return 0;
+}
+
+static int be_vfs_if_create(struct be_adapter *adapter)
+{
+ struct be_resources res = {0};
+ u32 cap_flags, en_flags, vf;
+ struct be_vf_cfg *vf_cfg;
+ int status;
+
+ /* If a FW profile exists, then cap_flags are updated */
+ cap_flags = BE_VF_IF_EN_FLAGS;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (!BE3_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res, NULL,
+ ACTIVE_PROFILE_TYPE,
+ RESOURCE_LIMITS,
+ vf + 1);
+ if (!status) {
+ cap_flags = res.if_cap_flags;
+ /* Prevent VFs from enabling VLAN promiscuous
+ * mode
+ */
+ cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+ }
+
+ /* PF should enable IF flags during proxy if_create call */
+ en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+static int be_vf_setup_init(struct be_adapter *adapter)
+{
+ struct be_vf_cfg *vf_cfg;
+ int vf;
+
+ adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
+ GFP_KERNEL);
+ if (!adapter->vf_cfg)
+ return -ENOMEM;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ vf_cfg->if_handle = -1;
+ vf_cfg->pmac_id = -1;
+ }
+ return 0;
+}
+
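+/* Enable SR-IOV: create (or rediscover) the per-VF interfaces and MAC
+ * addresses, grant the FILTMGMT privilege where missing, and enable the VFs
+ * in the PCI layer if they are not already enabled.
+ */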
+static int be_vf_setup(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct be_vf_cfg *vf_cfg;
+ int status, old_vfs, vf;
+ bool spoofchk;
+
+ old_vfs = pci_num_vf(adapter->pdev);
+
+ status = be_vf_setup_init(adapter);
+ if (status)
+ goto err;
+
+ if (old_vfs) {
+ for_all_vfs(adapter, vf_cfg, vf) {
+ status = be_cmd_get_if_id(adapter, vf_cfg, vf);
+ if (status)
+ goto err;
+ }
+
+ status = be_vfs_mac_query(adapter);
+ if (status)
+ goto err;
+ } else {
+ status = be_vfs_if_create(adapter);
+ if (status)
+ goto err;
+
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto err;
+ }
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ /* Allow VFs to program MAC/VLAN filters */
+ status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
+ vf + 1);
+ if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter,
+ vf_cfg->privileges |
+ BE_PRIV_FILTMGMT,
+ vf + 1);
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
+ dev_info(dev, "VF%d has FILTMGMT privilege\n",
+ vf);
+ }
+ }
+
+ /* Allow full available bandwidth */
+ if (!old_vfs)
+ be_cmd_config_qos(adapter, 0, 0, vf + 1);
+
+ status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
+ vf_cfg->if_handle, NULL,
+ &spoofchk);
+ if (!status)
+ vf_cfg->spoofchk = spoofchk;
+
+ if (!old_vfs) {
+ be_cmd_enable_vf(adapter, vf + 1);
+ be_cmd_set_logical_link_config(adapter,
+ IFLA_VF_LINK_STATE_AUTO,
+ vf+1);
+ }
+ }
+
+ if (!old_vfs) {
+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (status) {
+ dev_err(dev, "SRIOV enable failed\n");
+ adapter->num_vfs = 0;
+ goto err;
+ }
+ }
+
+ if (BE3_chip(adapter)) {
+ /* On BE3, enable VEB only when SRIOV is enabled */
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_VEB, 0);
+ if (status)
+ goto err;
+ }
+
+ adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
+ return 0;
+err:
+ dev_err(dev, "VF setup failed\n");
+ be_vf_clear(adapter);
+ return status;
+}
+
+/* Convert function_mode bits on BE3 to SH mc_type enums */
+static u8 be_convert_mc_type(u32 function_mode)
+{
+ if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
+ return vNIC1;
+ else if (function_mode & QNQ_MODE)
+ return FLEX10;
+ else if (function_mode & VNIC_MODE)
+ return vNIC2;
+ else if (function_mode & UMC_ENABLED)
+ return UMC;
+ else
+ return MC_NONE;
+}
+
+/* On BE2/BE3, FW does not report the supported limits */
+static void BEx_get_resources(struct be_adapter *adapter,
+ struct be_resources *res)
+{
+ bool use_sriov = adapter->num_vfs ? 1 : 0;
+
+ if (be_physfn(adapter))
+ res->max_uc_mac = BE_UC_PMAC_COUNT;
+ else
+ res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
+
+ adapter->mc_type = be_convert_mc_type(adapter->function_mode);
+
+ if (be_is_mc(adapter)) {
+ /* Assuming that there are 4 channels per port,
+ * when multi-channel is enabled
+ */
+ if (be_is_qnq_mode(adapter))
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ /* In a non-qnq multichannel mode, the pvid
+ * takes up one vlan entry
+ */
+ res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
+ } else {
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ }
+
+ res->max_mcast_mac = BE_MAX_MC;
+
+ /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
+ * 2) Create multiple TX rings on a BE3-R multi-channel interface
+ * *only* if it is RSS-capable.
+ */
+ if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
+ be_virtfn(adapter) ||
+ (be_is_mc(adapter) &&
+ !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
+ res->max_tx_qs = 1;
+ } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+ struct be_resources super_nic_res = {0};
+
+ /* On a SuperNIC profile, the driver needs to use the
+ * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
+ */
+ be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
+ ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
+ 0);
+ /* Some old versions of BE3 FW don't report max_tx_qs value */
+ res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
+ } else {
+ res->max_tx_qs = BE3_MAX_TX_QS;
+ }
+
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !use_sriov && be_physfn(adapter))
+ res->max_rss_qs = (adapter->be3_native) ?
+ BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ res->max_rx_qs = res->max_rss_qs + 1;
+
+ if (be_physfn(adapter))
+ res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
+ BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+ else
+ res->max_evt_qs = 1;
+
+ res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+ res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
+ if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
+ res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
+}
+
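+/* Reset the adapter's soft state to defaults before (re)configuring HW */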
+static void be_setup_init(struct be_adapter *adapter)
+{
+ adapter->vlan_prio_bmap = 0xff;
+ adapter->phy.link_speed = -1;
+ adapter->if_handle = -1;
+ adapter->be3_native = false;
+ adapter->if_flags = 0;
+ adapter->phy_state = BE_UNKNOWN_PHY_STATE;
+ if (be_physfn(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+ else
+ adapter->cmd_privileges = MIN_PRIVILEGES;
+}
+
+/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
+ * However, this HW limitation is not exposed to the host via any SLI cmd.
+ * As a result, in the case of SRIOV and in particular multi-partition configs
+ * the driver needs to calculate a proportional share of RSS Tables per PF-pool
+ * for distribution among the VFs. This self-imposed limit determines the
+ * number of VFs for which RSS can be enabled.
+ */
+static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+{
+ struct be_port_resources port_res = {0};
+ u8 rss_tables_on_port;
+ u16 max_vfs = be_max_vfs(adapter);
+
+ be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
+
+ rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
+
+ /* Each PF Pool's RSS Tables limit =
+ * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
+ */
+ adapter->pool_res.max_rss_tables =
+ max_vfs * rss_tables_on_port / port_res.max_vfs;
+}
+
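+/* Query the PF-pool SR-IOV limits from FW. If VFs were left enabled by a
+ * previous driver load, reuse the TotalVFs value from the pci_dev instead.
+ */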
+static int be_get_sriov_config(struct be_adapter *adapter)
+{
+ struct be_resources res = {0};
+ int max_vfs, old_vfs;
+
+ be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
+
+ /* Some old versions of BE3 FW don't report max_vfs value */
+ if (BE3_chip(adapter) && !res.max_vfs) {
+ max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
+ res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+ }
+
+ adapter->pool_res = res;
+
+ /* If during previous unload of the driver, the VFs were not disabled,
+ * then we cannot rely on the PF POOL limits for the TotalVFs value.
+ * Instead use the TotalVFs value stored in the pci-dev struct.
+ */
+ old_vfs = pci_num_vf(adapter->pdev);
+ if (old_vfs) {
+ dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
+ old_vfs);
+
+ adapter->pool_res.max_vfs =
+ pci_sriov_get_totalvfs(adapter->pdev);
+ adapter->num_vfs = old_vfs;
+ }
+
+ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+ be_calculate_pf_pool_rss_tables(adapter);
+ dev_info(&adapter->pdev->dev,
+ "RSS can be enabled for all VFs if num_vfs <= %d\n",
+ be_max_pf_pool_rss_tables(adapter));
+ }
+ return 0;
+}
+
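+/* On Skyhawk, when no VFs are enabled yet, distribute the PF-pool
+ * resources to the PF at driver load time.
+ */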
+static void be_alloc_sriov_res(struct be_adapter *adapter)
+{
+ int old_vfs = pci_num_vf(adapter->pdev);
+ struct be_resources vft_res = {0};
+ int status;
+
+ be_get_sriov_config(adapter);
+
+ if (!old_vfs)
+ pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
+
+ /* When the HW is in SRIOV capable configuration, the PF-pool
+ * resources are given to PF during driver load, if there are no
+ * old VFs. This facility is not available in BE3 FW.
+ * Also, this is done by FW in Lancer chip.
+ */
+ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+ be_calculate_vf_res(adapter, 0, &vft_res);
+ status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
+ &vft_res);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to optimize SRIOV resources\n");
+ }
+}
+
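+/* Discover per-function resource limits (hard-coded for BE2/BE3, queried
+ * from FW otherwise) and derive the initial RX/TX IRQ counts.
+ */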
+static int be_get_resources(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct be_resources res = {0};
+ int status;
+
+ /* For Lancer, SH etc read per-function resource limits from FW.
+ * GET_FUNC_CONFIG returns per function guaranteed limits.
+ * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
+ */
+ if (BEx_chip(adapter)) {
+ BEx_get_resources(adapter, &res);
+ } else {
+ status = be_cmd_get_func_config(adapter, &res);
+ if (status)
+ return status;
+
+ /* If a default RXQ must be created, we'll use up one RSSQ */
+ if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
+ !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
+ res.max_rss_qs -= 1;
+ }
+
+ /* If RoCE is supported, stash away half the EQs for RoCE */
+ res.max_nic_evt_qs = be_roce_supported(adapter) ?
+ res.max_evt_qs / 2 : res.max_evt_qs;
+ adapter->res = res;
+
+ /* If FW supports RSS default queue, then skip creating non-RSS
+ * queue for non-IP traffic.
+ */
+ adapter->need_def_rxq = (be_if_cap_flags(adapter) &
+ BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
+
+ dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
+ be_max_txqs(adapter), be_max_rxqs(adapter),
+ be_max_rss(adapter), be_max_nic_eqs(adapter),
+ be_max_vfs(adapter));
+ dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
+ be_max_uc(adapter), be_max_mc(adapter),
+ be_max_vlans(adapter));
+
+ /* Ensure RX and TX queues are created in pairs at init time */
+ adapter->cfg_num_rx_irqs =
+ min_t(u16, netif_get_num_default_rss_queues(),
+ be_max_qp_irqs(adapter));
+ adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
+ return 0;
+}
+
+static int be_get_config(struct be_adapter *adapter)
+{
+ int status, level;
+ u16 profile_id;
+
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
+
+ status = be_cmd_query_fw_cfg(adapter);
+ if (status)
+ return status;
+
+ if (!lancer_chip(adapter) && be_physfn(adapter))
+ be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
+
+ if (BEx_chip(adapter)) {
+ level = be_cmd_get_fw_log_level(adapter);
+ adapter->msg_enable =
+ level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ }
+
+ be_cmd_get_acpi_wol_cap(adapter);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
+
+ be_cmd_query_port_name(adapter);
+
+ if (be_physfn(adapter)) {
+ status = be_cmd_get_active_profile(adapter, &profile_id);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Using profile 0x%x\n", profile_id);
+ }
+
+ return 0;
+}
+
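+/* If the netdev does not have a MAC address yet, read the permanent MAC
+ * from FW and assign it.
+ */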
+static int be_mac_setup(struct be_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int status;
+
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ status = be_cmd_get_perm_mac(adapter, mac);
+ if (status)
+ return status;
+
+ eth_hw_addr_set(adapter->netdev, mac);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+ /* Initial MAC for BE3 VFs is already programmed by PF */
+ if (BEx_chip(adapter) && be_virtfn(adapter))
+ memcpy(adapter->dev_mac, mac, ETH_ALEN);
+ }
+
+ return 0;
+}
+
+static void be_schedule_worker(struct be_adapter *adapter)
+{
+ queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+}
+
+static void be_destroy_err_recovery_workq(void)
+{
+ if (!be_err_recovery_workq)
+ return;
+
+ destroy_workqueue(be_err_recovery_workq);
+ be_err_recovery_workq = NULL;
+}
+
+static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
+ queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
+ msecs_to_jiffies(delay));
+ adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
+}
+
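+/* Create event queues, TX queues, RX CQs and MCC queues, and publish the
+ * real queue counts to the network stack.
+ */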
+static int be_setup_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ status = be_evt_queues_create(adapter);
+ if (status)
+ goto err;
+
+ status = be_tx_qs_create(adapter);
+ if (status)
+ goto err;
+
+ status = be_rx_cqs_create(adapter);
+ if (status)
+ goto err;
+
+ status = be_mcc_queues_create(adapter);
+ if (status)
+ goto err;
+
+ status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
+ if (status)
+ goto err;
+
+ status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
+ if (status)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev, "queue_setup failed\n");
+ return status;
+}
+
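+/* Allocate the MAC/mcast/ucast filter tables and create the interface
+ * (vNIC) object in FW.
+ */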
+static int be_if_create(struct be_adapter *adapter)
+{
+ u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ u32 cap_flags = be_if_cap_flags(adapter);
+
+ /* alloc required memory for other filtering fields */
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
+ adapter->mc_list = kcalloc(be_max_mc(adapter),
+ sizeof(*adapter->mc_list), GFP_KERNEL);
+ if (!adapter->mc_list)
+ return -ENOMEM;
+
+ adapter->uc_list = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->uc_list), GFP_KERNEL);
+ if (!adapter->uc_list)
+ return -ENOMEM;
+
+ if (adapter->cfg_num_rx_irqs == 1)
+ cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
+
+ en_flags &= cap_flags;
+ /* will enable all the needed filter flags in be_open() */
+ return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
+}
+
+int be_update_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (netif_running(netdev)) {
+ /* be_tx_timeout() must not run concurrently with this
+ * function, synchronize with an already-running dev_watchdog
+ */
+ netif_tx_lock_bh(netdev);
+ /* device cannot transmit now, avoid dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_unlock_bh(netdev);
+
+ be_close(netdev);
+ }
+
+ be_cancel_worker(adapter);
+
+ /* If any vectors have been shared with RoCE we cannot re-program
+ * the MSIx table.
+ */
+ if (!adapter->num_msix_roce_vec)
+ be_msix_disable(adapter);
+
+ be_clear_queues(adapter);
+ status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+ if (status)
+ return status;
+
+ if (!msix_enabled(adapter)) {
+ status = be_msix_enable(adapter);
+ if (status)
+ return status;
+ }
+
+ status = be_if_create(adapter);
+ if (status)
+ return status;
+
+ status = be_setup_queues(adapter);
+ if (status)
+ return status;
+
+ be_schedule_worker(adapter);
+
+ /* The IF was destroyed and re-created. We need to clear
+ * all promiscuous flags valid for the destroyed IF.
+ * Without this promisc mode is not restored during
+ * be_open() because the driver thinks that it is
+ * already enabled in HW.
+ */
+ adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
+ if (netif_running(netdev))
+ status = be_open(netdev);
+
+ return status;
+}
+
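+/* Parse the major number out of a "major.minor..." FW version string */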
+static inline int fw_major_num(const char *fw_ver)
+{
+ int fw_major = 0, i;
+
+ i = sscanf(fw_ver, "%d.", &fw_major);
+ if (i != 1)
+ return 0;
+
+ return fw_major;
+}
+
+/* During error recovery, always FLR the PF.
+ * Otherwise, FLR the PF only if no VFs are already enabled.
+ */
+static bool be_reset_required(struct be_adapter *adapter)
+{
+ if (be_error_recovering(adapter))
+ return true;
+ else
+ return pci_num_vf(adapter->pdev) == 0;
+}
+
+/* Wait for the FW to be ready and perform the required initialization */
+static int be_func_init(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_fw_wait_ready(adapter);
+ if (status)
+ return status;
+
+ /* FW is now ready; clear errors to allow cmds/doorbell */
+ be_clear_error(adapter, BE_CLEAR_ALL);
+
+ if (be_reset_required(adapter)) {
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ return status;
+
+ /* Wait for interrupts to quiesce after an FLR */
+ msleep(100);
+ }
+
+ /* Tell FW we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ return status;
+
+ /* Allow interrupts for other ULPs running on NIC function */
+ be_intr_set(adapter, true);
+
+ return 0;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ status = be_func_init(adapter);
+ if (status)
+ return status;
+
+ be_setup_init(adapter);
+
+ if (!lancer_chip(adapter))
+ be_cmd_req_native_mode(adapter);
+
+ /* invoke this cmd first to get pf_num and vf_num which are needed
+ * for issuing profile related cmds
+ */
+ if (!BEx_chip(adapter)) {
+ status = be_cmd_get_func_config(adapter, NULL);
+ if (status)
+ return status;
+ }
+
+ status = be_get_config(adapter);
+ if (status)
+ goto err;
+
+ if (!BE2_chip(adapter) && be_physfn(adapter))
+ be_alloc_sriov_res(adapter);
+
+ status = be_get_resources(adapter);
+ if (status)
+ goto err;
+
+ status = be_msix_enable(adapter);
+ if (status)
+ goto err;
+
+ /* will enable all the needed filter flags in be_open() */
+ status = be_if_create(adapter);
+ if (status)
+ goto err;
+
+ /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+ rtnl_lock();
+ status = be_setup_queues(adapter);
+ rtnl_unlock();
+ if (status)
+ goto err;
+
+ be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+
+ status = be_mac_setup(adapter);
+ if (status)
+ goto err;
+
+ be_cmd_get_fw_ver(adapter);
+ dev_info(dev, "FW version is %s\n", adapter->fw_ver);
+
+ if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
+ dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
+ adapter->fw_ver);
+ dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
+ }
+
+ status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
+ adapter->rx_fc);
+ if (status)
+ be_cmd_get_flow_control(adapter, &adapter->tx_fc,
+ &adapter->rx_fc);
+
+ dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
+ adapter->tx_fc, adapter->rx_fc);
+
+ if (be_physfn(adapter))
+ be_cmd_set_logical_link_config(adapter,
+ IFLA_VF_LINK_STATE_AUTO, 0);
+
+ /* The BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
+ * confusing a Linux bridge or OVS that it might be connected to.
+ * When SRIOV is not enabled, set the EVB to PASSTHRU mode, which
+ * effectively disables the EVB.
+ */
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
+
+ if (adapter->num_vfs)
+ be_vf_setup(adapter);
+
+ status = be_cmd_get_phy_info(adapter);
+ if (!status && be_pause_supported(adapter))
+ adapter->phy.fc_autoneg = 1;
+
+ if (be_physfn(adapter) && !lancer_chip(adapter))
+ be_cmd_set_features(adapter);
+
+ be_schedule_worker(adapter);
+ adapter->flags |= BE_FLAGS_SETUP_DONE;
+ return 0;
+err:
+ be_clear(adapter);
+ return status;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *eqo;
+ int i;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
+ napi_schedule(&eqo->napi);
+ }
+}
+#endif
+
+int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
+{
+ const struct firmware *fw;
+ int status;
+
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -ENETDOWN;
+ }
+
+ status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
+ if (status)
+ goto fw_exit;
+
+ dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
+
+ if (lancer_chip(adapter))
+ status = lancer_fw_download(adapter, fw);
+ else
+ status = be_fw_download(adapter, fw);
+
+ if (!status)
+ be_cmd_get_fw_ver(adapter);
+
+fw_exit:
+ release_firmware(fw);
+ return status;
+}
+
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem;
+ int status = 0;
+ u16 mode = 0;
+
+ if (!sriov_enabled(adapter))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
+ return -EOPNOTSUPP;
+
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
+
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ mode == BRIDGE_MODE_VEPA ?
+ PORT_FWD_TYPE_VEPA :
+ PORT_FWD_TYPE_VEB, 0);
+ if (status)
+ goto err;
+
+ dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+ }
+err:
+ dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+}
+
+static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ int status = 0;
+ u8 hsw_mode;
+
+ /* BE and Lancer chips support VEB mode only */
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
+ if (!pci_sriov_get_totalvfs(adapter->pdev))
+ return 0;
+ hsw_mode = PORT_FWD_TYPE_VEB;
+ } else {
+ status = be_cmd_get_hsw_config(adapter, NULL, 0,
+ adapter->if_handle, &hsw_mode,
+ NULL);
+ if (status)
+ return 0;
+
+ if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+ return 0;
+ }
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ hsw_mode == PORT_FWD_TYPE_VEPA ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
+ 0, 0, nlflags, filter_mask, NULL);
+}
+
+static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
+ void (*func)(struct work_struct *))
+{
+ struct be_cmd_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ dev_err(&adapter->pdev->dev,
+ "be_work memory allocation failed\n");
+ return NULL;
+ }
+
+ INIT_WORK(&work->work, func);
+ work->adapter = adapter;
+ return work;
+}
+
+static netdev_features_t be_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 l4_hdr = 0;
+
+ if (skb_is_gso(skb)) {
+ /* IPv6 TSO requests with extension hdrs are a problem
+ * for Lancer and BE3 HW. Disable the TSO6 feature.
+ */
+ if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
+ features &= ~NETIF_F_TSO6;
+
+ /* Lancer cannot handle a packet with an MSS of less than 256.
+ * It also can't handle a TSO packet with a single segment.
+ * Disable GSO support in such cases.
+ */
+ if (lancer_chip(adapter) &&
+ (skb_shinfo(skb)->gso_size < 256 ||
+ skb_shinfo(skb)->gso_segs == 1))
+ features &= ~NETIF_F_GSO_MASK;
+ }
+
+ /* The code below restricts offload features for some tunneled and
+ * Q-in-Q packets.
+ * Offload features for normal (non tunnel) packets are unchanged.
+ */
+ features = vlan_features_check(skb, features);
+ if (!skb->encapsulation ||
+ !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
+ return features;
+
+ /* It's an encapsulated packet and VxLAN offloads are enabled. We
+ * should disable tunnel offload features if it's not a VxLAN packet,
+ * as tunnel offloads have been enabled only for VxLAN. This is done to
+ * allow other tunneled traffic like GRE to work fine while VxLAN
+ * offloads are configured in Skyhawk-R.
+ */
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if (l4_hdr != IPPROTO_UDP ||
+ skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
+ !adapter->vxlan_port ||
+ udp_hdr(skb)->dest != adapter->vxlan_port)
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+
+static int be_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 *id;
+
+ if (MAX_PHYS_ITEM_ID_LEN < id_len)
+ return -ENOSPC;
+
+ ppid->id[0] = adapter->hba_port_num + 1;
+ id = &ppid->id[1];
+ for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+ i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+ memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+ ppid->id_len = id_len;
+
+ return 0;
+}
+
+static void be_set_rx_mode(struct net_device *dev)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ struct be_cmd_work *work;
+
+ work = be_alloc_work(adapter, be_work_set_rx_mode);
+ if (work)
+ queue_work(be_wq, &work->work);
+}
+
+static const struct net_device_ops be_netdev_ops = {
+ .ndo_open = be_open,
+ .ndo_stop = be_close,
+ .ndo_start_xmit = be_xmit,
+ .ndo_set_rx_mode = be_set_rx_mode,
+ .ndo_set_mac_address = be_mac_addr_set,
+ .ndo_get_stats64 = be_get_stats64,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = be_vlan_add_vid,
+ .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
+ .ndo_set_vf_mac = be_set_vf_mac,
+ .ndo_set_vf_vlan = be_set_vf_vlan,
+ .ndo_set_vf_rate = be_set_vf_tx_rate,
+ .ndo_get_vf_config = be_get_vf_config,
+ .ndo_set_vf_link_state = be_set_vf_link_state,
+ .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
+ .ndo_tx_timeout = be_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = be_netpoll,
+#endif
+ .ndo_bridge_setlink = be_ndo_bridge_setlink,
+ .ndo_bridge_getlink = be_ndo_bridge_getlink,
+ .ndo_features_check = be_features_check,
+ .ndo_get_phys_port_id = be_get_phys_port_id,
+};
+
+static void be_netdev_init(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX;
+ if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
+
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->flags |= IFF_MULTICAST;
+
+ netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
+
+ netdev->netdev_ops = &be_netdev_ops;
+
+ netdev->ethtool_ops = &be_ethtool_ops;
+
+ if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
+ netdev->udp_tunnel_nic_info = &be_udp_tunnels;
+
+ /* MTU range: 256 - 9000 */
+ netdev->min_mtu = BE_MIN_MTU;
+ netdev->max_mtu = BE_MAX_MTU;
+}
+
+static void be_cleanup(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ be_close(netdev);
+ rtnl_unlock();
+
+ be_clear(adapter);
+}
+
+static int be_resume(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ status = be_setup(adapter);
+ if (status)
+ return status;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ status = be_open(netdev);
+ rtnl_unlock();
+
+ if (status)
+ return status;
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
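+/* Initiate a chip soft reset by setting the SR bit in the SLIPORT
+ * soft-reset register.
+ */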
+static void be_soft_reset(struct be_adapter *adapter)
+{
+ u32 val;
+
+ dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
+ val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+ val |= SLIPORT_SOFTRESET_SR_MASK;
+ iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+}
+
+static bool be_err_is_recoverable(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ unsigned long initial_idle_time =
+ msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
+ unsigned long recovery_interval =
+ msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
+ u16 ue_err_code;
+ u32 val;
+
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
+ return false;
+ ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
+ if (ue_err_code == 0)
+ return false;
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
+ ue_err_code);
+
+ if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from driver load\n",
+ jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (err_rec->last_recovery_time && time_before_eq(
+ jiffies - err_rec->last_recovery_time, recovery_interval)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from last recovery\n",
+ jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (ue_err_code == err_rec->last_err_code) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover from a consecutive TPE error\n");
+ return false;
+ }
+
+ err_rec->last_recovery_time = jiffies;
+ err_rec->last_err_code = ue_err_code;
+ return true;
+}
+
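+/* State machine for the non-Lancer TPE recovery flow. Only PF0 initiates
+ * the chip soft reset; other PFs wait and then poll for FW readiness.
+ */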
+static int be_tpe_recover(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ int status = -EAGAIN;
+ u32 val;
+
+ switch (err_rec->recovery_state) {
+ case ERR_RECOVERY_ST_NONE:
+ err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
+ err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_DETECT:
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) !=
+ POST_STAGE_RECOVERABLE_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable HW error detected: 0x%x\n", val);
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
+
+ /* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
+ * milliseconds before it checks for final error status in
+ * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
+ * If it does, then PF0 initiates a Soft Reset.
+ */
+ if (adapter->pf_num == 0) {
+ err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
+ err_rec->resched_delay = err_rec->ue_to_reset_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+ }
+
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_RESET:
+ if (!be_err_is_recoverable(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to meet recovery criteria\n");
+ status = -EIO;
+ err_rec->resched_delay = 0;
+ break;
+ }
+ be_soft_reset(adapter);
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ err_rec->ue_to_reset_time;
+ break;
+
+ case ERR_RECOVERY_ST_PRE_POLL:
+ err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
+ err_rec->resched_delay = 0;
+ status = 0; /* done */
+ break;
+
+ default:
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ return status;
+}
+
+static int be_err_recover(struct be_adapter *adapter)
+{
+ int status;
+
+ if (!lancer_chip(adapter)) {
+ if (!adapter->error_recovery.recovery_supported ||
+ adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
+ return -EIO;
+ status = be_tpe_recover(adapter);
+ if (status)
+ goto err;
+ }
+
+ /* Wait for adapter to reach quiescent state before
+ * destroying queues
+ */
+ status = be_fw_wait_ready(adapter);
+ if (status)
+ goto err;
+
+ adapter->flags |= BE_FLAGS_TRY_RECOVERY;
+
+ be_cleanup(adapter);
+
+ status = be_resume(adapter);
+ if (status)
+ goto err;
+
+ adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
+
+err:
+ return status;
+}
+
+static void be_err_detection_task(struct work_struct *work)
+{
+ struct be_error_recovery *err_rec =
+ container_of(work, struct be_error_recovery,
+ err_detection_work.work);
+ struct be_adapter *adapter =
+ container_of(err_rec, struct be_adapter,
+ error_recovery);
+ u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
+ struct device *dev = &adapter->pdev->dev;
+ int recovery_status;
+
+ be_detect_error(adapter);
+ if (!be_check_error(adapter, BE_ERROR_HW))
+ goto reschedule_task;
+
+ recovery_status = be_err_recover(adapter);
+ if (!recovery_status) {
+ err_rec->recovery_retries = 0;
+ err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
+ dev_info(dev, "Adapter recovery successful\n");
+ goto reschedule_task;
+ } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
+ /* BEx/SH recovery state machine */
+ if (adapter->pf_num == 0 &&
+ err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
+ dev_err(&adapter->pdev->dev,
+ "Adapter recovery in progress\n");
+ resched_delay = err_rec->resched_delay;
+ goto reschedule_task;
+ } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
+ /* For VFs, check every second whether the PF has
+ * allocated resources.
+ */
+ dev_err(dev, "Re-trying adapter recovery\n");
+ goto reschedule_task;
+ } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
+ ERR_RECOVERY_MAX_RETRY_COUNT) {
+ /* In case of another error during recovery, it takes 30 sec
+ * for adapter to come out of error. Retry error recovery after
+ * this time interval.
+ */
+ dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
+ resched_delay = ERR_RECOVERY_RETRY_DELAY;
+ goto reschedule_task;
+ } else {
+ dev_err(dev, "Adapter recovery failed\n");
+ dev_err(dev, "Please reboot server to recover\n");
+ }
+
+ return;
+
+reschedule_task:
+ be_schedule_err_detection(adapter, resched_delay);
+}
+
+static void be_log_sfp_info(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_query_sfp_info(adapter);
+ if (!status) {
+ dev_err(&adapter->pdev->dev,
+ "Port %c: %s Vendor: %s part no: %s",
+ adapter->port_name,
+ be_misconfig_evt_port_state[adapter->phy_state],
+ adapter->phy.vendor_name,
+ adapter->phy.vendor_pn);
+ }
+ adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
+}
+
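+/* Periodic (1 second) housekeeping: die-temperature query, stats refresh,
+ * replenishing starved RX queues and EQ-delay updates.
+ */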
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (be_physfn(adapter) &&
+ MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions
+ */
+ if (!netif_running(adapter->netdev)) {
+ local_bh_disable();
+ be_process_mcc(adapter);
+ local_bh_enable();
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ for_all_rx_queues(adapter, rxo, i) {
+ /* Replenish RX-queues starved due to memory
+ * allocation failures.
+ */
+ if (rxo->rx_post_starved)
+ be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+ }
+
+ /* EQ-delay update for Skyhawk is done while notifying EQ */
+ if (!skyhawk_chip(adapter))
+ be_eqd_update(adapter, false);
+
+ if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
+ be_log_sfp_info(adapter);
+
+reschedule:
+ adapter->work_counter++;
+ queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
+}
+
+static void be_unmap_pci_bars(struct be_adapter *adapter)
+{
+ if (adapter->csr)
+ pci_iounmap(adapter->pdev, adapter->csr);
+ if (adapter->db)
+ pci_iounmap(adapter->pdev, adapter->db);
+ if (adapter->pcicfg && adapter->pcicfg_mapped)
+ pci_iounmap(adapter->pdev, adapter->pcicfg);
+}
+
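+/* Doorbell BAR number: BAR 0 on Lancer and VFs, BAR 4 otherwise */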
+static int db_bar(struct be_adapter *adapter)
+{
+ if (lancer_chip(adapter) || be_virtfn(adapter))
+ return 0;
+ else
+ return 4;
+}
+
+static int be_roce_map_pci_bars(struct be_adapter *adapter)
+{
+ if (skyhawk_chip(adapter)) {
+ adapter->roce_db.size = 4096;
+ adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
+ db_bar(adapter));
+ adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
+ db_bar(adapter));
+ }
+ return 0;
+}
+
+static int be_map_pci_bars(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u8 __iomem *addr;
+ u32 sli_intf;
+
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT;
+ adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+
+ if (BEx_chip(adapter) && be_physfn(adapter)) {
+ adapter->csr = pci_iomap(pdev, 2, 0);
+ if (!adapter->csr)
+ return -ENOMEM;
+ }
+
+ addr = pci_iomap(pdev, db_bar(adapter), 0);
+ if (!addr)
+ goto pci_map_err;
+ adapter->db = addr;
+
+ if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
+ if (be_physfn(adapter)) {
+ /* PCICFG is the 2nd BAR in BE2 */
+ addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
+ if (!addr)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ adapter->pcicfg_mapped = true;
+ } else {
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+ adapter->pcicfg_mapped = false;
+ }
+ }
+
+ be_roce_map_pci_bars(adapter);
+ return 0;
+
+pci_map_err:
+ dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
+ be_unmap_pci_bars(adapter);
+ return -ENOMEM;
+}
+
+static void be_drv_cleanup(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
+ struct device *dev = &adapter->pdev->dev;
+
+ if (mem->va)
+ dma_free_coherent(dev, mem->size, mem->va, mem->dma);
+
+ mem = &adapter->rx_filter;
+ if (mem->va)
+ dma_free_coherent(dev, mem->size, mem->va, mem->dma);
+
+ mem = &adapter->stats_cmd;
+ if (mem->va)
+ dma_free_coherent(dev, mem->size, mem->va, mem->dma);
+}
+
+/* Allocate and initialize various fields in be_adapter struct */
+static int be_drv_init(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
+ struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
+ struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
+ struct device *dev = &adapter->pdev->dev;
+ int status = 0;
+
+ mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
+ mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
+ if (!mbox_mem_alloc->va)
+ return -ENOMEM;
+
+ mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
+ mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
+ mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
+
+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
+ rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
+ if (!rx_filter->va) {
+ status = -ENOMEM;
+ goto free_mbox;
+ }
+
+ if (lancer_chip(adapter))
+ stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+ else if (BE2_chip(adapter))
+ stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
+ else if (BE3_chip(adapter))
+ stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+ else
+ stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
+ stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+ &stats_cmd->dma, GFP_KERNEL);
+ if (!stats_cmd->va) {
+ status = -ENOMEM;
+ goto free_rx_filter;
+ }
+
+ mutex_init(&adapter->mbox_lock);
+ mutex_init(&adapter->mcc_lock);
+ mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_cq_lock);
+ init_completion(&adapter->et_cmd_compl);
+
+ pci_save_state(adapter->pdev);
+
+ INIT_DELAYED_WORK(&adapter->work, be_worker);
+
+ adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
+ adapter->error_recovery.resched_delay = 0;
+ INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
+ be_err_detection_task);
+
+ adapter->rx_fc = true;
+ adapter->tx_fc = true;
+
+ /* Must be a power of 2 or else MODULO will BUG_ON */
+ adapter->be_get_temp_freq = 64;
+
+ return 0;
+
+free_rx_filter:
+ dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
+free_mbox:
+ dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
+ mbox_mem_alloc->dma);
+ return status;
+}
+
+static void be_remove(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!adapter)
+ return;
+
+ be_roce_dev_remove(adapter);
+ be_intr_set(adapter, false);
+
+ be_cancel_err_detection(adapter);
+
+ unregister_netdev(adapter->netdev);
+
+ be_clear(adapter);
+
+ if (!pci_vfs_assigned(adapter->pdev))
+ be_cmd_reset_function(adapter);
+
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+
+ be_unmap_pci_bars(adapter);
+ be_drv_cleanup(adapter);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ free_netdev(adapter->netdev);
+}
+
+static ssize_t be_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct be_adapter *adapter = dev_get_drvdata(dev);
+
+ /* Unit: millidegree Celsius */
+ if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
+ return -EIO;
+ else
+ return sprintf(buf, "%u\n",
+ adapter->hwmon_info.be_on_die_temp * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444,
+ be_hwmon_show_temp, NULL, 1);
+
+static struct attribute *be_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(be_hwmon);
+
+static char *mc_name(struct be_adapter *adapter)
+{
+ char *str = ""; /* default */
+
+ switch (adapter->mc_type) {
+ case UMC:
+ str = "UMC";
+ break;
+ case FLEX10:
+ str = "FLEX10";
+ break;
+ case vNIC1:
+ str = "vNIC-1";
+ break;
+ case nPAR:
+ str = "nPAR";
+ break;
+ case UFP:
+ str = "UFP";
+ break;
+ case vNIC2:
+ str = "vNIC-2";
+ break;
+ default:
+ str = "";
+ }
+
+ return str;
+}
+
+static inline char *func_name(struct be_adapter *adapter)
+{
+ return be_physfn(adapter) ? "PF" : "VF";
+}
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case OC_DEVICE_ID1:
+ return OC_NAME;
+ case OC_DEVICE_ID2:
+ return OC_NAME_BE;
+ case OC_DEVICE_ID3:
+ case OC_DEVICE_ID4:
+ return OC_NAME_LANCER;
+ case BE_DEVICE_ID2:
+ return BE3_NAME;
+ case OC_DEVICE_ID5:
+ case OC_DEVICE_ID6:
+ return OC_NAME_SH;
+ default:
+ return BE_NAME;
+ }
+}
+
+static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
+{
+ struct be_adapter *adapter;
+ struct net_device *netdev;
+ int status = 0;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ goto do_none;
+
+ status = pci_request_regions(pdev, DRV_NAME);
+ if (status)
+ goto disable_dev;
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
+ if (!netdev) {
+ status = -ENOMEM;
+ goto rel_reg;
+ }
+ adapter = netdev_priv(netdev);
+ adapter->pdev = pdev;
+ pci_set_drvdata(pdev, adapter);
+ adapter->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (status) {
+ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
+ goto free_netdev;
+ }
+
+ status = be_map_pci_bars(adapter);
+ if (status)
+ goto free_netdev;
+
+ status = be_drv_init(adapter);
+ if (status)
+ goto unmap_bars;
+
+ status = be_setup(adapter);
+ if (status)
+ goto drv_cleanup;
+
+ be_netdev_init(netdev);
+ status = register_netdev(netdev);
+ if (status != 0)
+ goto unsetup;
+
+ be_roce_dev_add(adapter);
+
+ be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
+ adapter->error_recovery.probe_time = jiffies;
+
+ /* On-die temperature is not supported for VFs. */
+ if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
+ adapter->hwmon_info.hwmon_dev =
+ devm_hwmon_device_register_with_groups(&pdev->dev,
+ DRV_NAME,
+ adapter,
+ be_hwmon_groups);
+ adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
+ }
+
+ dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
+ func_name(adapter), mc_name(adapter), adapter->port_name);
+
+ return 0;
+
+unsetup:
+ be_clear(adapter);
+drv_cleanup:
+ be_drv_cleanup(adapter);
+unmap_bars:
+ be_unmap_pci_bars(adapter);
+free_netdev:
+ free_netdev(netdev);
+rel_reg:
+ pci_release_regions(pdev);
+disable_dev:
+ pci_disable_device(pdev);
+do_none:
+ dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
+ return status;
+}
+
+static int __maybe_unused be_suspend(struct device *dev_d)
+{
+ struct be_adapter *adapter = dev_get_drvdata(dev_d);
+
+ be_intr_set(adapter, false);
+ be_cancel_err_detection(adapter);
+
+ be_cleanup(adapter);
+
+ return 0;
+}
+
+static int __maybe_unused be_pci_resume(struct device *dev_d)
+{
+ struct be_adapter *adapter = dev_get_drvdata(dev_d);
+ int status = 0;
+
+ status = be_resume(adapter);
+ if (status)
+ return status;
+
+ be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
+
+ return 0;
+}
+
+/*
+ * An FLR will stop BE from DMAing any data.
+ */
+static void be_shutdown(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!adapter)
+ return;
+
+ be_roce_dev_shutdown(adapter);
+ cancel_delayed_work_sync(&adapter->work);
+ be_cancel_err_detection(adapter);
+
+ netif_device_detach(adapter->netdev);
+
+ be_cmd_reset_function(adapter);
+
+ pci_disable_device(pdev);
+}
+
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
+ be_roce_dev_remove(adapter);
+
+ if (!be_check_error(adapter, BE_ERROR_EEH)) {
+ be_set_error(adapter, BE_ERROR_EEH);
+
+ be_cancel_err_detection(adapter);
+
+ be_cleanup(adapter);
+ }
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ /* The error could cause the FW to trigger a flash debug dump.
+ * Resetting the card while flash dump is in progress
+ * can cause it not to recover; wait for it to finish.
+ * Wait only for the first function, as it is needed only once per
+ * adapter.
+ */
+ if (pdev->devfn == 0)
+ ssleep(30);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ dev_info(&adapter->pdev->dev,
+ "Waiting for FW to be ready after EEH reset\n");
+ status = be_fw_wait_ready(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ be_clear_error(adapter, BE_CLEAR_ALL);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ status = be_resume(adapter);
+ if (status)
+ goto err;
+
+ be_roce_dev_add(adapter);
+
+ be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+}
+
+static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct be_resources vft_res = {0};
+ int status;
+
+ if (!num_vfs)
+ be_vf_clear(adapter);
+
+ adapter->num_vfs = num_vfs;
+
+ if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable VFs while they are assigned\n");
+ return -EBUSY;
+ }
+
+ /* When the HW is in SRIOV capable configuration, the PF-pool resources
+ * are equally distributed across the max-number of VFs. The user may
+ * request only a subset of the max-vfs to be enabled.
+ * Based on num_vfs, redistribute the resources across num_vfs so that
+ * each VF gets access to a larger share of resources.
+ * This facility is not available in BE3 FW.
+ * Also, this is done by FW in Lancer chip.
+ */
+ if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
+ be_calculate_vf_res(adapter, adapter->num_vfs,
+ &vft_res);
+ status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
+ adapter->num_vfs, &vft_res);
+ if (status)
+ dev_err(&pdev->dev,
+ "Failed to optimize SR-IOV resources\n");
+ }
+
+ status = be_get_resources(adapter);
+ if (status)
+ return be_cmd_status(status);
+
+ /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+ rtnl_lock();
+ status = be_update_queues(adapter);
+ rtnl_unlock();
+ if (status)
+ return be_cmd_status(status);
+
+ if (adapter->num_vfs)
+ status = be_vf_setup(adapter);
+
+ if (!status)
+ return adapter->num_vfs;
+
+ return 0;
+}
+
+static const struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
+static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
+
+static struct pci_driver be_driver = {
+ .name = DRV_NAME,
+ .id_table = be_dev_ids,
+ .probe = be_probe,
+ .remove = be_remove,
+ .driver.pm = &be_pci_pm_ops,
+ .shutdown = be_shutdown,
+ .sriov_configure = be_pci_sriov_configure,
+ .err_handler = &be_eeh_handlers
+};
+
+static int __init be_init_module(void)
+{
+ int status;
+
+ if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
+ rx_frag_size != 2048) {
+ printk(KERN_WARNING DRV_NAME
+ " : Module param rx_frag_size must be 2048/4096/8192."
+ " Using 2048\n");
+ rx_frag_size = 2048;
+ }
+
+ if (num_vfs > 0) {
+ pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
+ pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
+ }
+
+ be_wq = create_singlethread_workqueue("be_wq");
+ if (!be_wq) {
+ pr_warn(DRV_NAME " : workqueue creation failed\n");
+ return -1;
+ }
+
+ be_err_recovery_workq =
+ create_singlethread_workqueue("be_err_recover");
+ if (!be_err_recovery_workq)
+ pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");
+
+ status = pci_register_driver(&be_driver);
+ if (status) {
+ destroy_workqueue(be_wq);
+ be_destroy_err_recovery_workq();
+ }
+ return status;
+}
+module_init(be_init_module);
+
+static void __exit be_exit_module(void)
+{
+ pci_unregister_driver(&be_driver);
+
+ be_destroy_err_recovery_workq();
+
+ if (be_wq)
+ destroy_workqueue(be_wq);
+}
+module_exit(be_exit_module);
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
new file mode 100644
index 0000000000..521c4c2b48
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005 - 2016 Broadcom
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "be.h"
+#include "be_cmds.h"
+
+static struct ocrdma_driver *ocrdma_drv;
+static LIST_HEAD(be_adapter_list);
+static DEFINE_MUTEX(be_adapter_list_lock);
+
+static void _be_roce_dev_add(struct be_adapter *adapter)
+{
+ struct be_dev_info dev_info;
+ int i, num_vec;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (!ocrdma_drv)
+ return;
+
+ if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) {
+ dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n");
+ return;
+ }
+
+ if (pdev->device == OC_DEVICE_ID5) {
+ /* only msix is supported on these devices */
+ if (!msix_enabled(adapter))
+ return;
+ /* DPP region address and length */
+ dev_info.dpp_unmapped_addr = pci_resource_start(pdev, 2);
+ dev_info.dpp_unmapped_len = pci_resource_len(pdev, 2);
+ } else {
+ dev_info.dpp_unmapped_addr = 0;
+ dev_info.dpp_unmapped_len = 0;
+ }
+ dev_info.pdev = adapter->pdev;
+ dev_info.db = adapter->db;
+ dev_info.unmapped_db = adapter->roce_db.io_addr;
+ dev_info.db_page_size = adapter->roce_db.size;
+ dev_info.db_total_size = adapter->roce_db.total_size;
+ dev_info.netdev = adapter->netdev;
+ memcpy(dev_info.mac_addr, adapter->netdev->dev_addr, ETH_ALEN);
+ dev_info.dev_family = adapter->sli_family;
+ if (msix_enabled(adapter)) {
+ /* provide all the vectors, so that EQ creation response
+ * can decide which one to use.
+ */
+ num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
+ dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
+ dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);
+ /* Provide the start index of the vectors so that, in case of
+ * linear usage, the base can be used as the starting point.
+ */
+ dev_info.msix.start_vector = adapter->num_evt_qs;
+ for (i = 0; i < dev_info.msix.num_vectors; i++) {
+ dev_info.msix.vector_list[i] =
+ adapter->msix_entries[i].vector;
+ }
+ } else {
+ dev_info.msix.num_vectors = 0;
+ dev_info.intr_mode = BE_INTERRUPT_MODE_INTX;
+ }
+ adapter->ocrdma_dev = ocrdma_drv->add(&dev_info);
+}
+
+void be_roce_dev_add(struct be_adapter *adapter)
+{
+ if (be_roce_supported(adapter)) {
+ INIT_LIST_HEAD(&adapter->entry);
+ mutex_lock(&be_adapter_list_lock);
+ list_add_tail(&adapter->entry, &be_adapter_list);
+
+ /* Invoke the add() routine of the RoCE driver only if a valid
+ * driver has registered an add method and add() has not yet been
+ * invoked for this adapter.
+ */
+ _be_roce_dev_add(adapter);
+ mutex_unlock(&be_adapter_list_lock);
+ }
+}
+
+static void _be_roce_dev_remove(struct be_adapter *adapter)
+{
+ if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
+ ocrdma_drv->remove(adapter->ocrdma_dev);
+ adapter->ocrdma_dev = NULL;
+}
+
+void be_roce_dev_remove(struct be_adapter *adapter)
+{
+ if (be_roce_supported(adapter)) {
+ mutex_lock(&be_adapter_list_lock);
+ _be_roce_dev_remove(adapter);
+ list_del(&adapter->entry);
+ mutex_unlock(&be_adapter_list_lock);
+ }
+}
+
+void be_roce_dev_shutdown(struct be_adapter *adapter)
+{
+ if (be_roce_supported(adapter)) {
+ mutex_lock(&be_adapter_list_lock);
+ if (ocrdma_drv && adapter->ocrdma_dev &&
+ ocrdma_drv->state_change_handler)
+ ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
+ BE_DEV_SHUTDOWN);
+ mutex_unlock(&be_adapter_list_lock);
+ }
+}
+
+int be_roce_register_driver(struct ocrdma_driver *drv)
+{
+ struct be_adapter *dev;
+
+ mutex_lock(&be_adapter_list_lock);
+ if (ocrdma_drv) {
+ mutex_unlock(&be_adapter_list_lock);
+ return -EINVAL;
+ }
+ ocrdma_drv = drv;
+ list_for_each_entry(dev, &be_adapter_list, entry) {
+ _be_roce_dev_add(dev);
+ }
+ mutex_unlock(&be_adapter_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(be_roce_register_driver);
+
+void be_roce_unregister_driver(struct ocrdma_driver *drv)
+{
+ struct be_adapter *dev;
+
+ mutex_lock(&be_adapter_list_lock);
+ list_for_each_entry(dev, &be_adapter_list, entry) {
+ if (dev->ocrdma_dev)
+ _be_roce_dev_remove(dev);
+ }
+ ocrdma_drv = NULL;
+ mutex_unlock(&be_adapter_list_lock);
+}
+EXPORT_SYMBOL(be_roce_unregister_driver);
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
new file mode 100644
index 0000000000..801e105221
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005 - 2016 Broadcom
+ * All rights reserved.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef BE_ROCE_H
+#define BE_ROCE_H
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#define BE_ROCE_ABI_VERSION 1
+
+struct ocrdma_dev;
+
+enum be_interrupt_mode {
+ BE_INTERRUPT_MODE_MSIX = 0,
+ BE_INTERRUPT_MODE_INTX = 1,
+ BE_INTERRUPT_MODE_MSI = 2,
+};
+
+#define MAX_MSIX_VECTORS 32
+struct be_dev_info {
+ u8 __iomem *db;
+ u64 unmapped_db;
+ u32 db_page_size;
+ u32 db_total_size;
+ u64 dpp_unmapped_addr;
+ u32 dpp_unmapped_len;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
+ u32 dev_family;
+ enum be_interrupt_mode intr_mode;
+ struct {
+ int num_vectors;
+ int start_vector;
+ u32 vector_list[MAX_MSIX_VECTORS];
+ } msix;
+};
+
+/* The ocrdma driver registers these callback functions with the NIC driver. */
+struct ocrdma_driver {
+ unsigned char name[32];
+ u32 be_abi_version;
+ struct ocrdma_dev *(*add) (struct be_dev_info *dev_info);
+ void (*remove) (struct ocrdma_dev *);
+ void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
+};
+
+enum be_roce_event {
+ BE_DEV_SHUTDOWN = 2
+};
+
+/* APIs for the RoCE driver to register callback handlers, which are
+ * invoked when the device is added, removed, brought up or brought down.
+ */
+int be_roce_register_driver(struct ocrdma_driver *drv);
+void be_roce_unregister_driver(struct ocrdma_driver *drv);
+
+/* API for RoCE driver to issue mailbox commands */
+int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
+ int wrb_payload_size, u16 *cmd_status, u16 *ext_status);
+
+#endif /* BE_ROCE_H */