author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/intel/i40e
parent    Initial commit. (diff)
download  linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
          linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile                29
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h                1323
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c         1193
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h          136
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h     2430
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_alloc.h            35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c          756
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c         6003
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c            1987
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h             283
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c         1036
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ddp.c             481
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c        1855
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h           41
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c            131
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h             28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c        5818
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c             334
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h             214
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c        1120
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h         158
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c          16740
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c            1671
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h            59
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h       492
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c            1587
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h        899
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h           43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h           209
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c           3977
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h            557
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h     107
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h           1539
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c    4877
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h     144
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c             745
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.h              38
37 files changed, 59075 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
new file mode 100644
index 000000000..2f21b3e89
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2013 - 2018 Intel Corporation.
+
+#
+# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
+#
+
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
+obj-$(CONFIG_I40E) += i40e.o
+
+i40e-objs := i40e_main.o \
+ i40e_ethtool.o \
+ i40e_adminq.o \
+ i40e_common.o \
+ i40e_hmc.o \
+ i40e_lan_hmc.o \
+ i40e_nvm.o \
+ i40e_debugfs.o \
+ i40e_diag.o \
+ i40e_txrx.o \
+ i40e_ptp.o \
+ i40e_ddp.o \
+ i40e_client.o \
+ i40e_virtchnl_pf.o \
+ i40e_xsk.o
+
+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
new file mode 100644
index 000000000..7d4cc4eaf
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -0,0 +1,1323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#ifndef _I40E_H_
+#define _I40E_H_
+
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/hashtable.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
+#include <linux/if_bridge.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/udp_tunnel.h>
+#include <net/xdp_sock.h>
+#include <linux/bitfield.h>
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+#include <linux/net/intel/i40e_client.h>
+#include <linux/avf/virtchnl.h>
+#include "i40e_virtchnl_pf.h"
+#include "i40e_txrx.h"
+#include "i40e_dcb.h"
+
+/* Useful i40e defaults */
+#define I40E_MAX_VEB 16
+
+#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
+#define I40E_DEFAULT_NUM_DESCRIPTORS 512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40E_MIN_NUM_DESCRIPTORS 64
+#define I40E_MIN_MSIX 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+ (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
+#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_MAX_VF_QUEUES 16
+#define i40e_pf_get_max_q_per_tc(pf) \
+ (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
+#define I40E_FDIR_RING_COUNT 32
+#define I40E_MAX_AQ_BUF_SIZE 4096
+#define I40E_AQ_LEN 256
+#define I40E_MIN_ARQ_LEN 1
+#define I40E_MIN_ASQ_LEN 2
+#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
+#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
+#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
+
+#define I40E_NVM_VERSION_LO_SHIFT 0
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK 0xffff
+#define I40E_OEM_VER_PATCH_MASK 0xff
+#define I40E_OEM_VER_BUILD_SHIFT 8
+#define I40E_OEM_VER_SHIFT 24
+#define I40E_PHY_DEBUG_ALL \
+ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
+
+#define I40E_OEM_EETRACK_ID 0xffffffff
+#define I40E_OEM_GEN_SHIFT 24
+#define I40E_OEM_SNAP_MASK 0x00ff0000
+#define I40E_OEM_SNAP_SHIFT 16
+#define I40E_OEM_RELEASE_MASK 0x0000ffff
+
+#define I40E_RX_DESC(R, i) \
+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) \
+ (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40E_TX_FDIRDESC(R, i) \
+ (&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
+
+/* BW rate limiting */
+#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
+#define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
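+
+/* Worked example (illustrative): a 400 Mbps Tx rate limit is programmed as
+ * 400 / I40E_BW_CREDIT_DIVISOR = 8 credits of 50 Mbps each; a rate given in
+ * bytes per second is first converted to Mbps by dividing by
+ * I40E_BW_MBPS_DIVISOR (125000 bytes/s per Mbps).
+ */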
+
+/* driver state flags */
+enum i40e_state_t {
+ __I40E_TESTING,
+ __I40E_CONFIG_BUSY,
+ __I40E_CONFIG_DONE,
+ __I40E_DOWN,
+ __I40E_SERVICE_SCHED,
+ __I40E_ADMINQ_EVENT_PENDING,
+ __I40E_MDD_EVENT_PENDING,
+ __I40E_VFLR_EVENT_PENDING,
+ __I40E_RESET_RECOVERY_PENDING,
+ __I40E_TIMEOUT_RECOVERY_PENDING,
+ __I40E_MISC_IRQ_REQUESTED,
+ __I40E_RESET_INTR_RECEIVED,
+ __I40E_REINIT_REQUESTED,
+ __I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
+ __I40E_CORE_RESET_REQUESTED,
+ __I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_EMP_RESET_INTR_RECEIVED,
+ __I40E_SUSPENDED,
+ __I40E_PTP_TX_IN_PROGRESS,
+ __I40E_BAD_EEPROM,
+ __I40E_DOWN_REQUESTED,
+ __I40E_FD_FLUSH_REQUESTED,
+ __I40E_FD_ATR_AUTO_DISABLED,
+ __I40E_FD_SB_AUTO_DISABLED,
+ __I40E_RESET_FAILED,
+ __I40E_PORT_SUSPENDED,
+ __I40E_VF_DISABLE,
+ __I40E_MACVLAN_SYNC_PENDING,
+ __I40E_TEMP_LINK_POLLING,
+ __I40E_CLIENT_SERVICE_REQUESTED,
+ __I40E_CLIENT_L2_CHANGE,
+ __I40E_CLIENT_RESET,
+ __I40E_VIRTCHNL_OP_PENDING,
+ __I40E_RECOVERY_MODE,
+ __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_IN_REMOVE,
+ __I40E_VFS_RELEASING,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_STATE_SIZE__,
+};
+
+#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
+
+/* VSI state flags */
+enum i40e_vsi_state_t {
+ __I40E_VSI_DOWN,
+ __I40E_VSI_NEEDS_RESTART,
+ __I40E_VSI_SYNCING_FILTERS,
+ __I40E_VSI_OVERFLOW_PROMISC,
+ __I40E_VSI_REINIT_REQUESTED,
+ __I40E_VSI_DOWN_REQUESTED,
+ __I40E_VSI_RELEASING,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_VSI_STATE_SIZE__,
+};
+
+enum i40e_interrupt_policy {
+ I40E_INTERRUPT_BEST_CASE,
+ I40E_INTERRUPT_MEDIUM,
+ I40E_INTERRUPT_LOWEST
+};
+
+struct i40e_lump_tracking {
+ u16 num_entries;
+ u16 list[];
+#define I40E_PILE_VALID_BIT 0x8000
+#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
+};
+
+#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
+#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
+#define I40E_FDIR_BUFFER_FULL_MARGIN 10
+#define I40E_FDIR_BUFFER_HEAD_ROOM 32
+#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
+
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
+
+enum i40e_fd_stat_idx {
+ I40E_FD_STAT_ATR,
+ I40E_FD_STAT_SB,
+ I40E_FD_STAT_ATR_TUNNEL,
+ I40E_FD_STAT_PF_COUNT
+};
+#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
+#define I40E_FD_ATR_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
+#define I40E_FD_SB_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
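+
+/* Example (illustrative): the FD statistics are laid out as one
+ * (ATR, SB, ATR_TUNNEL) triplet per PF, so for pf_id == 2 the base index is
+ * 2 * I40E_FD_STAT_PF_COUNT == 6 and I40E_FD_SB_STAT_IDX(2) == 7.
+ */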
+
+/* The following structure contains the data parsed from the user-defined
+ * field of the ethtool_rx_flow_spec structure.
+ */
+struct i40e_rx_flow_userdef {
+ bool flex_filter;
+ u16 flex_word;
+ u16 flex_offset;
+};
+
+struct i40e_fdir_filter {
+ struct hlist_node fdir_node;
+ /* filter input set */
+ u8 flow_type;
+ u8 ipl4_proto;
+ /* TX packet view of src and dst */
+ __be32 dst_ip;
+ __be32 src_ip;
+ __be32 dst_ip6[4];
+ __be32 src_ip6[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 sctp_v_tag;
+
+ __be16 vlan_etype;
+ __be16 vlan_tag;
+ /* Flexible data to match within the packet payload */
+ __be16 flex_word;
+ u16 flex_offset;
+ bool flex_filter;
+
+ /* filter control */
+ u16 q_index;
+ u8 flex_off;
+ u8 pctype;
+ u16 dest_vsi;
+ u8 dest_ctl;
+ u8 fd_status;
+ u16 cnt_index;
+ u32 fd_id;
+};
+
+#define I40E_CLOUD_FIELD_OMAC BIT(0)
+#define I40E_CLOUD_FIELD_IMAC BIT(1)
+#define I40E_CLOUD_FIELD_IVLAN BIT(2)
+#define I40E_CLOUD_FIELD_TEN_ID BIT(3)
+#define I40E_CLOUD_FIELD_IIP BIT(4)
+
+#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_IVLAN)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
+ I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_IVLAN | \
+ I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP
+
+struct i40e_cloud_filter {
+ struct hlist_node cloud_node;
+ unsigned long cookie;
+ /* cloud filter input set follows */
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ __be16 vlan_id;
+ u16 seid; /* filter control */
+ __be16 dst_port;
+ __be16 src_port;
+ u32 tenant_id;
+ union {
+ struct {
+ struct in_addr dst_ip;
+ struct in_addr src_ip;
+ } v4;
+ struct {
+ struct in6_addr dst_ip6;
+ struct in6_addr src_ip6;
+ } v6;
+ } ip;
+#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
+#define src_ipv6 ip.v6.src_ip6.s6_addr32
+#define dst_ipv4 ip.v4.dst_ip.s_addr
+#define src_ipv4 ip.v4.src_ip.s_addr
+ u16 n_proto; /* Ethernet Protocol */
+ u8 ip_proto; /* IPPROTO value */
+ u8 flags;
+#define I40E_CLOUD_TNL_TYPE_NONE 0xff
+ u8 tunnel_type;
+};
+
+#define I40E_DCB_PRIO_TYPE_STRICT 0
+#define I40E_DCB_PRIO_TYPE_ETS 1
+#define I40E_DCB_STRICT_PRIO_CREDITS 127
+/* DCB per TC information data structure */
+struct i40e_tc_info {
+ u16 qoffset; /* Queue offset from base queue */
+ u16 qcount; /* Total Queues */
+ u8 netdev_tc; /* Netdev TC index if netdev associated */
+};
+
+/* TC configuration data structure */
+struct i40e_tc_configuration {
+ u8 numtc; /* Total number of enabled TCs */
+ u8 enabled_tc; /* TC map */
+ struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
+};
+
+#define I40E_UDP_PORT_INDEX_UNUSED 255
+struct i40e_udp_port_config {
+ /* AdminQ command interface expects port number in Host byte order */
+ u16 port;
+ u8 type;
+ u8 filter_index;
+};
+
+#define I40_DDP_FLASH_REGION 100
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+#define I40E_PROFILE_LIST_SIZE \
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
+#define I40E_DDP_PROFILE_NAME_MAX 64
+
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add);
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+
+struct i40e_ddp_profile_list {
+ u32 p_count;
+ struct i40e_profile_info p_info[];
+};
+
+struct i40e_ddp_old_profile_list {
+ struct list_head list;
+ size_t old_ddp_size;
+ u8 old_ddp_buf[];
+};
+
+/* macros related to FLX_PIT */
+#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
+ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK)
+#define I40E_FLEX_SET_DST_WORD(dst) (((dst) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)
+#define I40E_FLEX_SET_SRC_WORD(src) (((src) << \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK)
+#define I40E_FLEX_PREP_VAL(dst, fsize, src) (I40E_FLEX_SET_DST_WORD(dst) | \
+ I40E_FLEX_SET_FSIZE(fsize) | \
+ I40E_FLEX_SET_SRC_WORD(src))
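+
+/* Usage sketch (illustrative values, not upstream text): a FLX_PIT register
+ * value is assembled from the three fields above, e.g. extracting one
+ * 16-bit word from payload offset 3 into flexible destination word 50:
+ *
+ * u32 flex_pit = I40E_FLEX_PREP_VAL(50, 1, 3);
+ *
+ * Each SET macro shifts its argument into place and masks off any overflow.
+ */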
+
+#define I40E_MAX_FLEX_SRC_OFFSET 0x1F
+
+/* macros related to GLQF_ORT */
+#define I40E_ORT_SET_IDX(idx) (((idx) << \
+ I40E_GLQF_ORT_PIT_INDX_SHIFT) & \
+ I40E_GLQF_ORT_PIT_INDX_MASK)
+
+#define I40E_ORT_SET_COUNT(count) (((count) << \
+ I40E_GLQF_ORT_FIELD_CNT_SHIFT) & \
+ I40E_GLQF_ORT_FIELD_CNT_MASK)
+
+#define I40E_ORT_SET_PAYLOAD(payload) (((payload) << \
+ I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) & \
+ I40E_GLQF_ORT_FLX_PAYLOAD_MASK)
+
+#define I40E_ORT_PREP_VAL(idx, count, payload) (I40E_ORT_SET_IDX(idx) | \
+ I40E_ORT_SET_COUNT(count) | \
+ I40E_ORT_SET_PAYLOAD(payload))
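+
+/* Analogous sketch (illustrative): a GLQF_ORT entry pointing at the three
+ * L4 FLX_PIT entries (I40E_FLEX_PIT_IDX_START_L4, defined below), with
+ * flexible payload enabled, could be built as
+ *
+ * u32 ort = I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4, 3, 1);
+ */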
+
+#define I40E_L3_GLQF_ORT_IDX 34
+#define I40E_L4_GLQF_ORT_IDX 35
+
+/* Flex PIT register index */
+#define I40E_FLEX_PIT_IDX_START_L3 3
+#define I40E_FLEX_PIT_IDX_START_L4 6
+
+#define I40E_FLEX_PIT_TABLE_SIZE 3
+
+#define I40E_FLEX_DEST_UNUSED 63
+
+#define I40E_FLEX_INDEX_ENTRIES 8
+
+/* Flex MASK to disable all flexible entries */
+#define I40E_FLEX_INPUT_MASK (I40E_FLEX_50_MASK | I40E_FLEX_51_MASK | \
+ I40E_FLEX_52_MASK | I40E_FLEX_53_MASK | \
+ I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
+ I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+
+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
+
+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
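+
+/* Note (illustrative): nextq_type is token-pasted onto I40E_QUEUE_TYPE_, so
+ * callers pass the bare suffix, e.g. chaining an Rx cause to the Tx queue
+ * of the same pair as its "next queue":
+ *
+ * wr32(hw, I40E_QINT_RQCTL(qp), I40E_QINT_RQCTL_VAL(qp, vector, TX));
+ */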
+
+struct i40e_flex_pit {
+ struct list_head list;
+ u16 src_offset;
+ u8 pit_index;
+};
+
+struct i40e_fwd_adapter {
+ struct net_device *netdev;
+ int bit_no;
+};
+
+struct i40e_channel {
+ struct list_head list;
+ bool initialized;
+ u8 type;
+ u16 vsi_number; /* Assigned VSI number from AQ 'Add VSI' response */
+ u16 stat_counter_idx;
+ u16 base_queue;
+ u16 num_queue_pairs; /* Requested by user */
+ u16 seid;
+
+ u8 enabled_tc;
+ struct i40e_aqc_vsi_properties_data info;
+
+ u64 max_tx_rate;
+ struct i40e_fwd_adapter *fwd;
+
+ /* the VSI this channel belongs to */
+ struct i40e_vsi *parent_vsi;
+};
+
+struct i40e_ptp_pins_settings;
+
+static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
+{
+ return !!ch->fwd;
+}
+
+static inline const u8 *i40e_channel_mac(struct i40e_channel *ch)
+{
+ if (i40e_is_channel_macvlan(ch))
+ return ch->fwd->netdev->dev_addr;
+ else
+ return NULL;
+}
+
+/* struct that defines the Ethernet device */
+struct i40e_pf {
+ struct pci_dev *pdev;
+ struct i40e_hw hw;
+ DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
+ struct msix_entry *msix_entries;
+ bool fc_autoneg_status;
+
+ u16 eeprom_version;
+ u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
+ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
+ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
+ u16 num_req_vfs; /* num VFs requested for this PF */
+ u16 num_vf_qps; /* num queue pairs per VF */
+ u16 num_lan_qps; /* num lan queues this PF has set up */
+ u16 num_lan_msix; /* num queue vectors for the base PF vsi */
+ u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
+ u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
+ int iwarp_base_vector;
+ int queues_left; /* queues left unclaimed */
+ u16 alloc_rss_size; /* allocated RSS queues */
+ u16 rss_size_max; /* HW defined max RSS queues */
+ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u16 num_alloc_vsi; /* num VSIs this driver supports */
+ u8 atr_sample_rate;
+ bool wol_en;
+
+ struct hlist_head fdir_filter_list;
+ u16 fdir_pf_active_filters;
+ unsigned long fd_flush_timestamp;
+ u32 fd_flush_cnt;
+ u32 fd_add_err;
+ u32 fd_atr_cnt;
+
+ /* Book-keeping of side-band filter count per flow-type.
+ * This is used to detect and handle input set changes for
+ * respective flow-type.
+ */
+ u16 fd_tcp4_filter_cnt;
+ u16 fd_udp4_filter_cnt;
+ u16 fd_sctp4_filter_cnt;
+ u16 fd_ip4_filter_cnt;
+
+ u16 fd_tcp6_filter_cnt;
+ u16 fd_udp6_filter_cnt;
+ u16 fd_sctp6_filter_cnt;
+ u16 fd_ip6_filter_cnt;
+
+ /* Flexible filter table values that need to be programmed into
+ * hardware, which expects L3 and L4 to be programmed separately. We
+ * need to ensure that the values are in ascending order and don't have
+ * duplicates, so we track the L3 and L4 values in separate lists.
+ */
+ struct list_head l3_flex_pit_list;
+ struct list_head l4_flex_pit_list;
+
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
+
+ struct hlist_head cloud_filter_list;
+ u16 num_cloud_filters;
+
+ enum i40e_interrupt_policy int_policy;
+ u16 rx_itr_default;
+ u16 tx_itr_default;
+ u32 msg_enable;
+ char int_name[I40E_INT_NAME_STR_LEN];
+ u16 adminq_work_limit; /* num of admin receive queue desc to process */
+ unsigned long service_timer_period;
+ unsigned long service_timer_previous;
+ struct timer_list service_timer;
+ struct work_struct service_task;
+
+ u32 hw_features;
+#define I40E_HW_RSS_AQ_CAPABLE BIT(0)
+#define I40E_HW_128_QP_RSS_CAPABLE BIT(1)
+#define I40E_HW_ATR_EVICT_CAPABLE BIT(2)
+#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3)
+#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4)
+#define I40E_HW_NO_PCI_LINK_CHECK BIT(5)
+#define I40E_HW_100M_SGMII_CAPABLE BIT(6)
+#define I40E_HW_NO_DCB_SUPPORT BIT(7)
+#define I40E_HW_USE_SET_LLDP_MIB BIT(8)
+#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
+#define I40E_HW_PTP_L4_CAPABLE BIT(10)
+#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
+#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
+#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
+#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
+#define I40E_HW_STOP_FW_LLDP BIT(16)
+#define I40E_HW_PORT_ID_VALID BIT(17)
+#define I40E_HW_RESTART_AUTONEG BIT(18)
+
+ u32 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40E_FLAG_MSI_ENABLED BIT(1)
+#define I40E_FLAG_MSIX_ENABLED BIT(2)
+#define I40E_FLAG_RSS_ENABLED BIT(3)
+#define I40E_FLAG_VMDQ_ENABLED BIT(4)
+#define I40E_FLAG_SRIOV_ENABLED BIT(5)
+#define I40E_FLAG_DCB_CAPABLE BIT(6)
+#define I40E_FLAG_DCB_ENABLED BIT(7)
+#define I40E_FLAG_FD_SB_ENABLED BIT(8)
+#define I40E_FLAG_FD_ATR_ENABLED BIT(9)
+#define I40E_FLAG_MFP_ENABLED BIT(10)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT(12)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT(13)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15)
+#define I40E_FLAG_LEGACY_RX BIT(16)
+#define I40E_FLAG_PTP BIT(17)
+#define I40E_FLAG_IWARP_ENABLED BIT(18)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20)
+#define I40E_FLAG_TC_MQPRIO BIT(21)
+#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
+#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
+#define I40E_FLAG_RS_FEC BIT(25)
+#define I40E_FLAG_BASE_R_FEC BIT(26)
+/* TOTAL_PORT_SHUTDOWN
+ * Allows the link on the NIC's port to be physically disabled.
+ * If enabled, then (after a link-down request from the OS)
+ * no link, traffic or LED activity is possible on that port.
+ *
+ * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set,
+ * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED is explicitly forced to true
+ * and cannot be disabled by the system administrator at that time.
+ * The two features are exclusive in terms of configuration, but they behave
+ * similarly (both allow the port's physical link to be disabled), with the
+ * following differences:
+ * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is
+ * supported by the whole family of 7xx Intel Ethernet Controllers
+ * - TOTAL_PORT_SHUTDOWN may be enabled only before the OS loads (in the
+ * BIOS), and only if the motherboard's BIOS and the NIC's FW support it
+ * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is brought down
+ * by sending phy_type=0 to the NIC's FW
+ * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered; instead
+ * the link is brought down by clearing the I40E_AQ_PHY_ENABLE_LINK bit
+ * in the abilities field of struct i40e_aq_set_phy_config
+ */
+#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
+
+ struct i40e_client_instance *cinst;
+ bool stat_offsets_loaded;
+ struct i40e_hw_port_stats stats;
+ struct i40e_hw_port_stats stats_offsets;
+ u32 tx_timeout_count;
+ u32 tx_timeout_recovery_level;
+ unsigned long tx_timeout_last_recovery;
+ u32 tx_sluggish_count;
+ u32 hw_csum_rx_error;
+ u32 led_status;
+ u16 corer_count; /* Core reset count */
+ u16 globr_count; /* Global reset count */
+ u16 empr_count; /* EMP reset count */
+ u16 pfr_count; /* PF reset count */
+ u16 sw_int_count; /* SW interrupt count */
+
+ struct mutex switch_mutex;
+ u16 lan_vsi; /* our default LAN VSI */
+ u16 lan_veb; /* initial relay, if exists */
+#define I40E_NO_VEB 0xffff
+#define I40E_NO_VSI 0xffff
+ u16 next_vsi; /* Next unallocated VSI - 0-based! */
+ struct i40e_vsi **vsi;
+ struct i40e_veb *veb[I40E_MAX_VEB];
+
+ struct i40e_lump_tracking *qp_pile;
+ struct i40e_lump_tracking *irq_pile;
+
+ /* switch config info */
+ u16 pf_seid;
+ u16 main_vsi_seid;
+ u16 mac_seid;
+ struct kobject *switch_kobj;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *i40e_dbg_pf;
+#endif /* CONFIG_DEBUG_FS */
+ bool cur_promisc;
+
+ u16 instance; /* A unique number per i40e_pf instance in the system */
+
+ /* sr-iov config info */
+ struct i40e_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u32 vf_aq_requests;
+ u32 arq_overflows; /* Not fatal, possibly indicative of problems */
+
+ /* DCBx/DCBNL capability for PF that indicates
+ * whether DCBx is managed by firmware or host
+ * based agent (LLDPAD). Also, indicates what
+ * flavor of DCBx protocol (IEEE/CEE) is supported
+ * by the device. For now we're supporting IEEE
+ * mode only.
+ */
+ u16 dcbx_cap;
+
+ struct i40e_filter_control_settings filter_settings;
+ struct i40e_rx_pb_config pb_cfg; /* Current Rx packet buffer config */
+ struct i40e_dcbx_config tmp_cfg;
+
+/* GPIO defines used by PTP */
+#define I40E_SDP3_2 18
+#define I40E_SDP3_3 19
+#define I40E_GPIO_4 20
+#define I40E_LED2_0 26
+#define I40E_LED2_1 27
+#define I40E_LED3_0 28
+#define I40E_LED3_1 29
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_HI \
+ (1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRV_SDP_DATA \
+ (1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_0 \
+ (0 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_1 \
+ (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_RESERVED BIT(2)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z \
+ (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_DIR_OUT \
+ (1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_DRV_HI \
+ (1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_HI_RST \
+ (1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TIMESYNC_0 \
+ (3 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TIMESYNC_1 \
+ (4 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN \
+ (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT \
+ (1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0)
+#define I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1)
+#define I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0)
+#define I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1)
+#define I40E_GLGEN_GPIO_CTL_LED_INIT \
+ (I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z | \
+ I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | \
+ I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_OUT_DEFAULT | \
+ I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN)
+#define I40E_PRTTSYN_AUX_1_INSTNT \
+ (1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENABLE \
+ (1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_CLK_MOD (3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD \
+ (I40E_PRTTSYN_AUX_0_OUT_ENABLE | I40E_PRTTSYN_AUX_0_OUT_CLK_MOD)
+#define I40E_PTP_HALF_SECOND 500000000LL /* nanoseconds */
+#define I40E_PTP_2_SEC_DELAY 2
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ struct hwtstamp_config tstamp_config;
+ struct timespec64 ptp_prev_hw_time;
+ struct work_struct ptp_pps_work;
+ struct work_struct ptp_extts0_work;
+ struct work_struct ptp_extts1_work;
+ ktime_t ptp_reset_start;
+ struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
+ u32 ptp_adj_mult;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 rx_hwtstamp_cleared;
+ u32 latch_event_flags;
+ u64 ptp_pps_start;
+ u32 pps_delay;
+ spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
+ struct ptp_pin_desc ptp_pin[3];
+ unsigned long latch_events[4];
+ bool ptp_tx;
+ bool ptp_rx;
+ struct i40e_ptp_pins_settings *ptp_pins;
+ u16 rss_table_size; /* HW RSS table size */
+ u32 max_bw;
+ u32 min_bw;
+
+ u32 ioremap_len;
+ u32 fd_inv;
+ u16 phy_led_val;
+
+ u16 override_q_count;
+ u16 last_sw_conf_flags;
+ u16 last_sw_conf_valid_flags;
+ /* List to keep previous DDP profiles to be rolled back in the future */
+ struct list_head ddp_old_prof;
+};
+
+/**
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * @macaddr: the MAC Address as the base key
+ *
+ * Simply copies the address and returns it as a u64 for hashing
+ **/
+static inline u64 i40e_addr_to_hkey(const u8 *macaddr)
+{
+ u64 key = 0;
+
+ ether_addr_copy((u8 *)&key, macaddr);
+ return key;
+}
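+
+/* Usage sketch (illustrative): the returned key feeds the VSI's fixed-size
+ * MAC filter hash table, e.g.
+ *
+ * hash_add(vsi->mac_filter_hash, &f->hlist, i40e_addr_to_hkey(macaddr));
+ */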
+
+enum i40e_filter_state {
+ I40E_FILTER_INVALID = 0, /* Invalid state */
+ I40E_FILTER_NEW, /* New, not sent to FW yet */
+ I40E_FILTER_ACTIVE, /* Added to switch by FW */
+ I40E_FILTER_FAILED, /* Rejected by FW */
+ I40E_FILTER_REMOVE, /* To be removed */
+/* There is no 'removed' state; the filter struct is freed */
+};
+struct i40e_mac_filter {
+ struct hlist_node hlist;
+ u8 macaddr[ETH_ALEN];
+#define I40E_VLAN_ANY -1
+ s16 vlan;
+ enum i40e_filter_state state;
+};
+
+/* Wrapper structure to keep track of filters while we are preparing to send
+ * firmware commands. We cannot send firmware commands while holding a
+ * spinlock, since sending them might sleep. To avoid this, we wrap the added filters in
+ * a separate structure, which will track the state change and update the real
+ * filter while under lock. We can't simply hold the filters in a separate
+ * list, as this opens a window for a race condition when adding new MAC
+ * addresses to all VLANs, or when adding new VLANs to all MAC addresses.
+ */
+struct i40e_new_mac_filter {
+ struct hlist_node hlist;
+ struct i40e_mac_filter *f;
+
+ /* Track future changes to state separately */
+ enum i40e_filter_state state;
+};
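+
+/* Sketch of the intended pattern (illustrative, simplified from
+ * i40e_sync_vsi_filters()): under mac_filter_hash_lock the changed filters
+ * are copied onto a temporary list of i40e_new_mac_filter entries; the lock
+ * is dropped for the (potentially sleeping) AdminQ calls; the lock is then
+ * re-taken and each temporary entry's state is folded back into its
+ * underlying i40e_mac_filter.
+ */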
+
+struct i40e_veb {
+ struct i40e_pf *pf;
+ u16 idx;
+ u16 veb_idx; /* index of VEB parent */
+ u16 seid;
+ u16 uplink_seid;
+ u16 stats_idx; /* index of VEB statistics */
+ u8 enabled_tc;
+ u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
+ u16 flags;
+ u16 bw_limit;
+ u8 bw_max_quanta;
+ bool is_abs_credits;
+ u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+ struct kobject *kobj;
+ bool stat_offsets_loaded;
+ struct i40e_eth_stats stats;
+ struct i40e_eth_stats stats_offsets;
+ struct i40e_veb_tc_stats tc_stats;
+ struct i40e_veb_tc_stats tc_stats_offsets;
+};
+
+/* struct that defines a VSI, associated with a dev */
+struct i40e_vsi {
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ bool netdev_registered;
+ bool stat_offsets_loaded;
+
+ u32 current_netdev_flags;
+ DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
+#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
+ unsigned long flags;
+
+ /* Per VSI lock to protect elements/hash (MAC filter) */
+ spinlock_t mac_filter_hash_lock;
+ /* Fixed size hash table with 2^8 buckets for MAC filters */
+ DECLARE_HASHTABLE(mac_filter_hash, 8);
+ bool has_vlan_filter;
+
+ /* VSI stats */
+ struct rtnl_link_stats64 net_stats;
+ struct rtnl_link_stats64 net_stats_offsets;
+ struct i40e_eth_stats eth_stats;
+ struct i40e_eth_stats eth_stats_offsets;
+ u64 tx_restart;
+ u64 tx_busy;
+ u64 tx_linearize;
+ u64 tx_force_wb;
+ u64 tx_stopped;
+ u64 rx_buf_failed;
+ u64 rx_page_failed;
+ u64 rx_page_reuse;
+ u64 rx_page_alloc;
+ u64 rx_page_waive;
+ u64 rx_page_busy;
+
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+ struct i40e_ring **tx_rings;
+ struct i40e_ring **xdp_rings; /* XDP Tx rings */
+
+ u32 active_filters;
+ u32 promisc_threshold;
+
+ u16 work_limit;
+ u16 int_rate_limit; /* value in usecs */
+
+ u16 rss_table_size; /* HW RSS table size */
+ u16 rss_size; /* Allocated RSS queues */
+ u8 *rss_hkey_user; /* User configured hash keys */
+ u8 *rss_lut_user; /* User configured lookup table entries */
+
+ u16 max_frame;
+ u16 rx_buf_len;
+
+ struct bpf_prog *xdp_prog;
+
+ /* List of q_vectors allocated to this VSI */
+ struct i40e_q_vector **q_vectors;
+ int num_q_vectors;
+ int base_vector;
+ bool irqs_ready;
+
+ u16 seid; /* HW index of this VSI (absolute index) */
+ u16 id; /* VSI number */
+ u16 uplink_seid;
+
+ u16 base_queue; /* vsi's first queue in hw array */
+ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 req_queue_pairs; /* User requested queue pairs */
+ u16 num_queue_pairs; /* Used tx and rx pairs */
+ u16 num_tx_desc;
+ u16 num_rx_desc;
+ enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
+ s16 vf_id; /* Virtual function ID for SRIOV VSIs */
+
+ struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
+ struct i40e_tc_configuration tc_config;
+ struct i40e_aqc_vsi_properties_data info;
+
+ /* VSI BW limit (absolute across all TCs) */
+ u16 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */
+
+ /* Relative TC credits across VSIs */
+ u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit credits within VSI */
+ u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit max quanta within VSI */
+ u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+
+ struct i40e_pf *back; /* Backreference to associated PF */
+ u16 idx; /* index in pf->vsi[] */
+ u16 veb_idx; /* index of VEB parent */
+ struct kobject *kobj; /* sysfs object */
+ bool current_isup; /* Sync 'link up' logging */
+ enum i40e_aq_link_speed current_speed; /* Sync link speed logging */
+
+ /* channel specific fields */
+ u16 cnt_q_avail; /* num of queues available for channel usage */
+ u16 orig_rss_size;
+ u16 current_rss_size;
+ bool reconfig_rss;
+
+ u16 next_base_queue; /* next queue to be used for channel setup */
+
+ struct list_head ch_list;
+ u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS];
+
+ /* macvlan fields */
+#define I40E_MAX_MACVLANS 128 /* Max HW vectors - 1 on FVL */
+#define I40E_MIN_MACVLAN_VECTORS 2 /* Min vectors to enable macvlans */
+ DECLARE_BITMAP(fwd_bitmask, I40E_MAX_MACVLANS);
+ struct list_head macvlan_list;
+ int macvlan_cnt;
+
+ void *priv; /* client driver data reference. */
+
+ /* VSI specific handlers */
+ irqreturn_t (*irq_handler)(int irq, void *data);
+
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
+} ____cacheline_internodealigned_in_smp;
+
+struct i40e_netdev_priv {
+ struct i40e_vsi *vsi;
+};
+
+extern struct ida i40e_client_ida;
+
+/* struct that defines an interrupt vector */
+struct i40e_q_vector {
+ struct i40e_vsi *vsi;
+
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
+
+ struct napi_struct napi;
+
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[I40E_INT_NAME_STR_LEN];
+ bool arm_wb_state;
+} ____cacheline_internodealigned_in_smp;
+
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40e_pf *pf;
+};
+
+/**
+ * i40e_nvm_version_str - format the NVM version string
+ * @hw: ptr to the hardware info
+ **/
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
+{
+ static char buf[32];
+ u32 full_ver;
+
+ full_ver = hw->nvm.oem_ver;
+
+ if (hw->nvm.eetrack == I40E_OEM_EETRACK_ID) {
+ u8 gen, snap;
+ u16 release;
+
+ gen = (u8)(full_ver >> I40E_OEM_GEN_SHIFT);
+ snap = (u8)((full_ver & I40E_OEM_SNAP_MASK) >>
+ I40E_OEM_SNAP_SHIFT);
+ release = (u16)(full_ver & I40E_OEM_RELEASE_MASK);
+
+ snprintf(buf, sizeof(buf), "%x.%x.%x", gen, snap, release);
+ } else {
+ u8 ver, patch;
+ u16 build;
+
+ ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+ build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
+ I40E_OEM_VER_BUILD_MASK);
+ patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
+
+ snprintf(buf, sizeof(buf),
+ "%x.%02x 0x%x %d.%d.%d",
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+ I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+ I40E_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack, ver, build, patch);
+ }
+
+ return buf;
+}
+
+/**
+ * i40e_netdev_to_pf: Retrieve the PF struct for given netdev
+ * @netdev: the corresponding netdev
+ *
+ * Return the PF struct for the given netdev
+ **/
+static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->back;
+}
+
+static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
+ irqreturn_t (*irq_handler)(int, void *))
+{
+ vsi->irq_handler = irq_handler;
+}
+
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the PF struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+ return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
+/**
+ * i40e_read_fd_input_set - reads value of flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
+ *
+ * This function reads value of flow director input set register
+ * specified by 'addr' (which is specific to flow-type)
+ **/
+static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr)
+{
+ u64 val;
+
+ val = i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1));
+ val <<= 32;
+ val += i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0));
+
+ return val;
+}
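+
+/* Example (illustrative): the 64-bit input set for IPv4/TCP flows would be
+ * read as
+ *
+ * u64 inset = i40e_read_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ */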
+
+/**
+ * i40e_write_fd_input_set - writes value into flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
+ * @val: value to be written
+ *
+ * This function writes specified value to the register specified by 'addr'.
+ * This register is input set register based on flow-type.
+ **/
+static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
+ u16 addr, u64 val)
+{
+ i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1),
+ (u32)(val >> 32));
+ i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0),
+ (u32)(val & 0xFFFFFFFFULL));
+}
+
+/**
+ * i40e_get_pf_count - get PCI PF count.
+ * @hw: pointer to a hw.
+ *
+ * Reports the number of PCI physical functions, i.e. the function
+ * number of the highest PF plus 1, as loaded from the NVM.
+ *
+ * Return: PCI PF count.
+ **/
+static inline u32 i40e_get_pf_count(struct i40e_hw *hw)
+{
+ return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK,
+ rd32(hw, I40E_GLGEN_PCIFCNCNT));
+}
+
+/* needed by i40e_ethtool.c */
+int i40e_up(struct i40e_vsi *vsi);
+void i40e_down(struct i40e_vsi *vsi);
+extern const char i40e_driver_name[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
+int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size);
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
+/**
+ * i40e_find_vsi_by_type - Find and return the first VSI of a given type
+ * @pf: PF to search for the VSI
+ * @type: value indicating the type of VSI we are looking for
+ **/
+static inline struct i40e_vsi *
+i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct i40e_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
+}
+void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_veb_stats(struct i40e_veb *veb);
+void i40e_update_eth_stats(struct i40e_vsi *vsi);
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+int i40e_fetch_switch_configuration(struct i40e_pf *pf,
+ bool printconfig);
+
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add);
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
+u32 i40e_get_current_fd_count(struct i40e_pf *pf);
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_global_fd_count(struct i40e_pf *pf);
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
+void i40e_set_ethtool_ops(struct net_device *netdev);
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr, s16 vlan);
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink, u32 param1);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+void i40e_service_event_schedule(struct i40e_pf *pf);
+void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
+ u8 *msg, u16 len);
+
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp,
+ bool enable);
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable);
+int i40e_vsi_start_rings(struct i40e_vsi *vsi);
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc);
+void i40e_veb_release(struct i40e_veb *veb);
+
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
+void i40e_pf_reset_stats(struct i40e_pf *pf);
+#ifdef CONFIG_DEBUG_FS
+void i40e_dbg_pf_init(struct i40e_pf *pf);
+void i40e_dbg_pf_exit(struct i40e_pf *pf);
+void i40e_dbg_init(void);
+void i40e_dbg_exit(void);
+#else
+static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
+static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
+static inline void i40e_dbg_init(void) {}
+static inline void i40e_dbg_exit(void) {}
+#endif /* CONFIG_DEBUG_FS*/
+/* needed by client drivers */
+int i40e_lan_add_device(struct i40e_pf *pf);
+int i40e_lan_del_device(struct i40e_pf *pf);
+void i40e_client_subtask(struct i40e_pf *pf);
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
+void i40e_client_update_msix_info(struct i40e_pf *pf);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: HW interrupt vector to enable, relative to the VSI's base_vector
+ **/
+static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
+ /* skip the flush */
+}
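+
+/* Reading note (not upstream text): I40E_PFINT_DYN_CTLN is indexed from
+ * MSI-X vector 1 (vector 0, the misc/ICR0 interrupt, uses PFINT_DYN_CTL0),
+ * which is why the register index above is vector + base_vector - 1.
+ */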
+
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
+int i40e_vsi_open(struct i40e_vsi *vsi);
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid);
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid);
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr);
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+int i40e_count_filters(struct i40e_vsi *vsi);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
+{
+ return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+}
+
+#ifdef CONFIG_I40E_DCB
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg);
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
+void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg);
+int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg);
+int i40e_dcb_sw_default_config(struct i40e_pf *pf);
+#endif /* CONFIG_I40E_DCB */
+void i40e_ptp_rx_hang(struct i40e_pf *pf);
+void i40e_ptp_tx_hang(struct i40e_pf *pf);
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
+void i40e_ptp_set_increment(struct i40e_pf *pf);
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_save_hw_time(struct i40e_pf *pf);
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
+void i40e_ptp_init(struct i40e_pf *pf);
+void i40e_ptp_stop(struct i40e_pf *pf);
+int i40e_ptp_alloc_pins(struct i40e_pf *pf);
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
+int i40e_get_partition_bw_setting(struct i40e_pf *pf);
+int i40e_set_partition_bw_setting(struct i40e_pf *pf);
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+
+static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
+{
+ return !!READ_ONCE(vsi->xdp_prog);
+}
+
+int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
+
+/**
+ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
+ * @pf: pointer to a pf.
+ *
+ * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ *
+ * Return: I40E_FLAG_TC_MQPRIO set state.
+ **/
+static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+{
+ return pf->flags & I40E_FLAG_TC_MQPRIO;
+}
+
+#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
new file mode 100644
index 000000000..86fac8f95
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -0,0 +1,1193 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+static void i40e_resume_aq(struct i40e_hw *hw);
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (i40e_is_vf(hw)) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.asq.bal = I40E_VF_ATQBAL1;
+ hw->aq.asq.bah = I40E_VF_ATQBAH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ hw->aq.arq.bal = I40E_VF_ARQBAL1;
+ hw->aq.arq.bah = I40E_VF_ARQBAH1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.asq.bal = I40E_PF_ATQBAL;
+ hw->aq.asq.bah = I40E_PF_ATQBAH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ hw->aq.arq.bal = I40E_PF_ARQBAL;
+ hw->aq.arq.bah = I40E_PF_ARQBAH;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ int ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ int ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int ret_code;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration.
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_dma_mem *bi;
+ int ret_code;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static int i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ int ret_code = 0;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+static int i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ int ret_code = 0;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not atomic-context safe
+ **/
+static int i40e_init_asq(struct i40e_hw *hw)
+{
+ int ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not atomic-context safe
+ **/
+static int i40e_init_arq(struct i40e_hw *hw)
+{
+ int ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static int i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ int ret_code = 0;
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ mutex_unlock(&hw->aq.asq_mutex);
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static int i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ int ret_code = 0;
+
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ mutex_unlock(&hw->aq.arq_mutex);
+ return ret_code;
+}
+
+/**
+ * i40e_set_hw_flags - set HW capability flags
+ * @hw: pointer to the hardware structure
+ *
+ * Set hw->flags based on the MAC type and the firmware API version
+ * reported by the device.
+ **/
+static void i40e_set_hw_flags(struct i40e_hw *hw)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ hw->flags = 0;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* The ability to RX (not drop) 802.1ad frames */
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ }
+ break;
+ case I40E_MAC_X722:
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+
+ fallthrough;
+ default:
+ break;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 8)) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 9))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+}
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
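+ *
+ * Illustrative caller setup (the values below are examples only, not
+ * requirements):
+ *	hw->aq.num_asq_entries = 128;
+ *	hw->aq.num_arq_entries = 128;
+ *	hw->aq.asq_buf_size = 4096;
+ *	hw->aq.arq_buf_size = 4096;
+ *	ret_code = i40e_init_adminq(hw);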
+ **/
+int i40e_init_adminq(struct i40e_hw *hw)
+{
+ u16 cfg_ptr, oem_hi, oem_lo;
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
+ int ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.fw_build,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ msleep(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_arq;
+
+ /* Some features were introduced in different FW API version
+ * for different MAC type.
+ */
+ i40e_set_hw_flags(hw);
+
+ /* get the NVM version info */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ }
+ if (hw->mac.type == I40E_MAC_X722 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ }
+
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 7))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->nvm_release_on_done = false;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+ ret_code = 0;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+void i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * Returns the number of free descriptors
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset(desc, 0, sizeof(*desc));
+ memset(details, 0, sizeof(*details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+static bool i40e_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * i40e_asq_send_command_atomic_exec - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc.
+ **/
+static int
+i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
+{
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ int status = 0;
+ u32 val = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Async flag not set along with postpone flag\n");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+	/* Call the clean-and-check-queue-available function to reclaim the
+	 * descriptors that were processed by FW; it returns the number of
+	 * descriptors available.
+	 *
+	 * The clean function called here could be called in a separate thread
+	 * in case of asynchronous completions.
+	 */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+
+ if (is_atomic_context)
+ udelay(50);
+ else
+ usleep_range(40, 60);
+
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
+ "AQTX: desc and buffer writeback:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+	/* save the writeback descriptor if requested */
+ if (details->wb_desc)
+ *details->wb_desc = *desc_on_ring;
+
+	/* update the error if a timeout occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ return status;
+}
+
+/**
+ * i40e_asq_send_command_atomic - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine.
+ **/
+int
+i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
+{
+ int status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
+ cmd_details,
+ is_atomic_context);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+ return status;
+}
+
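+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * Non-atomic wrapper around i40e_asq_send_command_atomic() that may
+ * sleep while waiting for descriptor write back.
+ **/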
+int
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
+ cmd_details, false);
+}
+
+/**
+ * i40e_asq_send_command_atomic_v2 - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine. Returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ **/
+int
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status)
+{
+ int status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
+ buff_size,
+ cmd_details,
+ is_atomic_context);
+ if (aq_status)
+ *aq_status = hw->aq.asq_last_status;
+ mutex_unlock(&hw->aq.asq_mutex);
+ return status;
+}
+
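+/**
+ * i40e_asq_send_command_v2 - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Wrapper around i40e_asq_send_command_atomic_v2() that also returns
+ * the last Admin Queue status in @aq_status.
+ **/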
+int
+i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
+ cmd_details, true, aq_status);
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
+
+/**
+ * i40e_clean_arq_element - clean one element from the Admin Receive Queue
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int ret_code = 0;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ memset(&e->desc, 0, sizeof(e->desc));
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ e->desc = *desc;
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len);
+
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+	/* Restore the original datalen and buffer address in the desc;
+	 * FW updates datalen to indicate the event message size
+	 */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
+clean_arq_element_out:
+	/* Set pending to the number of descriptors still to be cleaned,
+	 * accounting for ring wrap-around; unlock and return
+	 */
+ if (pending)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
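+/**
+ * i40e_resume_aq - resume Admin Queue processing after reset
+ * @hw: pointer to the hardware structure
+ *
+ * Re-initialize the ring indices and base registers for both the send
+ * and receive queues; the registers are reset by a PF reset.
+ **/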
+static void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
new file mode 100644
index 000000000..ee394aace
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_status.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct i40e_aq_desc *wb_desc;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+	u32 asq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * @aq_ret: AdminQ handler error code that can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
+ **/
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+ int aq_to_posix[] = {
+ 0, /* I40E_AQ_RC_OK */
+ -EPERM, /* I40E_AQ_RC_EPERM */
+ -ENOENT, /* I40E_AQ_RC_ENOENT */
+ -ESRCH, /* I40E_AQ_RC_ESRCH */
+ -EINTR, /* I40E_AQ_RC_EINTR */
+ -EIO, /* I40E_AQ_RC_EIO */
+ -ENXIO, /* I40E_AQ_RC_ENXIO */
+ -E2BIG, /* I40E_AQ_RC_E2BIG */
+ -EAGAIN, /* I40E_AQ_RC_EAGAIN */
+ -ENOMEM, /* I40E_AQ_RC_ENOMEM */
+ -EACCES, /* I40E_AQ_RC_EACCES */
+ -EFAULT, /* I40E_AQ_RC_EFAULT */
+ -EBUSY, /* I40E_AQ_RC_EBUSY */
+ -EEXIST, /* I40E_AQ_RC_EEXIST */
+ -EINVAL, /* I40E_AQ_RC_EINVAL */
+ -ENOTTY, /* I40E_AQ_RC_ENOTTY */
+ -ENOSPC, /* I40E_AQ_RC_ENOSPC */
+ -ENOSYS, /* I40E_AQ_RC_ENOSYS */
+ -ERANGE, /* I40E_AQ_RC_ERANGE */
+ -EPIPE, /* I40E_AQ_RC_EFLUSHED */
+ -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
+ -EROFS, /* I40E_AQ_RC_EMODE */
+ -EFBIG, /* I40E_AQ_RC_EFBIG */
+ };
+
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ return -ERANGE;
+
+ return aq_to_posix[aq_rc];
+}
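+
+/* Typical caller pattern (illustrative only):
+ *
+ *	status = i40e_asq_send_command(hw, &desc, buff, buff_size, NULL);
+ *	err = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ */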
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
new file mode 100644
index 000000000..60f9e0a6a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -0,0 +1,2430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR_X722 0x000C
+#define I40E_FW_API_VERSION_MINOR_X710 0x000F
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+/* API version 1.10 for X722 devices adds ability to request FEC encoding */
+#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+ i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_oem_post_update = 0x0720,
+ i40e_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+ i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+ i40e_aqc_opc_lldp_restore = 0x0A0A,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+ __le16 table_id;
+ __le32 enabled_offloads;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+ u8 reserved[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+
+ __le16 cmd_flags;
+ __le16 valid_flags;
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+ u8 revision;
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer
+ * the first in the array is the header, remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
+/* Set Switch Configuration (direct 0x0205) */
+struct i40e_aqc_set_switch_config {
+ __le16 flags;
+/* flags used for both fields below */
+#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
+ __le16 valid_flags;
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ /* Next byte is split into following:
+ * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
+ * Bit 6 : 0 : Destination Port, 1: source port
+ * Bit 5..4 : L4 type
+ * 0: rsvd
+ * 1: TCP
+ * 2: UDP
+ * 3: Both TCP and UDP
+ * Bits 3:0 Mode
+ * 0: default mode
+ * 1: L4 port only mode
+ * 2: non-tunneled mode
+ * 3: tunneled mode
+ */
+#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
+
+
+#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
+
+#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
+ u8 mode;
+ u8 rsvd5[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
+
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct i40e_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 byte are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+	__le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
+ __le16 queue_number;
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+ u8 reserved;
+ u8 result;
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
+ __le16 seid;
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
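+
+/* Usage sketch (assumed caller code): valid_flags selects which mode bits
+ * the firmware should look at and promiscuous_flags gives their new values,
+ * so enabling unicast promiscuous while leaving other modes untouched is:
+ *
+ *  cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *  cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *  cmd->seid = cpu_to_le16(vsi_seid);
+ */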
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+ __le16 seid;
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+ __le16 tag;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * Delete multicast E-Tag (direct 0x0258): uses only the pv_seid and etag
+ * fields and takes no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter struct below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_CLOUD_CMD_BB 1
+ u8 reserved2[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ struct {
+ __le16 data[8];
+ } raw_v6;
+ } ipaddr;
+ __le16 flags;
+/* 0x0000 reserved */
+/* 0x0001 reserved */
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x000D reserved */
+/* 0x000E reserved */
+/* 0x000F reserved */
+/* 0x0010 to 0x0017 are for custom filters */
+#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+ u8 response_reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
+
+/* i40e_aqc_cloud_filters_element_bb is used when
+ * I40E_AQC_CLOUD_CMD_BB flag is set.
+ */
+struct i40e_aqc_cloud_filters_element_bb {
+ struct i40e_aqc_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+ struct i40e_filter_data filters[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
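+
+/* Usage sketch (assumed caller code): for the mirror-all rule types (4, 5)
+ * num_entries stays 0 and no buffer is attached; for VLAN mirroring the
+ * external buffer holds num_entries 2-byte VLAN ids:
+ *
+ *  cmd->rule_type = cpu_to_le16(I40E_AQC_MIRROR_RULE_TYPE_VLAN <<
+ *                               I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+ *  cmd->num_entries = cpu_to_le16(num_vlans);
+ *  cmd->destination = cpu_to_le16(dest_vsi_seid);
+ *
+ * On delete, destination instead carries the rule id that the add
+ * completion returned.
+ */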
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* Dynamic Device Personalization */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ddp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
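+
+/* Usage sketch: elsewhere in the driver a Mbps rate is converted into BW
+ * credits before this command is issued (50 Mbps per credit is the driver
+ * convention; treat the divisor as an assumption here):
+ *
+ *  cmd->vsi_seid = cpu_to_le16(seid);
+ *  cmd->credit = cpu_to_le16(max_tx_rate / 50);
+ *  cmd->max_credit = 0;
+ */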
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_2_5GBASE_T = 0x26,
+ I40E_PHY_TYPE_5GBASE_T = 0x27,
+ I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS = 0x30,
+ I40E_PHY_TYPE_5GBASE_T_LINK_STATUS = 0x31,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
+};
+
+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
+ BIT_ULL(I40E_PHY_TYPE_XAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XFI) | \
+ BIT_ULL(I40E_PHY_TYPE_SFI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
+ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
+ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
+ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
+ BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
+
+#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+#define I40E_LINK_SPEED_5GB_SHIFT 0x7
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_2_5GB = BIT(I40E_LINK_SPEED_2_5GB_SHIFT),
+ I40E_LINK_SPEED_5GB = BIT(I40E_LINK_SPEED_5GB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
+};
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+ __le16 eee_capability;
+ __le32 eeer_val;
+ u8 d3_lpan;
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+
+ u8 ext_comp_code;
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
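+
+/* Decoding sketch (assumed caller code): phy_type is a bitmap indexed by
+ * enum i40e_aq_phy_type and link_speed is a bitmap of i40e_aq_link_speed
+ * values, so testing for an advertised 10GBASE-KR link looks like:
+ *
+ *  if ((le32_to_cpu(abilities->phy_type) &
+ *       BIT(I40E_PHY_TYPE_10GBASE_KR)) &&
+ *      (abilities->link_speed & I40E_LINK_SPEED_10GB))
+ *          handle_10g_kr();
+ */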
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7 loopback field keeps power class info as well */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ union {
+ struct {
+ u8 power_desc;
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
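+
+/* Decoding sketch: on firmware API 1.7+ the loopback byte also carries
+ * module power class bits, so loopback comparisons must mask first:
+ *
+ *  u8 lb_mode = resp->loopback & I40E_AQ_LOOPBACK_MASK;
+ *
+ * The layout of the remaining power class bits is defined by further masks
+ * in the firmware spec and is not reproduced here.
+ */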
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+/* Disable link manageability on a single port */
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+ __le16 activity_id;
+ u8 flags;
+ u8 reserved1;
+ __le32 control;
+ __le32 data;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_address;
+ u8 cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01
+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
+ u8 reserved1;
+ __le32 reg_address;
+ __le32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
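+
+/* Usage sketch (assumed caller code): the preservation setting is a small
+ * field placed at I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT within command_flags,
+ * so a final update chunk that preserves all preservable fields sets:
+ *
+ *  cmd->command_flags = I40E_AQ_NVM_LAST_CMD |
+ *                       (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ *                        I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ */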
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+ __le32 field_id;
+ __le32 field_value;
+ __le16 field_options;
+ __le16 reserved;
+};
+
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor and command specific data buffer, not detailed here
+ */
+struct i40e_aqc_thermal_sensor {
+ u8 sensor_action;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+/* TX pause flags use the I40E_AQ_LINK_PAUSE_* defines above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
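+
+/* The I40E_CHECK_STRUCT_LEN() lines throughout this file are the
+ * compile-time guards that catch exactly this kind of silent padding;
+ * conceptually (a sketch, the real macro is defined near the top of this
+ * header) each one reduces to:
+ *
+ *  static_assert(sizeof(struct i40e_aqc_get_cee_dcb_cfg_v1_resp) == 0x18);
+ */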
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+ __le32 tlv_status;
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+ u8 command;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
+struct i40e_aqc_lldp_restore {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* Remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
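+
+/* Size note, spelled out: 0x28 (40) bytes of standard key plus 0xc (12)
+ * bytes of extended hash key gives the 0x34 (52) bytes asserted above; a
+ * caller typically copies a 52-byte seed over the whole struct in one go.
+ */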
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
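+
+/* Usage sketch (assumed caller code): selecting the PF-wide LUT for a VSI
+ * combines the valid bit with the VSI id and sets the table type in flags:
+ *
+ *  cmd->vsi_id = cpu_to_le16(I40E_AQC_SET_RSS_LUT_VSI_VALID |
+ *                  (vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT));
+ *  cmd->flags = cpu_to_le16(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ *                  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT);
+ */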
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+ u8 network_key_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+ __le32 param_value1;
+ __le16 param_value2;
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+ u8 reserved[3];
+ u8 command;
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
new file mode 100644
index 000000000..a6c9a9e34
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototypes for functions used for dynamic memory allocation */
+int i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+int i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+int i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+int i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
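+
+/* Typical call pattern (sketch; the type and alignment values are
+ * illustrative and error handling is elided). Allocations must be paired
+ * with the matching free:
+ *
+ *  struct i40e_dma_mem ring;
+ *
+ *  if (i40e_allocate_dma_mem(hw, &ring, i40e_mem_arq_ring, size, 4096))
+ *          return -ENOMEM;
+ *  ...
+ *  i40e_free_dma_mem(hw, &ring);
+ */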
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
new file mode 100644
index 000000000..a289f1bb3
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/net/intel/i40e_client.h>
+
+#include "i40e.h"
+#include "i40e_prototype.h"
+
+static LIST_HEAD(i40e_devices);
+static DEFINE_MUTEX(i40e_device_mutex);
+DEFINE_IDA(i40e_client_ida);
+
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len);
+
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
+
+static void i40e_client_request_reset(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 reset_level);
+
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag);
+
+static struct i40e_ops i40e_lan_ops = {
+ .virtchnl_send = i40e_client_virtchnl_send,
+ .setup_qvlist = i40e_client_setup_qvlist,
+ .request_reset = i40e_client_request_reset,
+ .update_vsi_ctxt = i40e_client_update_vsi_ctxt,
+};
+
+/**
+ * i40e_client_get_params - Get the params that can change at runtime
+ * @vsi: the VSI whose runtime params are queried
+ * @params: client param struct
+ *
+ **/
+static
+int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config;
+ int i = 0;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ u8 tc = dcb_cfg->etscfg.prioritytable[i];
+ u16 qs_handle;
+
+ /* If TC is not enabled for VSI use TC0 for UP */
+ if (!(vsi->tc_config.enabled_tc & BIT(tc)))
+ tc = 0;
+
+ qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]);
+ params->qos.prio_qos[i].tc = tc;
+ params->qos.prio_qos[i].qs_handle = qs_handle;
+ if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) {
+ dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n",
+ tc, vsi->id);
+ return -EINVAL;
+ }
+ }
+
+ params->mtu = vsi->netdev->mtu;
+ return 0;
+}
+
+/**
+ * i40e_notify_client_of_vf_msg - call the client vf message callback
+ * @vsi: the VSI with the message
+ * @vf_id: the absolute VF id that sent the message
+ * @msg: message buffer
+ * @len: length of the message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void
+i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance virtual channel receive routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n");
+ return;
+ }
+ cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client,
+ vf_id, msg, len);
+}
+
+/**
+ * i40e_notify_client_of_l2_param_changes - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
+ struct i40e_params params;
+
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ return;
+ }
+ memset(&params, 0, sizeof(params));
+ i40e_client_get_params(vsi, &params);
+ memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
+ cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
+ &params);
+}
+
+/**
+ * i40e_client_release_qvlist - release MSI-X vector mapping for client
+ * @ldev: pointer to L2 context.
+ *
+ **/
+static void i40e_client_release_qvlist(struct i40e_info *ldev)
+{
+ struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info;
+ u32 i;
+
+ if (!ldev->qvlist_info)
+ return;
+
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_qv_info *qv_info;
+ u32 reg_idx;
+
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
+ wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ }
+ kfree(ldev->qvlist_info);
+ ldev->qvlist_info = NULL;
+}
+
+/**
+ * i40e_notify_client_of_netdev_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to a reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
+}
+
+/**
+ * i40e_notify_client_of_vf_reset - call the client vf reset callback
+ * @pf: PF device pointer
+ * @vf_id: absolute id of VF being reset
+ *
+ * If there is a client attached to this PF, notify when a VF is reset
+ **/
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
+{
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_reset) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF reset routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
+ return;
+ }
+ cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id);
+}
+
+/**
+ * i40e_notify_client_of_vf_enable - call the client vf notification callback
+ * @pf: PF device pointer
+ * @num_vfs: the number of VFs currently enabled, 0 for disable
+ *
+ * If there is a client attached to this PF, call its VF notification routine
+ **/
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
+{
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_enable) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF enable routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
+ return;
+ }
+ cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs);
+}
+
+/**
+ * i40e_vf_client_capable - ask the client whether it supports the specified VF
+ * @pf: PF device pointer
+ * @vf_id: the VF in question
+ *
+ * If there is a client of the specified type attached to this PF, call
+ * its vf_capable routine
+ **/
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
+{
+ struct i40e_client_instance *cdev = pf->cinst;
+ int capable = false;
+
+ if (!cdev || !cdev->client)
+ goto out;
+ if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
+ goto out;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
+ goto out;
+
+ capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+ cdev->client,
+ vf_id);
+out:
+ return capable;
+}
+
+void i40e_client_update_msix_info(struct i40e_pf *pf)
+{
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev || !cdev->client)
+ return;
+
+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+}
+
+static void i40e_auxiliary_dev_release(struct device *dev)
+{
+ struct i40e_auxiliary_device *i40e_aux_dev =
+ container_of(dev, struct i40e_auxiliary_device, aux_dev.dev);
+
+ ida_free(&i40e_client_ida, i40e_aux_dev->aux_dev.id);
+ kfree(i40e_aux_dev);
+}
+
+static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
+{
+ struct i40e_auxiliary_device *i40e_aux_dev;
+ struct pci_dev *pdev = ldev->pcidev;
+ struct auxiliary_device *aux_dev;
+ int ret;
+
+ i40e_aux_dev = kzalloc(sizeof(*i40e_aux_dev), GFP_KERNEL);
+ if (!i40e_aux_dev)
+ return -ENOMEM;
+
+ i40e_aux_dev->ldev = ldev;
+
+ aux_dev = &i40e_aux_dev->aux_dev;
+ aux_dev->name = name;
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = i40e_auxiliary_dev_release;
+ ldev->aux_dev = aux_dev;
+
+ ret = ida_alloc(&i40e_client_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(i40e_aux_dev);
+ return ret;
+ }
+ aux_dev->id = ret;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret < 0) {
+ ida_free(&i40e_client_ida, aux_dev->id);
+ kfree(i40e_aux_dev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret) {
+ auxiliary_device_uninit(aux_dev);
+ return ret;
+ }
+
+ return ret;
+}
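+
+/* Consumer-side sketch (lives in a separate module such as irdma, shown
+ * here only for orientation): the auxiliary bus matches on
+ * "<modname>.<name>", so a client driver binds to the device registered
+ * above with an id table entry like:
+ *
+ *  static const struct auxiliary_device_id my_id_table[] = {
+ *          { .name = "i40e.iwarp" },
+ *          {}
+ *  };
+ */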
+
+/**
+ * i40e_client_add_instance - add a client instance struct to the instance list
+ * @pf: pointer to the board struct
+ *
+ **/
+static void i40e_client_add_instance(struct i40e_pf *pf)
+{
+ struct i40e_client_instance *cdev = NULL;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return;
+
+ cdev->lan_info.pf = (void *)pf;
+ cdev->lan_info.netdev = vsi->netdev;
+ cdev->lan_info.pcidev = pf->pdev;
+ cdev->lan_info.fid = pf->hw.pf_id;
+ cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF;
+ cdev->lan_info.hw_addr = pf->hw.hw_addr;
+ cdev->lan_info.ops = &i40e_lan_ops;
+ cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR;
+ cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR;
+ cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD;
+ cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
+ cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
+ cdev->lan_info.fw_build = pf->hw.aq.fw_build;
+ set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
+
+ if (i40e_client_get_params(vsi, &cdev->lan_info.params))
+ goto free_cdev;
+
+ mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (mac)
+ ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
+ else
+ dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
+
+ pf->cinst = cdev;
+
+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+
+ if (i40e_register_auxiliary_dev(&cdev->lan_info, "iwarp"))
+ goto free_cdev;
+
+ return;
+
+free_cdev:
+ kfree(cdev);
+ pf->cinst = NULL;
+}
+
+/**
+ * i40e_client_del_instance - removes a client instance from the list
+ * @pf: pointer to the board struct
+ *
+ **/
+static
+void i40e_client_del_instance(struct i40e_pf *pf)
+{
+ kfree(pf->cinst);
+ pf->cinst = NULL;
+}
+
+/**
+ * i40e_client_subtask - client maintenance work
+ * @pf: board private structure
+ **/
+void i40e_client_subtask(struct i40e_pf *pf)
+{
+ struct i40e_client *client;
+ struct i40e_client_instance *cdev;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ int ret = 0;
+
+ if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
+ return;
+ cdev = pf->cinst;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return;
+
+ if (!cdev || !cdev->client)
+ return;
+
+ client = cdev->client;
+
+ /* Here we handle client opens. If the client is down, and
+ * the netdev is registered, then open the client.
+ */
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ if (vsi->netdev_registered &&
+ client->ops && client->ops->open) {
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ ret = client->ops->open(&cdev->lan_info, client);
+ if (ret) {
+ /* Remove failed client instance */
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ return;
+ }
+ }
+ }
+
+ /* enable/disable PE TCP_ENA flag based on netdev down/up
+ */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+ else
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+}
+
+/**
+ * i40e_lan_add_device - add a lan device struct to the list of lan devices
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40e_lan_add_device(struct i40e_pf *pf)
+{
+ struct i40e_device *ldev;
+ int ret = 0;
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ if (ldev->pf == pf) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ldev->pf = pf;
+ INIT_LIST_HEAD(&ldev->list);
+ list_add(&ldev->list, &i40e_devices);
+ dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.bus_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+
+ i40e_client_add_instance(pf);
+
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+
+out:
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_lan_del_device - removes a lan device from the device list
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_lan_del_device(struct i40e_pf *pf)
+{
+ struct auxiliary_device *aux_dev = pf->cinst->lan_info.aux_dev;
+ struct i40e_device *ldev, *tmp;
+ int ret = -ENODEV;
+
+ auxiliary_device_delete(aux_dev);
+ auxiliary_device_uninit(aux_dev);
+
+ /* First, remove any client instance. */
+ i40e_client_del_instance(pf);
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
+ if (ldev->pf == pf) {
+ dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.bus_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+ list_del(&ldev->list);
+ kfree(ldev);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_client_virtchnl_send - send a virtual channel message to a VF
+ * @ldev: pointer to L2 context
+ * @client: Client pointer
+ * @vf_id: absolute VF identifier
+ * @msg: message buffer
+ * @len: length of message buffer
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int err;
+
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
+ 0, msg, len, NULL);
+ if (err)
+ dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
+ err, hw->aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40e_client_setup_qvlist - set up the queue-to-vector list for a client
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_qv_info *qv_info;
+ u32 v_idx, i, reg_idx, reg;
+
+ ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info,
+ qvlist_info->num_vectors), GFP_KERNEL);
+ if (!ldev->qvlist_info)
+ return -ENOMEM;
+ ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+
+ /* Validate vector id belongs to this client */
+ if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) ||
+ (v_idx < pf->iwarp_base_vector))
+ goto err;
+
+ ldev->qvlist_info->qv_info[i] = *qv_info;
+ reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1);
+
+ if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) {
+ /* Special case - No CEQ mapped on this vector */
+ wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ } else {
+ reg = (qv_info->ceq_idx &
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (I40E_QUEUE_TYPE_PE_CEQ <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ wr32(hw, reg_idx, reg);
+
+ reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx <<
+ I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_END_OF_LIST <<
+ I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT));
+ wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg);
+ }
+ if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx <<
+ I40E_PFINT_AEQCTL_ITR_INDX_SHIFT));
+
+ wr32(hw, I40E_PFINT_AEQCTL, reg);
+ }
+ }
+ /* Mitigate sync problems with iwarp VF driver */
+ i40e_flush(hw);
+ return 0;
+err:
+ kfree(ldev->qvlist_info);
+ ldev->qvlist_info = NULL;
+ return -EINVAL;
+}
+
+/**
+ * i40e_client_request_reset
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @reset_level: reset level
+ **/
+static void i40e_client_request_reset(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 reset_level)
+{
+ struct i40e_pf *pf = ldev->pf;
+
+ switch (reset_level) {
+ case I40E_CLIENT_RESET_LEVEL_PF:
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ break;
+ case I40E_CLIENT_RESET_LEVEL_CORE:
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ break;
+ default:
+ dev_warn(&pf->pdev->dev,
+ "Client for PF id %d requested an unsupported reset: %d.\n",
+ pf->hw.pf_id, reset_level);
+ break;
+ }
+
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_client_update_vsi_ctxt
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @is_vf: true if this request is for a VF
+ * @vf_id: if is_vf is true, this carries the vf_id
+ * @flag: Any device level setting that needs to be done for PE
+ * @valid_flag: bitmask selecting which bits of @flag may be changed
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ bool update = true;
+ int err;
+
+ /* TODO: for now do not allow setting VF's VSI setting */
+ if (is_vf)
+ return -EINVAL;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+ }
+
+ if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
+ ctxt.info.valid_sections =
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
+ ctxt.info.valid_sections =
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ } else {
+ update = false;
+ dev_warn(&pf->pdev->dev,
+ "Client for PF id %d requested an unsupported config: %x.\n",
+ pf->hw.pf_id, flag);
+ }
+
+ if (update) {
+ err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "update VSI ctxt for PE failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ }
+ return err;
+}
+
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client)
+{
+ struct i40e_pf *pf = ldev->pf;
+
+ pf->cinst->client = client;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+}
+EXPORT_SYMBOL_GPL(i40e_client_device_register);
+
+void i40e_client_device_unregister(struct i40e_info *ldev)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev)
+ return;
+
+ while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
+ usleep_range(500, 1000);
+
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, false);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
+
+ pf->cinst->client = NULL;
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+}
+EXPORT_SYMBOL_GPL(i40e_client_device_unregister);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
new file mode 100644
index 000000000..6266756b4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -0,0 +1,6003 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#include "i40e.h"
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include <linux/avf/virtchnl.h>
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+int i40e_set_mac_type(struct i40e_hw *hw)
+{
+ int status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_1G_BASE_T_BC:
+ case I40E_DEV_ID_5G_BASE_T_BC:
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
+ case I40E_DEV_ID_10G_B:
+ case I40E_DEV_ID_10G_SFP:
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ case I40E_DEV_ID_X710_N3000:
+ case I40E_DEV_ID_XXV710_N3000:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_KX_X722:
+ case I40E_DEV_ID_QSFP_X722:
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_SFP_X722_A:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
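+
+/* Example (sketch): i40e_aq_str() is normally paired with the numeric
+ * status code when logging an AdminQ failure, as in:
+ *
+ * err = i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL);
+ * if (err)
+ *         dev_info(&pf->pdev->dev, "AQ failed, err %d aq_err %s\n",
+ *                  err, i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ */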
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u32 effective_mask = hw->debug_mask & mask;
+ char prefix[27];
+ u16 len;
+ u8 *buf = (u8 *)buffer;
+
+ if (!effective_mask || !desc)
+ return;
+
+ len = le16_to_cpu(aq_desc->datalen);
+
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ le16_to_cpu(aq_desc->opcode),
+ le16_to_cpu(aq_desc->flags),
+ le16_to_cpu(aq_desc->datalen),
+ le16_to_cpu(aq_desc->retval));
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tcookie (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(aq_desc->cookie_high),
+ le32_to_cpu(aq_desc->cookie_low));
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(aq_desc->params.internal.param0),
+ le32_to_cpu(aq_desc->params.internal.param1));
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(aq_desc->params.external.addr_high),
+ le32_to_cpu(aq_desc->params.external.addr_low));
+
+ if (buffer && buf_len != 0 && len != 0 &&
+ (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+
+ snprintf(prefix, sizeof(prefix),
+ "i40e %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+ }
+}
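+
+/* Example caller (sketch): dump an AQ descriptor and its buffer; output
+ * is only emitted when the matching bits are set in hw->debug_mask:
+ *
+ * i40e_debug_aq(hw, I40E_DEBUG_AQ_DESCRIPTOR | I40E_DEBUG_AQ_DESC_BUFFER,
+ *               &desc, buffer, buf_len);
+ */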
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the send queue (ASQ) is enabled, else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+int i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ int status;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
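+
+/* Example (sketch; assumes a VSI whose firmware index is vsi->id and a
+ * caller-allocated lut of lut_size bytes): program the PF-wide table,
+ * then read it back:
+ *
+ * err = i40e_aq_set_rss_lut(hw, vsi->id, true, lut, lut_size);
+ * if (!err)
+ *         err = i40e_aq_get_rss_lut(hw, vsi->id, true, lut, lut_size);
+ */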
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ int status;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
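+
+/* The flow above as a C sketch (illustrative; "ptype" is the 8-bit
+ * value taken from the Rx descriptor):
+ *
+ * struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
+ *
+ * if (!decoded.known)
+ *         return;                      // unknown packet
+ * if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
+ *         ...                          // inspect tunnel/inner fields
+ * else
+ *         ...                          // decode via enum i40e_rx_l2_ptype
+ */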
+
+/* macro to make the table lines short, use explicit indexing with [PTYPE] */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ [PTYPE] = { \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
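+
+/* For reference, a table line such as
+ * I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3)
+ * expands (after the shorthand defines above) to roughly:
+ * [22] = { 1, I40E_RX_PTYPE_OUTER_IP, I40E_RX_PTYPE_OUTER_IPV4,
+ *          I40E_RX_PTYPE_FRAG, I40E_RX_PTYPE_TUNNEL_NONE,
+ *          I40E_RX_PTYPE_TUNNEL_END_NONE, I40E_RX_PTYPE_NOT_FRAG,
+ *          I40E_RX_PTYPE_INNER_PROT_NONE,
+ *          I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 }
+ */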
+
+/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+};
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+int i40e_init_shared_code(struct i40e_hw *hw)
+{
+ u32 port, ari, func_rid;
+ int status = 0;
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ case I40E_MAC_X722:
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->phy.get_link_info = true;
+
+ /* Determine port number and PF number */
+ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+ >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ hw->port = (u8)port;
+ ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+ I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ func_rid = rd32(hw, I40E_PF_FUNC_RID);
+ if (ari)
+ hw->pf_id = (u8)(func_rid & 0xff);
+ else
+ hw->pf_id = (u8)(func_rid & 0x7);
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
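+
+/* Example probe-time sequence (sketch; in the real probe path these
+ * fields come from PCI config space via the struct pci_dev):
+ *
+ * memset(hw, 0, sizeof(*hw));
+ * hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ *                       pci_resource_len(pdev, 0));
+ * hw->vendor_id = pdev->vendor;
+ * hw->device_id = pdev->device;
+ * hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ * hw->subsystem_device_id = pdev->subsystem_device;
+ * hw->revision_id = pdev->revision;
+ * err = i40e_init_shared_code(hw);
+ */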
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static int
+i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = le16_to_cpu(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = cpu_to_le16(flags);
+ cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ u16 flags = 0;
+ int status;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ ether_addr_copy(mac_addr, addrs.pf_lan_mac);
+
+ return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ u16 flags = 0;
+ int status;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_PORT_ADDR_VALID)
+ ether_addr_copy(mac_addr, addrs.port_mac);
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target PF queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
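+
+/* Example: with base_queue 192 and queue 20, abs_queue_idx is 212, so
+ * reg_block is 212 / 128 = 1 and the QINDX written into
+ * I40E_GLLAN_TXPRE_QDIS(1) is 212 % 128 = 84.
+ */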
+
+/**
+ * i40e_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_word = 0;
+ u16 pba_size = 0;
+ u16 pba_ptr = 0;
+ int status = 0;
+ u16 i = 0;
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+ if (status || (pba_word != 0xFAFA)) {
+ hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block pointer.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block size.\n");
+ return status;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ hw_dbg(hw, "Buffer too small for PBA data.\n");
+ return I40E_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
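+
+/* Each PBA word in the NVM packs two ASCII characters, high byte
+ * first, so a block of 0x3132 0x3334 decodes to the string "1234".
+ */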
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
+ case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
+ }
+
+ return media;
+}
+
+/**
+ * i40e_poll_globr - Poll for Global Reset completion
+ * @hw: pointer to the hardware structure
+ * @retry_limit: how many times to retry before failure
+ **/
+static int i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
+{
+ u32 cnt, reg = 0;
+
+ for (cnt = 0; cnt < retry_limit; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ return 0;
+ msleep(100);
+ }
+
+ hw_dbg(hw, "Global reset failed.\n");
+ hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
+
+ return I40E_ERR_RESET_FAILED;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT_A0 200
+#define I40E_PF_RESET_WAIT_COUNT 200
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+int i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 cnt = 0;
+ u32 cnt1 = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+ * The grst delay value is in 100ms units, and we'll wait a
+ * couple counts longer to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+
+ /* It can take up to 15 secs for GRST steady state.
+ * Bump it to 16 secs max to be safe.
+ */
+ grst_del = grst_del * 20;
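+ /* e.g. a GRSTDEL value of 8 yields 8 * 20 = 160 iterations of the
+ * 100ms poll below, i.e. a 16 second ceiling
+ */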
+
+ for (cnt = 0; cnt < grst_del; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ msleep(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Now wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ usleep_range(10000, 20000);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ hw_dbg(hw, "wait for FW Reset complete timed out\n");
+ hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!cnt) {
+ u32 reg2 = 0;
+
+ if (hw->revision_id == 0)
+ cnt = I40E_PF_RESET_WAIT_COUNT_A0;
+ else
+ cnt = I40E_PF_RESET_WAIT_COUNT;
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (; cnt; cnt--) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ if (i40e_poll_globr(hw, grst_del))
+ return I40E_ERR_RESET_FAILED;
+ } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ hw_dbg(hw, "PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+ u32 num_queues, base_queue;
+ u32 num_pf_int;
+ u32 num_vf_int;
+ u32 num_vfs;
+ u32 i, j;
+ u32 val;
+ u32 eol = 0x7ff;
+
+ /* get number of interrupts, queues, and VFs */
+ val = rd32(hw, I40E_GLPCI_CNF2);
+ num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+ num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+ val = rd32(hw, I40E_PFLAN_QALLOC);
+ base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+ I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
+ num_queues = (j - base_queue) + 1;
+ else
+ num_queues = 0;
+
+ val = rd32(hw, I40E_PF_VT_PFALLOC);
+ i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
+ num_vfs = (j - i) + 1;
+ else
+ num_vfs = 0;
+
+ /* stop all the interrupts */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+ /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+ val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ for (i = 0; i < num_vfs; i++)
+ wr32(hw, I40E_VPINT_LNKLST0(i), val);
+ for (i = 0; i < num_vf_int - 2; i++)
+ wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+ /* warn the HW of the coming Tx disables */
+ for (i = 0; i < num_queues; i++) {
+ u32 abs_queue_idx = base_queue + i;
+ u32 reg_block = 0;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+ val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+ }
+ udelay(400);
+
+ /* stop all the queues */
+ for (i = 0; i < num_queues; i++) {
+ wr32(hw, I40E_QINT_TQCTL(i), 0);
+ wr32(hw, I40E_QTX_ENA(i), 0);
+ wr32(hw, I40E_QINT_RQCTL(i), 0);
+ wr32(hw, I40E_QRX_ENA(i), 0);
+ }
+
+ /* short wait for all queue disables to settle */
+ udelay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+
+ /* Clear single descriptor fetch/write-back mode */
+ reg = rd32(hw, I40E_GLLAN_RCTL_0);
+
+ if (hw->revision_id == 0) {
+ /* As a work around clear PXE_MODE instead of setting it */
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
+ } else {
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+ }
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ !hw->func_caps.led[idx])
+ return 0;
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* Ignore this LED if PRT_NUM_NA is 1 (the LED is not port
+ * specific) or if it belongs to a different port
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
+}
+
+#define I40E_FW_LED BIT(4)
+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+
+#define I40E_LED0 22
+
+#define I40E_PIN_FUNC_SDP 0x0
+#define I40E_PIN_FUNC_LED 0x1
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 mode = 0;
+ int i;
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * If this function is used to turn on blinking, it should also be
+ * used to disable blinking when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+ int i;
+
+ if (mode & ~I40E_LED_MODE_VALID) {
+ hw_dbg(hw, "invalid mode passed in %X\n", mode);
+ return;
+ }
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
+ u32 pin_func = 0;
+
+ if (mode & I40E_FW_LED)
+ pin_func = I40E_PIN_FUNC_SDP;
+ else
+ pin_func = I40E_PIN_FUNC_LED;
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
+ gpio_val |= ((pin_func <<
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+ }
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (blink)
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ else
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
+ }
+}
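+
+/* Example: the identify-LED pattern saves the current mode, blinks,
+ * then restores it (sketch of the flow the comment above describes):
+ *
+ * orig_mode = i40e_led_get(hw);
+ * i40e_led_set(hw, 0xf, true);         // on, blinking
+ * ...
+ * i40e_led_set(hw, orig_mode, false);  // restore
+ */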
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+int
+i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
+ struct i40e_aq_desc desc;
+ int status;
+
+ if (!abilities)
+ return I40E_ERR_PARAM;
+
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ if (qualified_modules)
+ desc.params.external.param0 |=
+ cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+ if (report_init)
+ desc.params.external.param0 |=
+ cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
+
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EIO:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ case I40E_AQ_RC_EAGAIN:
+ usleep_range(1000, 2000);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ break;
+ /* also covers I40E_AQ_RC_OK */
+ default:
+ break;
+ }
+
+ } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ (total_delay < max_delay));
+
+ if (status)
+ return status;
+
+ if (report_init) {
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ } else {
+ hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_phy_config *cmd =
+ (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ int status;
+
+ if (!config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_config);
+
+ *cmd = *config;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+static noinline_for_stack int
+i40e_set_fc_status(struct i40e_hw *hw,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ bool atomic_restart)
+{
+ struct i40e_aq_set_phy_config config;
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+ u8 pause_mask = 0x0;
+
+ switch (fc_mode) {
+ case I40E_FC_FULL:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_RX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_TX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ break;
+ default:
+ break;
+ }
+
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ /* clear the old pause settings */
+ config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities == abilities->abilities)
+ return 0;
+
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities->phy_type;
+ config.phy_type_ext = abilities->phy_type_ext;
+ config.link_speed = abilities->link_speed;
+ config.eee_capability = abilities->eee_capability;
+ config.eeer = abilities->eeer_val;
+ config.low_power_ctrl = abilities->d3_lpan;
+ config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ return i40e_aq_set_phy_config(hw, &config, NULL);
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ int status;
+
+ *aq_failures = 0x0;
+
+ /* Get the current phy config */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+ return status;
+ }
+
+ status = i40e_set_fc_status(hw, &abilities, atomic_restart);
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+
+ /* Update the link info */
+ status = i40e_update_link_info(hw);
+ if (status) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ msleep(1000);
+ status = i40e_update_link_info(hw);
+ }
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+ return status;
+}
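+
+/* Example (sketch; assumes hw->fc.requested_mode is set first):
+ *
+ * u8 aq_failures;
+ *
+ * hw->fc.requested_mode = I40E_FC_FULL;
+ * err = i40e_set_fc(hw, &aq_failures, false);
+ * if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
+ *         ...                  // reading PHY capabilities failed
+ * if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
+ *         ...                  // writing the PHY config failed
+ * if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE)
+ *         ...                  // refreshing link info failed
+ */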
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if true: enable link, if false: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+ if (enable_link)
+ cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+ else
+ cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ bool tx_pause, rx_pause;
+ u16 command_flags;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = cpu_to_le16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ hw->phy.link_info_old = *hw_link_info;
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
+ hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
+ hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ /* update fc info */
+ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw->fc.current_mode = I40E_FC_FULL;
+ else if (tx_pause)
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ hw->fc.current_mode = I40E_FC_NONE;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
+
+ if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
+ hw_link_info->lse_enable = true;
+ else
+ hw_link_info->lse_enable = false;
+
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+ hw->mac.type != I40E_MAC_X722) {
+ __le32 tmp;
+
+ memcpy(&tmp, resp->link_type, sizeof(tmp));
+ hw->phy.phy_types = le32_to_cpu(tmp);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
+ /* save link status information */
+ if (link)
+ *link = *hw_link_info;
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = cpu_to_le16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set PHY debug command flags, e.g. to reset the external PHY.
+ **/
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_debug *cmd =
+ (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_debug);
+
+ cmd->command_flags = cmd_flags;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_is_aq_api_ver_ge
+ * @aq: pointer to AdminQ info containing HW API version to compare
+ * @maj: API major value
+ * @min: API minor value
+ *
+ * Return true if the current HW API version is greater than or equal to
+ * the provided version.
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+ u16 min)
+{
+ return (aq->api_maj_ver > maj ||
+ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
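+
+/* Illustrative, hypothetical usage: gate a feature on AdminQ API 1.5 or
+ * later. For api_maj_ver/api_min_ver of 1.7 or 2.0 this returns true;
+ * for 1.4 it returns false.
+ */
+static inline bool i40e_example_has_rx_only_promisc(struct i40e_hw *hw)
+{
+ return i40e_is_aq_api_ver_ge(&hw->aq, 1, 5);
+}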
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
+
+ if (status)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
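+
+/* Illustrative sketch, not part of the driver: adding a PF VSI under a
+ * given uplink SEID. The field usage mirrors i40e_main.c; the wrapper
+ * name is hypothetical.
+ */
+static int i40e_example_add_pf_vsi(struct i40e_hw *hw, u16 uplink_seid)
+{
+ struct i40e_vsi_context ctxt;
+ int status;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.uplink_seid = uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+
+ status = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (!status)
+ /* on success the FW-assigned SEID comes back in ctxt.seid */
+ i40e_debug(hw, I40E_DEBUG_INIT, "new VSI seid %d\n",
+ ctxt.seid);
+ return status;
+}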
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(0);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: when true, request receive-only promiscuous mode so
+ * that egress traffic is not mirrored back (FW API 1.5 or later)
+ **/
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ u16 flags = 0;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ u16 flags = 0;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ u16 flags = 0;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ u16 flags = 0;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable) {
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ u16 flags = 0;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set VLAN promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ u16 flags = 0;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
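+
+/* Illustrative sketch, not part of the driver: the promiscuous setters
+ * above are independent per traffic class, so a caller that wants full
+ * promiscuous mode combines them. The helper name is hypothetical.
+ */
+static int i40e_example_set_promisc(struct i40e_hw *hw, u16 seid, bool on)
+{
+ int status;
+
+ status = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, on, NULL, true);
+ if (status)
+ return status;
+
+ return i40e_aq_set_vsi_multicast_promiscuous(hw, seid, on, NULL);
+}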
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), NULL);
+
+ if (status)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
+
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+ return status;
+}
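+
+/* Illustrative sketch, not part of the driver: the usual
+ * read-modify-write pattern around a VSI context: fetch the current
+ * parameters, mark one section valid, write it back. This mirrors how
+ * i40e_virtchnl_pf.c toggles spoof checking; the helper name is
+ * hypothetical.
+ */
+static int i40e_example_enable_spoofchk(struct i40e_hw *hw, u16 vsi_seid)
+{
+ struct i40e_vsi_context ctxt;
+ int status;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi_seid;
+ status = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (status)
+ return status;
+
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+
+ return i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+}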
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = cpu_to_le16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = le16_to_cpu(scfg->seid);
+
+ return status;
+}
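+
+/* Illustrative sketch, not part of the driver: the FW may return the
+ * switch configuration in chunks, with start_seid acting as a cursor
+ * that comes back as 0 once the report is complete. This mirrors the
+ * loop in i40e_fetch_switch_configuration(); the buffer size here is
+ * arbitrary.
+ */
+static int i40e_example_walk_switch_config(struct i40e_hw *hw)
+{
+ struct i40e_aqc_get_switch_config_resp *buf;
+ u16 next_seid = 0;
+ int status;
+
+ buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+ if (!buf)
+ return I40E_ERR_NO_MEMORY;
+
+ do {
+ status = i40e_aq_get_switch_config(hw, buf, I40E_AQ_LARGE_BUF,
+ &next_seid, NULL);
+ if (status)
+ break;
+ /* inspect buf->header and buf->element[] here */
+ } while (next_seid != 0);
+
+ kfree(buf);
+ return status;
+}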
+
+/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @mode: cloud filter mode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_switch_config *scfg =
+ (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_switch_config);
+ scfg->flags = cpu_to_le16(flags);
+ scfg->valid_flags = cpu_to_le16(valid_flags);
+ scfg->mode = mode;
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = cpu_to_le16(hw->switch_tag);
+ scfg->first_tag = cpu_to_le16(hw->first_tag);
+ scfg->second_tag = cpu_to_le16(hw->second_tag);
+ }
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
+ * @api_major_version: AdminQ API major version
+ * @api_minor_version: AdminQ API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (fw_major_version)
+ *fw_major_version = le16_to_cpu(resp->fw_major);
+ if (fw_minor_version)
+ *fw_minor_version = le16_to_cpu(resp->fw_minor);
+ if (fw_build)
+ *fw_build = le32_to_cpu(resp->fw_build);
+ if (api_major_version)
+ *api_major_version = le16_to_cpu(resp->api_major);
+ if (api_minor_version)
+ *api_minor_version = le16_to_cpu(resp->api_minor);
+ }
+
+ return status;
+}
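+
+/* Illustrative, hypothetical usage: every output parameter is optional,
+ * so a caller interested only in the AdminQ API version can pass NULL
+ * for the rest.
+ */
+static int i40e_example_get_api_ver(struct i40e_hw *hw, u16 *maj, u16 *min)
+{
+ return i40e_aq_get_firmware_version(hw, NULL, NULL, NULL,
+ maj, min, NULL);
+}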
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ int status;
+ u16 len;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Sets *link_up to true if the link is up, false if it is down.
+ * The value of *link_up is invalid if the returned status is nonzero.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+{
+ int status = 0;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_update_link_info(hw);
+
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
+ }
+
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+ return status;
+}
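+
+/* Illustrative sketch, not part of the driver: i40e_get_link_status()
+ * queries the FW only while hw->phy.get_link_info is set and otherwise
+ * reports the cached state, so a caller that needs fresh state sets the
+ * flag first. The helper name is hypothetical.
+ */
+static int i40e_example_fresh_link_status(struct i40e_hw *hw, bool *link_up)
+{
+ hw->phy.get_link_info = true; /* force an AQ query */
+ return i40e_get_link_status(hw, link_up);
+}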
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ int status = 0;
+
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ if (status)
+ return status;
+
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
+ status = i40e_aq_get_phy_capabilities(hw, false, false,
+ &abilities, NULL);
+ if (status)
+ return status;
+
+ if (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_ENABLE_FEC_AUTO)
+ hw->phy.link_info.req_fec_info =
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
+ else
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
+
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @enable_stats: true to turn on VEB stats
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ u16 veb_flags = 0;
+ int status;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = cpu_to_le16(uplink_seid);
+ cmd->downlink_seid = cpu_to_le16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ /* reverse logic here: set the bitflag to disable the stats */
+ if (!enable_stats)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
+
+ cmd->veb_flags = cpu_to_le16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = le16_to_cpu(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * veb_seid, and returns them to the caller.
+ **/
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ int status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = cpu_to_le16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = le16_to_cpu(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = true;
+ else
+ *floating = false;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_prepare_add_macvlan
+ * @mv_list: list of macvlans to be added
+ * @desc: pointer to AQ descriptor structure
+ * @count: length of the list
+ * @seid: VSI for the mac address
+ *
+ * Internal helper function that prepares the add macvlan request
+ * and returns the buffer size.
+ **/
+static u16
+i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+ struct i40e_aq_desc *desc, u16 count, u16 seid)
+{
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc->params.raw;
+ u16 buf_size;
+ int i;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ for (i = 0; i < count; i++)
+ if (is_multicast_ether_addr(mv_list[i].mac_addr))
+ mv_list[i].flags |=
+ cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
+ desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ return buf_size;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+int
+i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
+}
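+
+/* Illustrative sketch, not part of the driver: adding one perfect-match
+ * MAC filter that matches any VLAN tag. The flag usage mirrors the
+ * filter code in i40e_main.c; the helper name is hypothetical.
+ */
+static int i40e_example_add_mac_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *mac)
+{
+ struct i40e_aqc_add_macvlan_element_data elem;
+
+ memset(&elem, 0, sizeof(elem));
+ ether_addr_copy(elem.mac_addr, mac);
+ elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+
+ return i40e_aq_add_macvlan(hw, seid, &elem, 1, NULL);
+}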
+
+/**
+ * i40e_aq_add_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Add MAC/VLAN addresses to the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+int
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
+}
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+int
+i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ u16 buf_size;
+ int status;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Remove MAC/VLAN addresses from the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+int
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aqc_macvlan *cmd;
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
+}
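+
+/* Illustrative, hypothetical usage of the _v2 variant: the per-call
+ * aq_status out-parameter avoids racing other threads for
+ * hw->aq.asq_last_status, so "filter already gone" (ENOENT) can be told
+ * apart from real failures.
+ */
+static int i40e_example_del_mac_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *mac)
+{
+ struct i40e_aqc_remove_macvlan_element_data elem;
+ enum i40e_admin_queue_err aq_status;
+ int status;
+
+ memset(&elem, 0, sizeof(elem));
+ ether_addr_copy(elem.mac_addr, mac);
+ elem.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+
+ status = i40e_aq_remove_macvlan_v2(hw, seid, &elem, 1, NULL,
+ &aq_status);
+ if (status && aq_status == I40E_AQ_RC_ENOENT)
+ status = 0; /* treat "not found" as success */
+ return status;
+}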
+
+/**
+ * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
+ * @hw: pointer to the hw struct
+ * @opcode: AQ opcode for add or delete mirror rule
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @id: Destination VSI SEID or Rule ID
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add or delete a mirror rule on a specific switch. Mirror rules are
+ * supported for VEB/VEPA elements only.
+ **/
+static int i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule *cmd =
+ (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
+ struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
+ u16 buf_size;
+ int status;
+
+ buf_size = count * sizeof(*mr_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd->seid = cpu_to_le16(sw_seid);
+ cmd->rule_type = cpu_to_le16(rule_type &
+ I40E_AQC_MIRROR_RULE_TYPE_MASK);
+ cmd->num_entries = cpu_to_le16(count);
+ /* Dest VSI for add, rule_id for delete */
+ cmd->destination = cpu_to_le16(id);
+ if (mr_list) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
+ cmd_details);
+ if (!status ||
+ hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
+ if (rule_id)
+ *rule_id = le16_to_cpu(resp->rule_id);
+ if (rules_used)
+ *rules_used = le16_to_cpu(resp->mirror_rules_used);
+ if (rules_free)
+ *rules_free = le16_to_cpu(resp->mirror_rules_free);
+ }
+ return status;
+}
+
+/**
+ * i40e_aq_add_mirrorrule - add a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @dest_vsi: SEID of VSI to which packets will be mirrored
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
+ **/
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
+ rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
+ rule_type, dest_vsi, count, mr_list,
+ cmd_details, rule_id, rules_used, rules_free);
+}
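+
+/* Illustrative sketch, not part of the driver: mirror all ingress
+ * traffic of a switch element to one destination VSI. For the
+ * ALL_INGRESS/ALL_EGRESS rule types no mr_list is needed, and the
+ * returned rule_id is what a later delete must pass back. The helper
+ * name is hypothetical.
+ */
+static int i40e_example_mirror_all_ingress(struct i40e_hw *hw, u16 sw_seid,
+ u16 dest_vsi_seid, u16 *rule_id)
+{
+ u16 rules_used, rules_free;
+
+ return i40e_aq_add_mirrorrule(hw, sw_seid,
+ I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
+ dest_vsi_seid, 0, NULL, NULL,
+ rule_id, &rules_used, &rules_free);
+}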
+
+/**
+ * i40e_aq_delete_mirrorrule - delete a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @count: length of the list
+ * @rule_id: Rule ID that is returned in the receive desc as part of
+ * add_mirrorrule.
+ * @mr_list: list of mirrored VLAN IDs to be removed
+ * @cmd_details: pointer to command details structure or NULL
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
+ **/
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
+{
+ /* Rule ID must be valid except for rule_type INGRESS VLAN mirroring */
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+ /* count and mr_list must be valid for rule_type INGRESS VLAN
+ * mirroring. For other rule types, count and mr_list do not
+ * matter.
+ */
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
+ rule_type, rule_id, count, mr_list,
+ cmd_details, NULL, rules_used, rules_free);
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send a message to the specified VF.
+ **/
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = cpu_to_le32(vfid);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ int status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+
+ cmd_resp->address = cpu_to_le32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
+ (u64)le32_to_cpu(cmd_resp->value_low);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+ cmd->address = cpu_to_le32(reg_addr);
+ cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
+ cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
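+
+/* Illustrative, hypothetical usage: pairing the debug read and write
+ * commands gives a read-modify-write of a 64-bit CSR through the
+ * AdminQ instead of MMIO.
+ */
+static int i40e_example_rmw_register(struct i40e_hw *hw, u32 addr,
+ u64 clear, u64 set)
+{
+ u64 val;
+ int status;
+
+ status = i40e_aq_debug_read_register(hw, addr, &val, NULL);
+ if (status)
+ return status;
+
+ val = (val & ~clear) | set;
+ return i40e_aq_debug_write_register(hw, addr, val, NULL);
+}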
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = cpu_to_le16(resource);
+ cmd->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
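+
+/* Illustrative sketch, not part of the driver: the request/release pair
+ * guards shared resources such as the NVM. On I40E_AQ_RC_EBUSY the
+ * returned timeout tells the caller how long the current owner may
+ * still hold the resource. The helper name is hypothetical.
+ */
+static int i40e_example_with_nvm_resource(struct i40e_hw *hw)
+{
+ u64 timeout = 0;
+ int status;
+
+ status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ I40E_RESOURCE_READ, 0, &timeout,
+ NULL);
+ if (status)
+ return status; /* if busy, retry within 'timeout' ms */
+
+ /* ... access the NVM here ... */
+
+ return i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}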
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ int status;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = cpu_to_le32(offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ int status;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_erase_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = cpu_to_le32(offset);
+ cmd->length = cpu_to_le16(length);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 valid_functions, num_functions;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u16 id, ocp_cfg_word0;
+ u8 major_rev;
+ int status;
+ u32 i = 0;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = &hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = &hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = le16_to_cpu(cap->id);
+ number = le32_to_cpu(cap->number);
+ logical_id = le32_to_cpu(cap->logical_id);
+ phys_id = le32_to_cpu(cap->phys_id);
+ major_rev = cap->major_rev;
+
+ switch (id) {
+ case I40E_AQ_CAP_ID_SWITCH_MODE:
+ p->switch_mode = number;
+ break;
+ case I40E_AQ_CAP_ID_MNG_MODE:
+ p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
+ break;
+ case I40E_AQ_CAP_ID_NPAR_ACTIVE:
+ p->npar_enable = number;
+ break;
+ case I40E_AQ_CAP_ID_OS2BMC_CAP:
+ p->os2bmc = number;
+ break;
+ case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
+ p->valid_functions = number;
+ break;
+ case I40E_AQ_CAP_ID_SRIOV:
+ if (number == 1)
+ p->sr_iov_1_1 = true;
+ break;
+ case I40E_AQ_CAP_ID_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ break;
+ case I40E_AQ_CAP_ID_VMDQ:
+ if (number == 1)
+ p->vmdq = true;
+ break;
+ case I40E_AQ_CAP_ID_8021QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = true;
+ break;
+ case I40E_AQ_CAP_ID_8021QBR:
+ if (number == 1)
+ p->evb_802_1_qbh = true;
+ break;
+ case I40E_AQ_CAP_ID_VSI:
+ p->num_vsis = number;
+ break;
+ case I40E_AQ_CAP_ID_DCB:
+ if (number == 1) {
+ p->dcb = true;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ break;
+ case I40E_AQ_CAP_ID_FCOE:
+ if (number == 1)
+ p->fcoe = true;
+ break;
+ case I40E_AQ_CAP_ID_ISCSI:
+ if (number == 1)
+ p->iscsi = true;
+ break;
+ case I40E_AQ_CAP_ID_RSS:
+ p->rss = true;
+ p->rss_table_size = number;
+ p->rss_table_entry_width = logical_id;
+ break;
+ case I40E_AQ_CAP_ID_RXQ:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_AQ_CAP_ID_TXQ:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_AQ_CAP_ID_MSIX:
+ p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
+ break;
+ case I40E_AQ_CAP_ID_VF_MSIX:
+ p->num_msix_vectors_vf = number;
+ break;
+ case I40E_AQ_CAP_ID_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = true;
+ p->flex10_capable = true;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = true;
+ if (number & 2)
+ p->flex10_capable = true;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
+ break;
+ case I40E_AQ_CAP_ID_CEM:
+ if (number == 1)
+ p->mgmt_cem = true;
+ break;
+ case I40E_AQ_CAP_ID_IWARP:
+ if (number == 1)
+ p->iwarp = true;
+ break;
+ case I40E_AQ_CAP_ID_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = true;
+ break;
+ case I40E_AQ_CAP_ID_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = true;
+ break;
+ case I40E_AQ_CAP_ID_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ break;
+ case I40E_AQ_CAP_ID_1588:
+ if (number == 1)
+ p->ieee_1588 = true;
+ break;
+ case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
+ p->fd = true;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ break;
+ case I40E_AQ_CAP_ID_WSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
+ /* Software override ensuring FCoE is disabled in NPAR or MFP
+ * mode, because it is not supported in those modes.
+ */
+ if (p->npar_enable || p->flex10_enable)
+ p->fcoe = false;
+
+ /* count the enabled ports (aka the "not disabled" ports) */
+ hw->num_ports = 0;
+ for (i = 0; i < 4; i++) {
+ u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+ u64 port_cfg = 0;
+
+ /* use AQ read to get the physical register offset instead
+ * of the port relative offset
+ */
+ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+ if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+ hw->num_ports++;
+ }
+
+ /* OCP cards case: if a mezz is removed, the Ethernet port is left in
+ * a disabled state in the PRTGEN_CNF register. An additional NVM read
+ * is needed to check whether we are dealing with an OCP card.
+ * Those cards have at least 4 PFs, so using PRTGEN_CNF for counting
+ * physical ports results in a wrong partition id calculation and thus
+ * breaks WoL support.
+ */
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
+ status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
+ 2 * I40E_SR_OCP_CFG_WORD0,
+ sizeof(ocp_cfg_word0),
+ &ocp_cfg_word0, true, NULL);
+ if (!status &&
+ (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
+ hw->num_ports = 4;
+ i40e_release_nvm(hw);
+ }
+ }
+
+ valid_functions = p->valid_functions;
+ num_functions = 0;
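+ /* count set bits; equivalent to hweight32(valid_functions) */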
+ while (valid_functions) {
+ if (valid_functions & 1)
+ num_functions++;
+ valid_functions >>= 1;
+ }
+
+ /* partition id is 1-based, and functions are evenly spread
+ * across the ports as partitions
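+ * e.g. with 8 functions on 4 ports, pf_id 5 lands in partition 2 of 2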
+ */
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ struct i40e_aq_desc desc;
+ int status = 0;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = le16_to_cpu(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
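+
+/* Illustrative sketch, not part of the driver: when the buffer is too
+ * small the FW fails with I40E_AQ_RC_ENOMEM and data_size reports the
+ * required length, so callers grow the buffer and retry. This mirrors
+ * i40e_get_capabilities() in i40e_main.c; the initial size is
+ * arbitrary.
+ */
+static int i40e_example_get_dev_caps(struct i40e_hw *hw)
+{
+ u16 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ u16 data_size = 0;
+ void *buf;
+ int status;
+
+ for (;;) {
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return I40E_ERR_NO_MEMORY;
+
+ status = i40e_aq_discover_capabilities(hw, buf, len, &data_size,
+ i40e_aqc_opc_list_dev_capabilities, NULL);
+ kfree(buf);
+
+ if (hw->aq.asq_last_status != I40E_AQ_RC_ENOMEM)
+ break;
+ len = data_size; /* grow and retry */
+ }
+
+ return status;
+}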
+
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ int status;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_update_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
+ cmd->module_pointer = module_pointer;
+ cmd->offset = cpu_to_le32(offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_rearrange_nvm
+ * @hw: pointer to the hw struct
+ * @rearrange_nvm: defines direction of rearrangement
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Rearrange NVM structure, available only for transition FW
+ **/
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_nvm_update *cmd;
+ struct i40e_aq_desc desc;
+ int status;
+
+ cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
+ I40E_AQ_NVM_REARRANGE_TO_STRUCT);
+
+ if (!rearrange_nvm) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_rearrange_nvm_exit;
+ }
+
+ cmd->command_flags |= rearrange_nvm;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_rearrange_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ int status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = le16_to_cpu(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = le16_to_cpu(resp->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+int
+i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_lldp_set_local_mib *cmd;
+ struct i40e_aq_desc desc;
+ int status;
+
+ cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_set_local_mib);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buff_size);
+ cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
+ cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or disable posting of an event to the ARQ when the LLDP MIB
+ * associated with the interface changes
+ **/
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_restore_lldp
+ * @hw: pointer to the hw struct
+ * @setting: pointer to factory setting variable or NULL
+ * @restore: True if factory settings should be restored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Restore LLDP Agent factory settings if @restore is true. Otherwise,
+ * only return the current factory setting in the AQ response.
+ **/
+int
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_restore *cmd =
+ (struct i40e_aqc_lldp_restore *)&desc.params.raw;
+ int status;
+
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Restore LLDP not supported by current FW version.\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
+
+ if (restore)
+ cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
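+ /* bit 0 of the returned command word reports the factory-default setting */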
+ if (setting)
+ *setting = cmd->command & 1;
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @persist: True if stop of LLDP should be persistent across power cycles
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Stop LLDP not supported by current FW version.\n");
+ }
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @persist: True if start of LLDP should be persistent across power cycles
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Start LLDP not supported by current FW version.\n");
+ }
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
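+
+/* Illustrative usage sketch (editorial example, not upstream code): bounce
+ * the embedded LLDP agent non-persistently, as a caller toggling the FW
+ * agent off and back on might do.
+ */
+static int example_restart_fw_lldp(struct i40e_hw *hw)
+{
+ int status;
+
+ status = i40e_aq_stop_lldp(hw, true, false, NULL);
+ if (status)
+ return status;
+
+ return i40e_aq_start_lldp(hw, false, NULL);
+}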
+
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @dcb_enable: True if the DCB configuration needs to be applied
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware whether the DCB configuration should be applied.
+ **/
+int
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ int status;
+
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ int status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add in Host byte order
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call cpu_to_le16 to convert from Host byte order to
+ * Little Endian order.
+ **/
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = cpu_to_le16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && filter_index)
+ *filter_index = resp->index;
+
+ return status;
+}
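+
+/* Illustrative usage sketch (editorial example, not upstream code): offload
+ * VXLAN's well-known UDP port 4789. The port is passed in host byte order;
+ * the conversion to little endian happens inside the helper.
+ */
+static int example_add_vxlan_port(struct i40e_hw *hw, u8 *filter_index)
+{
+ return i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ filter_index, NULL);
+}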
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ int status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ int status;
+ bool cmd_param_flag = false;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+ cmd->credit = cpu_to_le16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
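+
+/* Illustrative usage sketch (editorial example, not upstream code): give
+ * TC0 and TC1 equal relative credits on a VSI, mirroring how the driver's
+ * BW-allocation path fills this structure.
+ */
+static int example_split_vsi_bw(struct i40e_hw *hw, u16 seid)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {};
+
+ bw_data.tc_valid_bits = BIT(0) | BIT(1); /* enable TC0 and TC1 */
+ bw_data.tc_bw_credits[0] = 1;
+ bw_data.tc_bw_credits[1] = 1;
+
+ return i40e_aq_config_vsi_tc_bw(hw, seid, &bw_data, NULL);
+}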
+
+/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int
+i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int
+i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
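+
+/* Illustrative usage sketch (editorial example, not upstream code): the
+ * response fits in a small stack buffer because the ASQ layer copies
+ * through its own DMA-mapped bounce buffer.
+ */
+static int example_get_vsi_bw(struct i40e_hw *hw, u16 seid)
+{
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {};
+
+ return i40e_aq_query_vsi_bw_config(hw, seid, &bw_config, NULL);
+}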
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks that the filter/context sizes passed
+ * for FCoE and PE are valid.
+ *
+ * Returns 0 if the values passed are valid and within
+ * range else returns an error.
+ **/
+static int
+i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 fcoe_fmax;
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return 0;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 hash_lut_size = 0;
+ int ret = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
+
+ return 0;
+}
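+
+/* Illustrative usage sketch (editorial example, not upstream code): minimal
+ * settings a PF might program at init time, similar to the driver's PF
+ * setup path. A zeroed structure selects the smallest valid FCoE/PE sizes,
+ * which i40e_validate_filter_settings() accepts.
+ */
+static int example_setup_filter_control(struct i40e_hw *hw)
+{
+ struct i40e_filter_control_settings settings = {};
+
+ settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+ settings.enable_ethtype = true;
+ settings.enable_macvlan = true;
+
+ return i40e_set_filter_control(hw, &settings);
+}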
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: true to add the control packet filter, false to remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command adds or removes a control packet filter for a control VSI.
+ * On success it updates the perfect filter counts in the stats member.
+ **/
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ int status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = cpu_to_le16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ ether_addr_copy(cmd->mac, mac_addr);
+
+ cmd->etype = cpu_to_le16(ethtype);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->seid = cpu_to_le16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
+ stats->etype_used = le16_to_cpu(resp->etype_used);
+ stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
+ stats->etype_free = le16_to_cpu(resp->etype_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control frames
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid on which to add the Ethertype filter
+ **/
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 seid)
+{
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+ u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+ int status;
+
+ status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ seid, 0, true, NULL,
+ NULL);
+ if (status)
+ hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from the alternate structure. Fields are indicated
+ * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
+ * pointer is not passed then only the register at 'reg_addr0' is read.
+ *
+ **/
+static int i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ int status;
+
+ if (!reg_val0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = cpu_to_le32(reg_addr0);
+ cmd_resp->address1 = cpu_to_le32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (!status) {
+ *reg_val0 = le32_to_cpu(cmd_resp->data0);
+
+ if (reg_val1)
+ *reg_val1 = le32_to_cpu(cmd_resp->data1);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_suspend_port_tx
+ * @hw: pointer to the hardware structure
+ * @seid: port seid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Suspend port's Tx traffic
+ **/
+int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_tx_sched_ind *cmd;
+ struct i40e_aq_desc desc;
+ int status;
+
+ cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
+ cmd->vsi_seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
+ case PCI_EXP_LNKSTA_NLW_X1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
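+
+/* Illustrative usage sketch (editorial example, not upstream code): the
+ * probe path reads the link status word from PCIe config space and hands
+ * it to the helper above.
+ */
+static void example_store_pci_info(struct i40e_hw *hw, struct pci_dev *pdev)
+{
+ u16 link_status;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
+ i40e_set_pci_config_data(hw, link_status);
+}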
+
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ int status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = cpu_to_le32(start_index);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size)
+ *ret_buff_size = le16_to_cpu(desc.datalen);
+ if (ret_next_table)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index)
+ *ret_next_index = le32_to_cpu(resp->idx);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read BW from the alternate RAM for the given PF
+ **/
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ u32 max_bw_addr, min_bw_addr;
+ int status;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ if (*min_bw & I40E_ALT_BW_VALID_MASK)
+ *min_valid = true;
+ else
+ *min_valid = false;
+
+ if (*max_bw & I40E_ALT_BW_VALID_MASK)
+ *max_valid = true;
+ else
+ *max_valid = false;
+
+ return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ u16 bwd_size = sizeof(*bw_data);
+ struct i40e_aq_desc desc;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ if (bwd_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ udelay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_read_end;
+ }
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+
+ if (!status) {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't read register value from external PHY.\n");
+ }
+
+phy_read_end:
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause45
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u16 retry = 1000;
+ u32 command = 0;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_write_end;
+ }
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+
+phy_write_end:
+ return status;
+}
+
+/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+int i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ int status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
+ case I40E_DEV_ID_5G_BASE_T_BC:
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+int i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ int status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
+ case I40E_DEV_ID_5G_BASE_T_BC:
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
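+
+/* Illustrative usage sketch (editorial example, not upstream code): read
+ * one register from the external PHY on the current port, letting the
+ * device-ID switch above pick clause 22 or clause 45 framing.
+ */
+static int example_read_phy(struct i40e_hw *hw, u16 reg, u16 *value)
+{
+ u8 port_num = (u8)(rd32(hw, I40E_PFGEN_PORTNUM) &
+ I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ u8 phy_addr = i40e_get_phy_address(hw, port_num);
+
+ return i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, reg,
+ phy_addr, value);
+}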
+
+/**
+ * i40e_get_phy_address
+ * @hw: pointer to the HW structure
+ * @dev_num: PHY port number whose address we want
+ *
+ * Gets PHY address for current port
+ **/
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+{
+ u8 port_num = hw->func_caps.mdio_port_num;
+ u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
+
+ return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
+}
+
+/**
+ * i40e_blink_phy_link_led
+ * @hw: pointer to the HW structure
+ * @time: how long the LED should blink, in seconds
+ * @interval: gap between LED on and off in msecs
+ *
+ * Blinks PHY link LED
+ **/
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
+{
+ u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u16 gpio_led_port;
+ u8 phy_addr = 0;
+ int status = 0;
+ u16 led_ctl;
+ u8 port_num;
+ u16 led_reg;
+ u32 i;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ led_addr++) {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
+ if (status)
+ goto phy_blinking_end;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto phy_blinking_end;
+ break;
+ }
+ }
+
+ if (time > 0 && interval > 0) {
+ for (i = 0; i < time * 1000; i += interval) {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto restore_config;
+ if (led_reg & I40E_PHY_LED_MANUAL_ON)
+ led_reg = 0;
+ else
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ goto restore_config;
+ msleep(interval);
+ }
+ }
+
+restore_config:
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
+
+phy_blinking_end:
+ return status;
+}
+
+/**
+ * i40e_led_get_reg - read LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: read register value
+ **/
+static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
+{
+ u8 phy_addr = 0;
+ u8 port_num;
+ int status;
+ u32 i;
+
+ *reg_val = 0;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE, true,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)reg_val);
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
+{
+ u8 phy_addr = 0;
+ u8 port_num;
+ int status;
+ u32 i;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE, true,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)reg_val);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_led_get_phy - return current on/off mode
+ * @hw: pointer to the hw struct
+ * @led_addr: address of the LED register found during the scan
+ * @val: value read from the LED register
+ *
+ **/
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
+{
+ u16 gpio_led_port;
+ u8 phy_addr = 0;
+ u32 reg_val_aq;
+ int status = 0;
+ u16 temp_addr;
+ u16 reg_val;
+ u8 port_num;
+ u32 i;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE, true,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (!status)
+ *val = (u16)reg_val_aq;
+ return status;
+ }
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_phy
+ * @hw: pointer to the HW structure
+ * @on: true to turn the LED on, false to turn it off
+ * @led_addr: address of the LED register to use
+ * @mode: original register value, with I40E_PHY_LED_MODE_ORIG set if it
+ *        should be restored afterwards
+ *
+ * Set LEDs on or off when controlled by the PHY
+ *
+ **/
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
+{
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
+ int status = 0;
+
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
+ if (status)
+ return status;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
+ if (status)
+ return status;
+ }
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
+ if (status)
+ goto restore_config;
+ if (on)
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ else
+ led_reg = 0;
+
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
+ if (status)
+ goto restore_config;
+ if (mode & I40E_PHY_LED_MODE_ORIG) {
+ led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
+ }
+ return status;
+
+restore_config:
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
+ return status;
+}
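+
+/* Illustrative usage sketch (editorial example, not upstream code): an
+ * identify-LED sequence in the spirit of the ethtool set_phys_id path -
+ * save the original state, force the LED on, then restore it by setting
+ * the ORIG bit in the mode argument.
+ */
+static int example_identify_led(struct i40e_hw *hw)
+{
+ u16 led_addr = 0, val = 0;
+ int status;
+
+ status = i40e_led_get_phy(hw, &led_addr, &val);
+ if (status)
+ return status;
+
+ status = i40e_led_set_phy(hw, true, led_addr, 0);
+ if (status)
+ return status;
+
+ return i40e_led_set_phy(hw, false, led_addr,
+ val | I40E_PHY_LED_MODE_ORIG);
+}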
+
+/**
+ * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ int status;
+
+ if (!reg_val)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = cpu_to_le32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == 0)
+ *reg_val = le32_to_cpu(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * i40e_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+{
+ bool use_register;
+ int status = 0;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ usleep_range(1000, 2000);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
+
+/**
+ * i40e_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = cpu_to_le32(reg_addr);
+ cmd->value = cpu_to_le32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ bool use_register;
+ int status = 0;
+ int retry = 5;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ usleep_range(1000, 2000);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
+
+/**
+ * i40e_mdio_if_number_selection - MDIO I/F number selection
+ * @hw: pointer to the hw struct
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @cmd: pointer to PHY Register command structure
+ **/
+static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
+ u8 mdio_num,
+ struct i40e_aqc_phy_register_access *cmd)
+{
+ if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ ((mdio_num <<
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
+ else
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "MDIO I/F number selection not supported by current FW version.\n");
+ }
+}
+
+/**
+ * i40e_aq_set_phy_register_ext
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ * NOTE: In most cases the MDIO I/F number should not be changed, which is
+ * why the simple wrapper i40e_aq_set_phy_register may be used instead.
+ **/
+int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_address = dev_addr;
+ cmd->reg_address = cpu_to_le32(reg_addr);
+ cmd->reg_value = cpu_to_le32(reg_val);
+
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_phy_register_ext
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ * NOTE: In most cases the MDIO I/F number should not be changed, which is
+ * why the simple wrapper i40e_aq_get_phy_register may be used instead.
+ **/
+int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_address = dev_addr;
+ cmd->reg_address = cpu_to_le32(reg_addr);
+
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = le32_to_cpu(cmd->reg_value);
+
+ return status;
+}
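+
+/* Editorial note: the simple wrappers mentioned in the NOTEs above live in
+ * i40e_prototype.h; they call these _ext variants with set_mdio = false and
+ * mdio_num = 0, so the firmware keeps its default MDIO I/F selection.
+ */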
+
+/**
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ddp_resp *resp;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->profile_track_id = cpu_to_le32(track_id);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_find_segment_in_package
+ * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
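+
+/* Editorial note on the layout walked by I40E_SECTION_TABLE above: the
+ * profile segment is followed by a device table of device_table_count
+ * entries, then an NVM table whose first u32 is the number of u32 words
+ * that follow it, and then the section table itself.
+ */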
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches an i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= cpu_to_le16(aq->flags);
+ memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
+
+ return 0;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+static int
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ int status = 0;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+int
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_aq_section *ddp_aq;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ int status = 0;
+ u32 sec_off;
+ u32 i;
+
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
+ break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
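+
+/* Typical apply flow (an illustrative sketch; pkg_hdr, track_id and
+ * pinfo_sec are hypothetical caller-owned values):
+ *
+ *	profile = (struct i40e_profile_segment *)
+ *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+ *	status = i40e_write_profile(hw, profile, track_id);
+ *	if (!status)
+ *		status = i40e_add_pinfo_to_list(hw, profile, pinfo_sec,
+ *						track_id);
+ */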
+
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+int
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ int status = 0;
+ u32 sec_off;
+ int i;
+
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write roll-back MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+int
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+ int status = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters to
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
+ *
+ **/
+int
+i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ u16 buff_len;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters to
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the big buffer cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
+ *
+ **/
+int
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ u16 buff_len;
+ int status;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (le16_to_cpu(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to a hardware quirk, the Geneve VNI must be placed one
+ * byte further left than the Tenant ID position used by the
+ * other tunnel types, hence the shift by 8 below.
+ */
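+ /* Illustrative example: a Geneve VNI of 0x000123 is therefore
+ * handed to the hardware as 0x00012300.
+ */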
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = le32_to_cpu(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = cpu_to_le32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_rem_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
+ *
+ **/
+int
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ u16 buff_len;
+ int status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_rem_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the big buffer cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
+ *
+ **/
+int
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ u16 buff_len;
+ int status;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (le16_to_cpu(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to a hardware quirk, the Geneve VNI must be placed one
+ * byte further left than the Tenant ID position used by the
+ * other tunnel types, hence the shift by 8 below.
+ */
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = le32_to_cpu(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = cpu_to_le32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
new file mode 100644
index 000000000..90638b67f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -0,0 +1,1987 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return 0;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
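+
+/* Illustrative decode of the Priority Assignment Table handled above: an
+ * octet of 0x10 maps the even priority (upper nibble) to TC 1 and the odd
+ * priority (lower nibble) to TC 0, so four octets cover all eight
+ * priorities.
+ */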
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
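+
+/* Worked example for the APP table above (illustrative): the three octets
+ * 0x61 0x89 0x06 decode as priority 3 (bits 23:21), selector 1/Ethertype
+ * (bits 18:16) and protocol ID 0x8906, the FCoE Ethertype.
+ */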
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i;
+
+ typelength = ntohs(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length / sizeof(*app);
+
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ switch (selector) {
+ case I40E_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ break;
+ case I40E_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].protocolid = ntohs(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = ntohs(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = ntohs(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Stop parsing on an invalid sub-type */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Dispatches IEEE 802.1Qaz and CEE DCBX TLVs to their respective
+ * parsers; all other organizationally specific TLVs are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_lldp_org_tlv *tlv;
+ u16 typelength;
+ u16 offset = 0;
+ int ret = 0;
+ u16 length;
+ u16 type;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = ntohs(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_virt_mem mem;
+ int ret = 0;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, tc, err;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ /* Add APPs if Error is False */
+ if (!err) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FCoE APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* iSCSI APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FIP APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
+ }
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ int ret = 0;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+int i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ int ret = 0;
+
+ /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)))
+ return i40e_get_ieee_dcb_config(hw);
+
+ /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (!ret) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ le16_to_cpu(cee_v1_cfg.tlv_status);
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (!ret) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ le32_to_cpu(cee_cfg.tlv_status);
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+ /* CEE mode not enabled; try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret)
+ goto out;
+
+ /* Get CEE DCB Desired Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->desired_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = 0;
+
+out:
+ return ret;
+}
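+
+/* Summary of the version handling above: X710/XL710 with FW < 4.33 uses
+ * the IEEE-only path; FW == 4.33 uses the old CEE v1 response struct;
+ * newer FW uses the current CEE struct, falling back to the IEEE query
+ * when no CEE data is present (ENOENT).
+ */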
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
+ *
+ * Update DCB configuration from the Firmware
+ **/
+int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+{
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
+ int ret = 0;
+
+ if (!hw->func_caps.dcb)
+ return I40E_NOT_SUPPORTED;
+
+ /* Read LLDP NVM area */
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ u8 offset = 0;
+
+ if (hw->mac.type == I40E_MAC_XL710)
+ offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
+ else if (hw->mac.type == I40E_MAC_X722)
+ offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
+ else
+ return I40E_NOT_SUPPORTED;
+
+ ret = i40e_read_nvm_module_data(hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ offset,
+ I40E_LLDP_CURRENT_STATUS_OFFSET,
+ I40E_LLDP_CURRENT_STATUS_SIZE,
+ &lldp_cfg.adminstatus);
+ } else {
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ }
+ if (ret)
+ return I40E_ERR_NOT_READY;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return I40E_ERR_NOT_READY;
+ }
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ return I40E_ERR_NOT_READY;
+ }
+
+ /* Configure the LLDP MIB change event */
+ if (enable_mib_change)
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+
+ return ret;
+}
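+
+/* Usage sketch (hypothetical caller): a driver probe path might do
+ *
+ *	ret = i40e_init_dcb(hw, true);
+ *	if (ret == I40E_ERR_NOT_READY)
+ *		... LLDP agent disabled or DCBX not ready; skip FW DCB ...
+ */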
+
+/**
+ * i40e_get_fw_lldp_status
+ * @hw: pointer to the hw struct
+ * @lldp_status: pointer to the status enum
+ *
+ * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+ * Status of agent is reported via @lldp_status parameter.
+ **/
+int
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status)
+{
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+ int ret;
+
+ if (!lldp_status)
+ return I40E_ERR_PARAM;
+
+ /* Allocate buffer for the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
+ I40E_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (!ret) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ /* MIB is not available yet but the agent is running */
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ ret = 0;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
+ ret = 0;
+ }
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
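+
+/* Usage sketch (illustrative): check whether the FW LLDP agent is running:
+ *
+ *	enum i40e_get_fw_lldp_status_resp lldp_status;
+ *
+ *	if (!i40e_get_fw_lldp_status(hw, &lldp_status) &&
+ *	    lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
+ *		... agent is enabled ...
+ */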
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = htons(typelength);
+}
+
+/**
+ * i40e_add_dcb_tlv - Add one IEEE TLV to the LLDPDU
+ * @tlv: pointer to org tlv
+ * @dcbcfg: pointer to modified dcbx config structure
+ * @tlvid: tlv id to be added
+ *
+ * Fill the TLV identified by @tlvid from the DCB configuration
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+int i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ u16 miblen;
+ int ret;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to mib to be output
+ * @miblen: pointer to u16 for length of lldpmib
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Convert the DCB configuration to LLDP MIB format for sending to FW
+ **/
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid, typelength;
+ struct i40e_lldp_org_tlv *tlv;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ tlvid = I40E_TLV_ID_START;
+ do {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + I40E_IEEE_TLV_HEADER_LENGTH;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= I40E_TLV_ID_END_OF_LLDPPDU ||
+ offset >= I40E_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ } while (tlvid < I40E_TLV_ID_END_OF_LLDPPDU);
+ *miblen = offset;
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_dcb_hw_rx_fifo_config
+ * @hw: pointer to the hw struct
+ * @ets_mode: Strict Priority or Round Robin mode
+ * @non_ets_mode: Strict Priority or Round Robin mode for non-ETS traffic classes
+ * @max_exponent: Exponent to calculate max refill credits
+ * @lltc_map: Low latency TC bitmap
+ *
+ * Configure HW Rx FIFO as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
+ enum i40e_dcb_arbiter_mode ets_mode,
+ enum i40e_dcb_arbiter_mode non_ets_mode,
+ u32 max_exponent,
+ u8 lltc_map)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_RETSC);
+
+ reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+ reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) &
+ I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+ reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) &
+ I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+ reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) &
+ I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK;
+ reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) &
+ I40E_PRTDCB_RETSC_LLTC_MASK;
+ wr32(hw, I40E_PRTDCB_RETSC, reg);
+}
+
+/**
+ * i40e_dcb_hw_rx_cmd_monitor_config
+ * @hw: pointer to the hw struct
+ * @num_tc: Total number of traffic class
+ * @num_ports: Total number of ports on device
+ *
+ * Configure HW Rx command monitor as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
+ u8 num_tc, u8 num_ports)
+{
+ u32 threshold;
+ u32 fifo_size;
+ u32 reg;
+
+ /* Set the threshold and fifo_size based on number of ports */
+ switch (num_ports) {
+ case 1:
+ threshold = I40E_DCB_1_PORT_THRESHOLD;
+ fifo_size = I40E_DCB_1_PORT_FIFO_SIZE;
+ break;
+ case 2:
+ if (num_tc > 4) {
+ threshold = I40E_DCB_2_PORT_THRESHOLD_HIGH_NUM_TC;
+ fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_HIGH_NUM_TC;
+ } else {
+ threshold = I40E_DCB_2_PORT_THRESHOLD_LOW_NUM_TC;
+ fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_LOW_NUM_TC;
+ }
+ break;
+ case 4:
+ if (num_tc > 4) {
+ threshold = I40E_DCB_4_PORT_THRESHOLD_HIGH_NUM_TC;
+ fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_HIGH_NUM_TC;
+ } else {
+ threshold = I40E_DCB_4_PORT_THRESHOLD_LOW_NUM_TC;
+ fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_LOW_NUM_TC;
+ }
+ break;
+ default:
+ i40e_debug(hw, I40E_DEBUG_DCB, "Invalid num_ports %u.\n",
+ (u32)num_ports);
+ return;
+ }
+
+ /* Per the hardware manual, I40E_PRT_SWR_PM_THR is set based on the
+ * number of ports and the number of traffic classes on the given
+ * port as part of DCB configuration.
+ */
+ reg = rd32(hw, I40E_PRT_SWR_PM_THR);
+ reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) &
+ I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ wr32(hw, I40E_PRT_SWR_PM_THR, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_RPPMC);
+ reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) &
+ I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ wr32(hw, I40E_PRTDCB_RPPMC, reg);
+}
+
+/**
+ * i40e_dcb_hw_pfc_config
+ * @hw: pointer to the hw struct
+ * @pfc_en: Bitmap of PFC enabled priorities
+ * @prio_tc: priority to tc assignment indexed by priority
+ *
+ * Configure HW Priority Flow Controller as part of DCB configuration.
+ **/
+void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
+ u8 pfc_en, u8 *prio_tc)
+{
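+	/* Refresh PAUSE frames at half the pause quanta so flow control
+	 * does not lapse while PFC is active (assumed rationale for the
+	 * divide by two below).
+	 */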
+ u16 refresh_time = (u16)I40E_DEFAULT_PAUSE_TIME / 2;
+ u32 link_speed = hw->phy.link_info.link_speed;
+ u8 first_pfc_prio = 0;
+ u8 num_pfc_tc = 0;
+ u8 tc2pfc = 0;
+ u32 reg;
+ u8 i;
+
+ /* Get Number of PFC TCs and TC2PFC map */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ if (pfc_en & BIT(i)) {
+ if (!first_pfc_prio)
+ first_pfc_prio = i;
+ /* Set bit for the PFC TC */
+ tc2pfc |= BIT(prio_tc[i]);
+ num_pfc_tc++;
+ }
+ }
+
+ switch (link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ reg = rd32(hw, I40E_PRTDCB_MFLCN);
+ reg |= BIT(I40E_PRTDCB_MFLCN_DPF_SHIFT) &
+ I40E_PRTDCB_MFLCN_DPF_MASK;
+ reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ if (pfc_en) {
+ reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) &
+ I40E_PRTDCB_MFLCN_RPFCM_MASK;
+ reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) &
+ I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_MFLCN, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_FCCFG);
+ reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK;
+ if (pfc_en)
+ reg |= (I40E_DCB_PFC_ENABLED <<
+ I40E_PRTDCB_FCCFG_TFCE_SHIFT) &
+ I40E_PRTDCB_FCCFG_TFCE_MASK;
+ wr32(hw, I40E_PRTDCB_FCCFG, reg);
+
+ /* FCTTV and FCRTV to be set by default */
+ break;
+ case I40E_LINK_SPEED_40GB:
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK;
+ reg |= BIT(I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ reg |= ((u32)pfc_en <<
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ reg |= ((u32)pfc_en <<
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg);
+
+ for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) {
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i));
+ reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ if (pfc_en) {
+ reg |= ((u32)refresh_time <<
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ }
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg);
+ }
+ /* PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA default value is 0xFFFF
+ * for all user priorities
+ */
+ break;
+ }
+
+ reg = rd32(hw, I40E_PRTDCB_TC2PFC);
+ reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) &
+ I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ wr32(hw, I40E_PRTDCB_TC2PFC, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_RUP);
+ reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) &
+ I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ wr32(hw, I40E_PRTDCB_RUP, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_TDPMC);
+ reg &= ~I40E_PRTDCB_TDPMC_TCPM_MODE_MASK;
+ if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) {
+ reg |= BIT(I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) &
+ I40E_PRTDCB_TDPMC_TCPM_MODE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_TDPMC, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_TCPMC);
+ reg &= ~I40E_PRTDCB_TCPMC_TCPM_MODE_MASK;
+ if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) {
+ reg |= BIT(I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) &
+ I40E_PRTDCB_TCPMC_TCPM_MODE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_TCPMC, reg);
+}
+
+/**
+ * i40e_dcb_hw_set_num_tc
+ * @hw: pointer to the hw struct
+ * @num_tc: number of traffic classes
+ *
+ * Configure number of traffic classes in HW
+ **/
+void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_GENC);
+
+ reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK;
+ reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) &
+ I40E_PRTDCB_GENC_NUMTC_MASK;
+ wr32(hw, I40E_PRTDCB_GENC, reg);
+}
+
+/**
+ * i40e_dcb_hw_get_num_tc
+ * @hw: pointer to the hw struct
+ *
+ * Returns number of traffic classes configured in HW
+ **/
+u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_GENC);
+
+ return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >>
+ I40E_PRTDCB_GENC_NUMTC_SHIFT);
+}
+
+/**
+ * i40e_dcb_hw_rx_ets_bw_config
+ * @hw: pointer to the hw struct
+ * @bw_share: Bandwidth share indexed per traffic class
+ * @mode: Strict Priority or Round Robin mode between UP sharing same
+ * traffic class
+ * @prio_type: TC is ETS enabled or strict priority
+ *
+ * Configure HW Rx ETS bandwidth as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
+ u8 *mode, u8 *prio_type)
+{
+ u32 reg;
+ u8 i;
+
+ for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
+ reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
+ reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
+			 I40E_PRTDCB_RETSTCC_ETSTC_MASK);
+ reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
+ reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
+ reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ wr32(hw, I40E_PRTDCB_RETSTCC(i), reg);
+ }
+}
+
+/**
+ * i40e_dcb_hw_rx_up2tc_config
+ * @hw: pointer to the hw struct
+ * @prio_tc: priority to tc assignment indexed by priority
+ *
+ * Configure HW Rx UP2TC map as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_RUP2TC);
+#define I40E_UP2TC_REG(val, i) \
+ (((val) << I40E_PRTDCB_RUP2TC_UP##i##TC_SHIFT) & \
+ I40E_PRTDCB_RUP2TC_UP##i##TC_MASK)
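+	/* For illustration (example mapping): with prio_tc =
+	 * {0, 0, 1, 1, 2, 2, 3, 3}, each OR below places that user
+	 * priority's TC into its UP2TC field, building the complete
+	 * priority-to-TC map in a single register value.
+	 */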
+
+ reg |= I40E_UP2TC_REG(prio_tc[0], 0);
+ reg |= I40E_UP2TC_REG(prio_tc[1], 1);
+ reg |= I40E_UP2TC_REG(prio_tc[2], 2);
+ reg |= I40E_UP2TC_REG(prio_tc[3], 3);
+ reg |= I40E_UP2TC_REG(prio_tc[4], 4);
+ reg |= I40E_UP2TC_REG(prio_tc[5], 5);
+ reg |= I40E_UP2TC_REG(prio_tc[6], 6);
+ reg |= I40E_UP2TC_REG(prio_tc[7], 7);
+
+ wr32(hw, I40E_PRTDCB_RUP2TC, reg);
+}
+
+/**
+ * i40e_dcb_hw_calculate_pool_sizes - calculate DCB pool sizes
+ * @hw: pointer to the hw struct
+ * @num_ports: Number of available ports on the device
+ * @eee_enabled: EEE enabled for the given port
+ * @pfc_en: Bit map of PFC enabled traffic classes
+ * @mfs_tc: Array of max frame size for each traffic class
+ * @pb_cfg: pointer to packet buffer configuration
+ *
+ * Calculate the shared and dedicated per TC pool sizes,
+ * watermarks and threshold values.
+ **/
+void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ u8 num_ports, bool eee_enabled,
+ u8 pfc_en, u32 *mfs_tc,
+ struct i40e_rx_pb_config *pb_cfg)
+{
+ u32 pool_size[I40E_MAX_TRAFFIC_CLASS];
+ u32 high_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 low_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 total_pool_size = 0;
+ int shared_pool_size; /* Need signed variable */
+ u32 port_pb_size;
+ u32 mfs_max = 0;
+ u32 pcirtt;
+ u8 i;
+
+ /* Get the MFS(max) for the port */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (mfs_tc[i] > mfs_max)
+ mfs_max = mfs_tc[i];
+ }
+
+ pcirtt = I40E_BT2B(I40E_PCIRTT_LINK_SPEED_10G);
+
+ /* Calculate effective Rx PB size per port */
+ port_pb_size = I40E_DEVICE_RPB_SIZE / num_ports;
+ if (eee_enabled)
+ port_pb_size -= I40E_BT2B(I40E_EEE_TX_LPI_EXIT_TIME);
+ port_pb_size -= mfs_max;
+
+ /* Step 1 Calculating tc pool/shared pool sizes and watermarks */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (pfc_en & BIT(i)) {
+ low_wm[i] = (I40E_DCB_WATERMARK_START_FACTOR *
+ mfs_tc[i]) + pcirtt;
+ high_wm[i] = low_wm[i];
+ high_wm[i] += ((mfs_max > I40E_MAX_FRAME_SIZE)
+ ? mfs_max : I40E_MAX_FRAME_SIZE);
+ pool_size[i] = high_wm[i];
+ pool_size[i] += I40E_BT2B(I40E_STD_DV_TC(mfs_max,
+ mfs_tc[i]));
+ } else {
+ low_wm[i] = 0;
+ pool_size[i] = (I40E_DCB_WATERMARK_START_FACTOR *
+ mfs_tc[i]) + pcirtt;
+ high_wm[i] = pool_size[i];
+ }
+ total_pool_size += pool_size[i];
+ }
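+	/* Worked example (hypothetical numbers): in the loop above, a
+	 * PFC-enabled TC with mfs_tc[i] = mfs_max = 1522 at 10G gets
+	 * low_wm = 2 * 1522 + 2500 = 5544 bytes (start factor times MFS
+	 * plus PCI round-trip time), high_wm = 5544 + 4608 = 10152 bytes,
+	 * and a pool that adds I40E_BT2B(I40E_STD_DV_TC(1522, 1522)).
+	 */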
+
+ shared_pool_size = port_pb_size - total_pool_size;
+ if (shared_pool_size > 0) {
+ pb_cfg->shared_pool_size = shared_pool_size;
+ pb_cfg->shared_pool_high_wm = shared_pool_size;
+ pb_cfg->shared_pool_low_wm = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pb_cfg->shared_pool_low_thresh[i] = 0;
+ pb_cfg->shared_pool_high_thresh[i] = shared_pool_size;
+ pb_cfg->tc_pool_size[i] = pool_size[i];
+ pb_cfg->tc_pool_high_wm[i] = high_wm[i];
+ pb_cfg->tc_pool_low_wm[i] = low_wm[i];
+ }
+
+ } else {
+ i40e_debug(hw, I40E_DEBUG_DCB,
+ "The shared pool size for the port is negative %d.\n",
+ shared_pool_size);
+ }
+}
+
+/**
+ * i40e_dcb_hw_rx_pb_config
+ * @hw: pointer to the hw struct
+ * @old_pb_cfg: Existing Rx Packet buffer configuration
+ * @new_pb_cfg: New Rx Packet buffer configuration
+ *
+ * Program the Rx Packet Buffer registers.
+ **/
+void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ struct i40e_rx_pb_config *old_pb_cfg,
+ struct i40e_rx_pb_config *new_pb_cfg)
+{
+ u32 old_val;
+ u32 new_val;
+ u32 reg;
+ u8 i;
+
+	/* The Rx Packet buffer registers must be programmed in a specific
+	 * order: values being decreased are written before the new pool
+	 * sizes, values being increased after them, so watermarks and
+	 * thresholds never transiently exceed the pool sizes in effect.
+	 */
+
+ /* Program the shared pool low water mark per port if decreasing */
+ old_val = old_pb_cfg->shared_pool_low_wm;
+ new_val = new_pb_cfg->shared_pool_low_wm;
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLW);
+ reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
+ I40E_PRTRPB_SLW_SLW_MASK;
+ wr32(hw, I40E_PRTRPB_SLW, reg);
+ }
+
+ /* Program the shared pool low threshold and tc pool
+ * low water mark per TC that are decreasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_low_thresh[i];
+ new_val = new_pb_cfg->shared_pool_low_thresh[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLT(i));
+ reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
+ I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SLT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_low_wm[i];
+ new_val = new_pb_cfg->tc_pool_low_wm[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DLW(i));
+ reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
+ I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DLW(i), reg);
+ }
+ }
+
+ /* Program the shared pool high water mark per port if decreasing */
+ old_val = old_pb_cfg->shared_pool_high_wm;
+ new_val = new_pb_cfg->shared_pool_high_wm;
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHW);
+ reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
+ I40E_PRTRPB_SHW_SHW_MASK;
+ wr32(hw, I40E_PRTRPB_SHW, reg);
+ }
+
+ /* Program the shared pool high threshold and tc pool
+ * high water mark per TC that are decreasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_high_thresh[i];
+ new_val = new_pb_cfg->shared_pool_high_thresh[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHT(i));
+ reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
+ I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SHT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_high_wm[i];
+ new_val = new_pb_cfg->tc_pool_high_wm[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DHW(i));
+ reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
+ I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DHW(i), reg);
+ }
+ }
+
+ /* Write Dedicated Pool Sizes per TC */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ new_val = new_pb_cfg->tc_pool_size[i];
+ reg = rd32(hw, I40E_PRTRPB_DPS(i));
+ reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) &
+ I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DPS(i), reg);
+ }
+
+ /* Write Shared Pool Size per port */
+ new_val = new_pb_cfg->shared_pool_size;
+ reg = rd32(hw, I40E_PRTRPB_SPS);
+ reg &= ~I40E_PRTRPB_SPS_SPS_MASK;
+ reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) &
+ I40E_PRTRPB_SPS_SPS_MASK;
+ wr32(hw, I40E_PRTRPB_SPS, reg);
+
+ /* Program the shared pool low water mark per port if increasing */
+ old_val = old_pb_cfg->shared_pool_low_wm;
+ new_val = new_pb_cfg->shared_pool_low_wm;
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLW);
+ reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
+ I40E_PRTRPB_SLW_SLW_MASK;
+ wr32(hw, I40E_PRTRPB_SLW, reg);
+ }
+
+ /* Program the shared pool low threshold and tc pool
+ * low water mark per TC that are increasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_low_thresh[i];
+ new_val = new_pb_cfg->shared_pool_low_thresh[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLT(i));
+ reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
+ I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SLT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_low_wm[i];
+ new_val = new_pb_cfg->tc_pool_low_wm[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DLW(i));
+ reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
+ I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DLW(i), reg);
+ }
+ }
+
+ /* Program the shared pool high water mark per port if increasing */
+ old_val = old_pb_cfg->shared_pool_high_wm;
+ new_val = new_pb_cfg->shared_pool_high_wm;
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHW);
+ reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
+ I40E_PRTRPB_SHW_SHW_MASK;
+ wr32(hw, I40E_PRTRPB_SHW, reg);
+ }
+
+ /* Program the shared pool high threshold and tc pool
+ * high water mark per TC that are increasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_high_thresh[i];
+ new_val = new_pb_cfg->shared_pool_high_thresh[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHT(i));
+ reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
+ I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SHT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_high_wm[i];
+ new_val = new_pb_cfg->tc_pool_high_wm[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DHW(i));
+ reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
+ I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DHW(i), reg);
+ }
+ }
+}
+
+/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static int _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
+{
+ u32 address, offset = (2 * word_offset);
+ __le16 raw_mem;
+ int ret;
+ u16 mem;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
+ true, NULL);
+ i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+ mem = le16_to_cpu(raw_mem);
+ /* Check if this pointer needs to be read in word size or 4K sector
+ * units.
+ */
+ if (mem & I40E_PTR_TYPE)
+ address = (0x7FFF & mem) * 4096;
+ else
+ address = (0x7FFF & mem) * 2;
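+	/* For instance (hypothetical word value): mem = 0x8012 has the
+	 * I40E_PTR_TYPE bit set, so the pointer is in 4KB-sector units
+	 * and address = 0x12 * 4096; with the bit clear, the same value
+	 * would be a word pointer and address = 0x12 * 2.
+	 */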
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
+ true, NULL);
+ i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+ mem = le16_to_cpu(raw_mem);
+ offset = mem + word_offset;
+ offset *= 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
+ sizeof(struct i40e_lldp_variables), lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ int ret = 0;
+ u32 mem;
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+ &mem, true, NULL);
+ i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+	/* Read a bit that indicates whether we are running a flat or a
+	 * structured NVM image. A flat image keeps the LLDP configuration
+	 * in shadow RAM, so a different address must be passed in each
+	 * case.
+	 */
+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+ /* Flat NVM case */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+ I40E_SR_LLDP_CFG_PTR);
+ } else {
+ /* Good old structured NVM image */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+ I40E_NVM_LLDP_CFG_PTR);
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
new file mode 100644
index 000000000..6b60dc9b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
+#define I40E_LLDP_CURRENT_STATUS_SIZE 1
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
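+
+/* Example decode (illustrative): a TLV typelength word of 0xFE18
+ * carries type (0xFE18 & I40E_LLDP_TLV_TYPE_MASK) >> 9 = 127
+ * (organization specific) and length 0xFE18 & I40E_LLDP_TLV_LEN_MASK
+ * = 24 octets.
+ */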
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define I40E_DCB_PFC_ENABLED 2
+#define I40E_DCB_PFC_FORCED_NUM_TC 2
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_TLV_HEADER_LENGTH 2
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
+
+/* Defines for default SW DCB config */
+#define I40E_IEEE_DEFAULT_ETS_TCBW 100
+#define I40E_IEEE_DEFAULT_ETS_WILLING 1
+#define I40E_IEEE_DEFAULT_PFC_WILLING 1
+#define I40E_IEEE_DEFAULT_NUM_APPS 1
+#define I40E_IEEE_DEFAULT_APP_PRIO 3
+
+#pragma pack(1)
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
+#pragma pack()
+
+enum i40e_get_fw_lldp_status_resp {
+ I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
+ I40E_GET_FW_LLDP_STATUS_ENABLED = 1
+};
+
+/* Data structures to pass for SW DCBX */
+struct i40e_rx_pb_config {
+ u32 shared_pool_size;
+ u32 shared_pool_high_wm;
+ u32 shared_pool_low_wm;
+ u32 shared_pool_high_thresh[I40E_MAX_TRAFFIC_CLASS];
+ u32 shared_pool_low_thresh[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_size[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_high_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_low_wm[I40E_MAX_TRAFFIC_CLASS];
+};
+
+enum i40e_dcb_arbiter_mode {
+ I40E_DCB_ARB_MODE_STRICT_PRIORITY = 0,
+ I40E_DCB_ARB_MODE_ROUND_ROBIN = 1
+};
+
+#define I40E_DCB_DEFAULT_MAX_EXPONENT 0xB
+#define I40E_DEFAULT_PAUSE_TIME 0xffff
+#define I40E_MAX_FRAME_SIZE 4608 /* 4.5 KB */
+
+#define I40E_DEVICE_RPB_SIZE 968000 /* 968 KB */
+
+/* BitTimes (BT) conversion */
+#define I40E_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define I40E_B2BT(BT) ((BT) * 8)
+#define I40E_BT2B(BT) (((BT) + (8 - 1)) / 8)
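+/* For example, a 1522-byte frame is I40E_B2BT(1522) = 12176 bit times;
+ * converting back, I40E_BT2B(12176) = 1522 bytes, and I40E_BT2KB(12176)
+ * rounds up to 2 (whole KB units).
+ */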
+
+/* Max Frame(TC) = MFS(max) + MFS(TC) */
+#define I40E_MAX_FRAME_TC(mfs_max, mfs_tc) I40E_B2BT((mfs_max) + (mfs_tc))
+
+/* EEE Tx LPI Exit time in Bit Times */
+#define I40E_EEE_TX_LPI_EXIT_TIME 142500
+
+/* PCI Round Trip Time in Bit Times */
+#define I40E_PCIRTT_LINK_SPEED_10G 20000
+#define I40E_PCIRTT_BYTE_LINK_SPEED_20G 40000
+#define I40E_PCIRTT_BYTE_LINK_SPEED_40G 80000
+
+/* PFC Frame Delay Bit Times */
+#define I40E_PFC_FRAME_DELAY 672
+
+/* Worst case Cable (10GBase-T) Delay Bit Times */
+#define I40E_CABLE_DELAY 5556
+
+/* Higher Layer Delay @10G Bit Times */
+#define I40E_HIGHER_LAYER_DELAY_10G 6144
+
+/* Interface Delays in Bit Times */
+/* TODO: Add for other link speeds 20G/40G/etc. */
+#define I40E_INTERFACE_DELAY_10G_MAC_CONTROL 8192
+#define I40E_INTERFACE_DELAY_10G_MAC 8192
+#define I40E_INTERFACE_DELAY_10G_RS 8192
+
+#define I40E_INTERFACE_DELAY_XGXS 2048
+#define I40E_INTERFACE_DELAY_XAUI 2048
+
+#define I40E_INTERFACE_DELAY_10G_BASEX_PCS 2048
+#define I40E_INTERFACE_DELAY_10G_BASER_PCS 3584
+#define I40E_INTERFACE_DELAY_LX4_PMD 512
+#define I40E_INTERFACE_DELAY_CX4_PMD 512
+#define I40E_INTERFACE_DELAY_SERIAL_PMA 512
+#define I40E_INTERFACE_DELAY_PMD 512
+
+#define I40E_INTERFACE_DELAY_10G_BASET 25600
+
+/* Hardware RX DCB config related defines */
+#define I40E_DCB_1_PORT_THRESHOLD 0xF
+#define I40E_DCB_1_PORT_FIFO_SIZE 0x10
+#define I40E_DCB_2_PORT_THRESHOLD_LOW_NUM_TC 0xF
+#define I40E_DCB_2_PORT_FIFO_SIZE_LOW_NUM_TC 0x10
+#define I40E_DCB_2_PORT_THRESHOLD_HIGH_NUM_TC 0xC
+#define I40E_DCB_2_PORT_FIFO_SIZE_HIGH_NUM_TC 0x8
+#define I40E_DCB_4_PORT_THRESHOLD_LOW_NUM_TC 0x9
+#define I40E_DCB_4_PORT_FIFO_SIZE_LOW_NUM_TC 0x8
+#define I40E_DCB_4_PORT_THRESHOLD_HIGH_NUM_TC 0x6
+#define I40E_DCB_4_PORT_FIFO_SIZE_HIGH_NUM_TC 0x4
+#define I40E_DCB_WATERMARK_START_FACTOR 0x2
+
+/* delay values for with 10G BaseT in Bit Times */
+#define I40E_INTERFACE_DELAY_10G_COPPER \
+ (I40E_INTERFACE_DELAY_10G_MAC + (2 * I40E_INTERFACE_DELAY_XAUI) \
+ + I40E_INTERFACE_DELAY_10G_BASET)
+#define I40E_DV_TC(mfs_max, mfs_tc) \
+ ((2 * I40E_MAX_FRAME_TC(mfs_max, mfs_tc)) \
+ + I40E_PFC_FRAME_DELAY \
+ + (2 * I40E_CABLE_DELAY) \
+ + (2 * I40E_INTERFACE_DELAY_10G_COPPER) \
+ + I40E_HIGHER_LAYER_DELAY_10G)
+static inline u32 I40E_STD_DV_TC(u32 mfs_max, u32 mfs_tc)
+{
+ return I40E_DV_TC(mfs_max, mfs_tc) + I40E_B2BT(mfs_max);
+}
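+
+/* Worked example (assuming mfs_max = mfs_tc = 1522 bytes on 10GBase-T):
+ * I40E_MAX_FRAME_TC = I40E_B2BT(3044) = 24352 BT, so I40E_DV_TC =
+ * 2 * 24352 + 672 + 2 * 5556 + 2 * 37888 + 6144 = 142408 BT;
+ * I40E_STD_DV_TC adds I40E_B2BT(1522) on top, for 154584 BT (~19KB).
+ */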
+
+/* APIs for SW DCBX */
+void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
+ enum i40e_dcb_arbiter_mode ets_mode,
+ enum i40e_dcb_arbiter_mode non_ets_mode,
+ u32 max_exponent, u8 lltc_map);
+void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
+ u8 num_tc, u8 num_ports);
+void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
+ u8 pfc_en, u8 *prio_tc);
+void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
+u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
+void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
+ u8 *mode, u8 *prio_type);
+void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
+void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ u8 num_ports, bool eee_enabled,
+ u8 pfc_en, u32 *mfs_tc,
+ struct i40e_rx_pb_config *pb_cfg);
+void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ struct i40e_rx_pb_config *old_pb_cfg,
+ struct i40e_rx_pb_config *new_pb_cfg);
+int i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_get_dcb_config(struct i40e_hw *hw);
+int i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+int
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status);
+int i40e_set_dcb_config(struct i40e_hw *hw);
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
+#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
new file mode 100644
index 000000000..195421d86
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -0,0 +1,1036 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#ifdef CONFIG_I40E_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+#define I40E_DCBNL_STATUS_SUCCESS 0
+#define I40E_DCBNL_STATUS_ERROR 1
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_dcb_app_priority_table *app);
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, I40E_PRTDCB_GENC);
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+ I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @dev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &pf->hw.local_dcbx_config;
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = I40E_MAX_TRAFFIC_CLASS;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+ sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prioritytable,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ i40e_get_pfc_delay(hw, &pfc->delay);
+
+ /* Get Requests/Indications */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_setets - set IEEE ETS configuration
+ * @netdev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Set IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int i, ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+
+ /* Update the ETS configuration for temp */
+ pf->tmp_cfg.etscfg.willing = ets->willing;
+ pf->tmp_cfg.etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
+ pf->tmp_cfg.etscfg.cbs = ets->cbs;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pf->tmp_cfg.etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
+ pf->tmp_cfg.etscfg.tsatable[i] = ets->tc_tsa[i];
+ pf->tmp_cfg.etscfg.prioritytable[i] = ets->prio_tc[i];
+ pf->tmp_cfg.etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
+ pf->tmp_cfg.etsrec.tsatable[i] = ets->tc_reco_tsa[i];
+ pf->tmp_cfg.etsrec.prioritytable[i] = ets->reco_prio_tc[i];
+ }
+
+ /* Commit changes to HW */
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB ETS configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_setpfc - set local IEEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Sets local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+ if (pfc->pfc_cap)
+ pf->tmp_cfg.pfc.pfccap = pfc->pfc_cap;
+ else
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ pf->tmp_cfg.pfc.pfcenable = pfc->pfc_en;
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB PFC configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_setapp - set local IEEE App configuration
+ * @netdev: the corresponding netdev
+ * @app: structure to hold the Application information
+ *
+ * Sets local IEEE App configuration
+ **/
+static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcb_app_priority_table new_app;
+ struct i40e_dcbx_config *old_cfg;
+ int ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ if (old_cfg->numapps == I40E_DCBX_MAX_APPS)
+ return -EINVAL;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ new_app.selector = app->selector;
+ new_app.protocolid = app->protocol;
+ new_app.priority = app->priority;
+ /* Already internally available */
+ if (i40e_dcbnl_find_app(old_cfg, &new_app))
+ return 0;
+
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+ /* Add the app */
+ pf->tmp_cfg.app[pf->tmp_cfg.numapps++] = new_app;
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_delapp - delete local IEEE App configuration
+ * @netdev: the corresponding netdev
+ * @app: structure to hold the Application information
+ *
+ * Deletes local IEEE App configuration other than the first application
+ * required by firmware
+ **/
+static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int i, j, ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Need one app for FW so keep it */
+ if (old_cfg->numapps == 1)
+ return 0;
+
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+
+ /* Find and reset the app */
+ for (i = 1; i < pf->tmp_cfg.numapps; i++) {
+ if (app->selector == pf->tmp_cfg.app[i].selector &&
+ app->protocol == pf->tmp_cfg.app[i].protocolid &&
+ app->priority == pf->tmp_cfg.app[i].priority) {
+ /* Reset the app data */
+ pf->tmp_cfg.app[i].selector = 0;
+ pf->tmp_cfg.app[i].protocolid = 0;
+ pf->tmp_cfg.app[i].priority = 0;
+ break;
+ }
+ }
+
+	/* The specific DCB app was not found */
+ if (i == pf->tmp_cfg.numapps)
+ return -EINVAL;
+
+ pf->tmp_cfg.numapps--;
+ /* Overwrite the tmp_cfg app */
+ for (j = i; j < pf->tmp_cfg.numapps; j++)
+ pf->tmp_cfg.app[j] = old_cfg->app[j + 1];
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_getstate - Get DCB enabled state
+ * @netdev: the corresponding netdev
+ *
+ * Get the current DCB enabled state
+ **/
+static u8 i40e_dcbnl_getstate(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "DCB state=%d\n",
+ !!(pf->flags & I40E_FLAG_DCB_ENABLED));
+ return !!(pf->flags & I40E_FLAG_DCB_ENABLED);
+}
+
+/**
+ * i40e_dcbnl_setstate - Set DCB state
+ * @netdev: the corresponding netdev
+ * @state: enable or disable
+ *
+ * Set the DCB state
+ **/
+static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int ret = I40E_DCBNL_STATUS_SUCCESS;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return ret;
+
+ dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n",
+ state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0);
+ /* Nothing to do */
+ if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return ret;
+
+ if (i40e_is_sw_dcb(pf)) {
+ if (state) {
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ memcpy(&pf->hw.desired_dcbx_config,
+ &pf->hw.local_dcbx_config,
+ sizeof(struct i40e_dcbx_config));
+ } else {
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ }
+ } else {
+ /* Cannot directly manipulate FW LLDP Agent */
+ ret = I40E_DCBNL_STATUS_ERROR;
+ }
+ return ret;
+}
+
+/**
+ * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx config
+ * @netdev: the corresponding netdev
+ * @tc: the corresponding traffic class
+ * @prio_type: the traffic priority type
+ * @bwg_id: the BW group id the traffic class belongs to
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding tc
+ *
+ * Set Tx PG settings for CEE mode
+ **/
+static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 prio_type, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ /* LLTC not supported yet */
+ if (tc >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ /* prio_type, bwg_id and bw_pct per UP are not supported */
+
+ /* Use only up_map to map tc */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (up_map & BIT(i))
+ pf->tmp_cfg.etscfg.prioritytable[i] = tc;
+ }
+ pf->tmp_cfg.etscfg.tsatable[tc] = I40E_IEEE_TSA_ETS;
+ dev_dbg(&pf->pdev->dev,
+ "Set PG config tc=%d bwg_id=%d prio_type=%d bw_pct=%d up_map=%d\n",
+ tc, bwg_id, prio_type, bw_pct, up_map);
+}
+
+/**
+ * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the specified traffic class
+ *
+ * Set Tx BW settings for CEE mode
+ **/
+static void i40e_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ /* LLTC not supported yet */
+ if (pgid >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ pf->tmp_cfg.etscfg.tcbwtable[pgid] = bw_pct;
+ dev_dbg(&pf->pdev->dev, "Set PG BW config tc=%d bw_pct=%d\n",
+ pgid, bw_pct);
+}
+
+/**
+ * i40e_dcbnl_set_pg_tc_cfg_rx - Set CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding traffic class
+ * @prio_type: the traffic priority type
+ * @pgid: the BW group id the traffic class belongs to
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding tc
+ *
+ * Set Rx BW settings for CEE mode. The hardware does not support this
+ * so we won't allow setting of this parameter.
+ **/
+static void i40e_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
+ int __always_unused prio,
+ u8 __always_unused prio_type,
+ u8 __always_unused pgid,
+ u8 __always_unused bw_pct,
+ u8 __always_unused up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "Rx TC PG Config Not Supported.\n");
+}
+
+/**
+ * i40e_dcbnl_set_pg_bwg_cfg_rx - Set CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the specified traffic class
+ *
+ * Set Rx BW settings for CEE mode. The hardware does not support this
+ * so we won't allow setting of this parameter.
+ **/
+static void i40e_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "Rx BWG PG Config Not Supported.\n");
+}
+
+/**
+ * i40e_dcbnl_get_pg_tc_cfg_tx - Get CEE PG Tx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: traffic priority type
+ * @pgid: the BW group ID the traffic class belongs to
+ * @bw_pct: BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * Get Tx PG settings for CEE mode
+ **/
+static void i40e_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type,
+ u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio];
+ dev_dbg(&pf->pdev->dev, "Get PG config prio=%d tc=%d\n",
+ prio, *pgid);
+}
+
+/**
+ * i40e_dcbnl_get_pg_bwg_cfg_tx - Get CEE PG BW config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ *
+ * Get Tx BW settings for given TC in CEE mode
+ **/
+static void i40e_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (pgid >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ *bw_pct = pf->hw.local_dcbx_config.etscfg.tcbwtable[pgid];
+ dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n",
+ pgid, *bw_pct);
+}
+
+/**
+ * i40e_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * Get Rx PG settings for CEE mode. The UP2TC map is applied in the
+ * same manner for Tx and Rx (symmetrical), so return the TC
+ * information for the given priority accordingly.
+ **/
+static void i40e_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio];
+}
+
+/**
+ * i40e_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ *
+ * Get Rx BW settings for given TC in CEE mode.
+ * The adapter doesn't support Rx ETS and runs in strict priority
+ * mode on the Rx path, hence this just returns 0.
+ **/
+static void i40e_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+ *bw_pct = 0;
+}
+
+/**
+ * i40e_dcbnl_set_pfc_cfg - Set CEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @setting: the PFC setting for given priority
+ *
+ * Set the PFC enabled/disabled setting for given user priority
+ **/
+static void i40e_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ if (setting)
+ pf->tmp_cfg.pfc.pfcenable |= BIT(prio);
+ else
+ pf->tmp_cfg.pfc.pfcenable &= ~BIT(prio);
+ dev_dbg(&pf->pdev->dev,
+ "Set PFC Config up=%d setting=%d pfcenable=0x%x\n",
+ prio, setting, pf->tmp_cfg.pfc.pfcenable);
+}
+
+/**
+ * i40e_dcbnl_get_pfc_cfg - Get CEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @setting: the PFC setting for given priority
+ *
+ * Get the PFC enabled/disabled setting for given user priority
+ **/
+static void i40e_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *setting = (pf->hw.local_dcbx_config.pfc.pfcenable >> prio) & 0x1;
+ dev_dbg(&pf->pdev->dev,
+ "Get PFC Config up=%d setting=%d pfcenable=0x%x\n",
+ prio, *setting, pf->hw.local_dcbx_config.pfc.pfcenable);
+}
+
+/**
+ * i40e_dcbnl_cee_set_all - Commit CEE DCB settings to hardware
+ * @netdev: the corresponding netdev
+ *
+ * Commit the current DCB configuration to hardware
+ **/
+static u8 i40e_dcbnl_cee_set_all(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int err;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return I40E_DCBNL_STATUS_ERROR;
+
+ dev_dbg(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n");
+ err = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+
+ return err ? I40E_DCBNL_STATUS_ERROR : I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
+ * i40e_dcbnl_get_cap - Get DCBX capabilities of adapter
+ * @netdev: the corresponding netdev
+ * @capid: the capability type
+ * @cap: the capability value
+ *
+ * Return the capability value for a given capability type
+ **/
+static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return I40E_DCBNL_STATUS_ERROR;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = pf->dcbx_cap;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ case DCB_CAP_ATTR_BCN:
+ default:
+ *cap = false;
+ break;
+ }
+
+ dev_dbg(&pf->pdev->dev, "Get Capability cap=%d capval=0x%x\n",
+ capid, *cap);
+ return I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
+ * i40e_dcbnl_getnumtcs - Get max number of traffic classes supported
+ * @netdev: the corresponding netdev
+ * @tcid: the TC id
+ * @num: total number of TCs supported by the device
+ *
+ * Return the total number of TCs supported by the adapter
+ **/
+static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return -EINVAL;
+
+ *num = I40E_MAX_TRAFFIC_CLASS;
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_setnumtcs - Set CEE number of traffic classes
+ * @netdev: the corresponding netdev
+ * @tcid: the TC id
+ * @num: total number of TCs
+ *
+ * Set the total number of TCs (Unsupported)
+ **/
+static int i40e_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ return -EINVAL;
+}
+
+/**
+ * i40e_dcbnl_getpfcstate - Get CEE PFC mode
+ * @netdev: the corresponding netdev
+ *
+ * Get the current PFC enabled state
+ **/
+static u8 i40e_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+	/* Return enabled if PFC is enabled for any UP */
+ if (pf->hw.local_dcbx_config.pfc.pfcenable)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_setpfcstate - Set CEE PFC mode
+ * @netdev: the corresponding netdev
+ * @state: required state
+ *
+ * For the i40e driver, PFC is enabled or disabled via the per-priority
+ * PFC settings, not via this call.
+ **/
+static void i40e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "PFC State is modified via PFC config.\n");
+}
+
+/**
+ * i40e_dcbnl_getapp - Get CEE APP
+ * @netdev: the corresponding netdev
+ * @idtype: the App selector
+ * @id: the App ethtype or port number
+ *
+ * Return the CEE mode app for the given idtype and id
+ **/
+static int i40e_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+/**
+ * i40e_dcbnl_setdcbx - set required DCBx capability
+ * @netdev: the corresponding netdev
+ * @mode: new DCB mode managed or CEE+IEEE
+ *
+ * Set DCBx capability features
+ **/
+static u8 i40e_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+	/* Do not allow setting the mode if managed by firmware */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return I40E_DCBNL_STATUS_ERROR;
+
+ /* No support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return I40E_DCBNL_STATUS_ERROR;
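+	/* e.g. (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE) and
+	 * (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE) are the typical
+	 * modes accepted here.
+	 */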
+
+	/* Already set to the given mode, no change needed */
+ if (mode == pf->dcbx_cap)
+ return I40E_DCBNL_STATUS_SUCCESS;
+
+ pf->dcbx_cap = mode;
+ if (mode & DCB_CAP_DCBX_VER_CEE)
+ pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ else
+ pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+
+ dev_dbg(&pf->pdev->dev, "mode=%d\n", mode);
+ return I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @dev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to store the MAC address
+ *
+ * Returns the SAN MAC address used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+ u8 *perm_addr)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < dev->addr_len; i++)
+ perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+ for (j = 0; j < dev->addr_len; j++, i++)
+ perm_addr[i] = pf->hw.mac.san_addr[j];
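+
+	/* The buffer now holds the port's LAN MAC immediately followed
+	 * by its SAN MAC (addresses purely illustrative), e.g.
+	 * 00:11:22:33:44:55 then 00:11:22:33:44:56, with the remainder
+	 * left as 0xff.
+	 */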
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = i40e_dcbnl_ieee_getets,
+ .ieee_getpfc = i40e_dcbnl_ieee_getpfc,
+ .getdcbx = i40e_dcbnl_getdcbx,
+ .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+ .ieee_setets = i40e_dcbnl_ieee_setets,
+ .ieee_setpfc = i40e_dcbnl_ieee_setpfc,
+ .ieee_setapp = i40e_dcbnl_ieee_setapp,
+ .ieee_delapp = i40e_dcbnl_ieee_delapp,
+ .getstate = i40e_dcbnl_getstate,
+ .setstate = i40e_dcbnl_setstate,
+ .setpgtccfgtx = i40e_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = i40e_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = i40e_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = i40e_dcbnl_set_pg_bwg_cfg_rx,
+ .getpgtccfgtx = i40e_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = i40e_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = i40e_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = i40e_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = i40e_dcbnl_set_pfc_cfg,
+ .getpfccfg = i40e_dcbnl_get_pfc_cfg,
+ .setall = i40e_dcbnl_cee_set_all,
+ .getcap = i40e_dcbnl_get_cap,
+ .getnumtcs = i40e_dcbnl_getnumtcs,
+ .setnumtcs = i40e_dcbnl_setnumtcs,
+ .getpfcstate = i40e_dcbnl_getpfcstate,
+ .setpfcstate = i40e_dcbnl_setpfcstate,
+ .getapp = i40e_dcbnl_getapp,
+ .setdcbx = i40e_dcbnl_setdcbx,
+};
+
+/**
+ * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
+ * @vsi: the corresponding vsi
+ *
+ * Set up all the IEEE APPs in the DCBNL App Table and generate event for
+ * other settings
+ **/
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ struct dcb_app sapp;
+ u8 prio, tc_map;
+ int i;
+
+	/* SW DCB is taken care of by the DCBNL set calls */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return;
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ /* MFP mode but not an iSCSI PF so return */
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi))
+ return;
+
+ dcbxcfg = &hw->local_dcbx_config;
+
+ /* Set up all the App TLVs if DCBx is negotiated */
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ prio = dcbxcfg->app[i].priority;
+ tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_config.enabled_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].protocolid;
+ sapp.priority = prio;
+ dcb_ieee_setapp(dev, &sapp);
+ }
+ }
+
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * i40e_dcbnl_vsi_del_app - Delete APP for given VSI
+ * @vsi: the corresponding vsi
+ * @app: APP to delete
+ *
+ * Delete given APP from the DCBNL APP table for given
+ * VSI
+ **/
+static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
+ struct i40e_dcb_app_priority_table *app)
+{
+ struct net_device *dev = vsi->netdev;
+ struct dcb_app sapp;
+
+ if (!dev)
+ return -EINVAL;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->protocolid;
+ sapp.priority = app->priority;
+ return dcb_ieee_delapp(dev, &sapp);
+}
+
+/**
+ * i40e_dcbnl_del_app - Delete APP on all VSIs
+ * @pf: the corresponding PF
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ **/
+static void i40e_dcbnl_del_app(struct i40e_pf *pf,
+ struct i40e_dcb_app_priority_table *app)
+{
+ int v, err;
+
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v] && pf->vsi[v]->netdev) {
+ err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+ pf->vsi[v]->seid, err, app->selector,
+ app->protocolid, app->priority);
+ }
+ }
+}
+
+/**
+ * i40e_dcbnl_find_app - Search APP in given DCB config
+ * @cfg: DCBX configuration data
+ * @app: APP to search for
+ *
+ * Find given APP in the DCB configuration
+ **/
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_dcb_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->protocolid == cfg->app[i].protocolid &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPs that are not present in the passed
+ * DCB configuration
+ **/
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_dcb_app_priority_table app;
+ int i;
+
+ /* MFP mode but not an iSCSI PF so return */
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ return;
+
+ for (i = 0; i < old_cfg->numapps; i++) {
+ app = old_cfg->app[i];
+		/* The APP is no longer available, so delete it */
+ if (!i40e_dcbnl_find_app(new_cfg, &app))
+ i40e_dcbnl_del_app(pf, &app);
+ }
+}
+
+/**
+ * i40e_dcbnl_setup - DCBNL setup
+ * @vsi: the corresponding vsi
+ *
+ * Set up DCBNL ops and initial APP TLVs
+ **/
+void i40e_dcbnl_setup(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ /* Not DCB capable */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return;
+
+ dev->dcbnl_ops = &dcbnl_ops;
+
+ /* Set initial IEEE DCB settings */
+ i40e_dcbnl_set_all(vsi);
+}
+#endif /* CONFIG_I40E_DCB */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
new file mode 100644
index 000000000..7e8183762
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+
+#include <linux/firmware.h>
+
+/**
+ * i40e_ddp_profiles_eq - checks whether DDP profiles are equivalent
+ * @a: new profile info
+ * @b: old profile info
+ *
+ * Checks whether two DDP profiles are equivalent.
+ * Returns true if the profiles are the same.
+ **/
+static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a,
+ struct i40e_profile_info *b)
+{
+ return a->track_id == b->track_id &&
+ !memcmp(&a->version, &b->version, sizeof(a->version)) &&
+ !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE);
+}
+
+/**
+ * i40e_ddp_does_profile_exist - checks if DDP profile loaded already
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile loaded already.
+ * Returns >0 if the profile exists.
+ * Returns 0 if the profile is absent.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ int status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -1;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_ddp_profiles_overlap - checks if DDP profiles overlap.
+ * @new: new profile info
+ * @old: old profile info
+ *
+ * Checks whether two DDP profiles overlap.
+ * Returns true if the profiles overlap.
+ **/
+static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
+ struct i40e_profile_info *old)
+{
+ unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
+ unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+
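+	/* For illustration (hypothetical IDs): track_id 0x80AB0001 gives
+	 * group 0xAB; such a profile overlaps with a new group-0xCD
+	 * profile, but not with another 0xAB-group or a wildcard
+	 * 0xFF-group profile.
+	 */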
+	/* A profile from group 0x00 may only be the first one loaded */
+	if (group_id_new == 0)
+		return true;
+	/* The 0xFF group is compatible with anything else */
+	if (group_id_new == 0xFF || group_id_old == 0xFF)
+		return false;
+	/* Otherwise only profiles from the same group are compatible */
+ return group_id_old != group_id_new;
+}
+
+/**
+ * i40e_ddp_does_profile_overlap - checks if DDP overlaps with existing one.
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile overlaps with existing one.
+ * Returns >0 if the profile overlaps.
+ * Returns 0 if the profile is ok.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ int status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -EIO;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_overlap(pinfo,
+ &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_add_pinfo
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+static int
+i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+ int status;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_del_pinfo - delete DDP profile info from NIC
+ * @hw: HW data structure
+ * @profile: DDP profile segment to be deleted
+ * @profile_info_sec: DDP profile section header
+ * @track_id: track ID of the profile for deletion
+ *
+ * Removes DDP profile from the NIC.
+ **/
+static int
+i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+ int status;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks
+ * @netdev: net device structure (for logging purposes)
+ * @pkg_hdr: pointer to package header
+ * @size_huge: size of the whole DDP profile package as a size_t
+ *
+ * Checks the correctness of the package header: version, size too
+ * big/small, and alignment and boundaries of all segment offsets.
+ * This lets the driver reject a non-DDP file loaded by administrator
+ * mistake.
+ **/
+static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
+ struct i40e_package_header *pkg_hdr,
+ size_t size_huge)
+{
+ u32 size = 0xFFFFFFFFU & size_huge;
+ u32 pkg_hdr_size;
+ u32 segment;
+
+ if (!pkg_hdr)
+ return false;
+
+ if (pkg_hdr->version.major > 0) {
+ struct i40e_ddp_version ver = pkg_hdr->version;
+
+ netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u",
+ ver.major, ver.minor, ver.update, ver.draft);
+ return false;
+ }
+ if (size_huge > size) {
+ netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
+ return false;
+ }
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP profile - size is too small.");
+ return false;
+ }
+
+ pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U);
+ if (size < pkg_hdr_size) {
+ netdev_err(netdev, "Invalid DDP profile - too many segments");
+ return false;
+ }
+ for (segment = 0; segment < pkg_hdr->segment_count; ++segment) {
+ u32 offset = pkg_hdr->segment_offset[segment];
+
+ if (0xFU & offset) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment alignment",
+ segment);
+ return false;
+ }
+ if (pkg_hdr_size > offset || offset >= size) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment offset",
+ segment);
+ return false;
+ }
+ }
+
+ return true;
+}
+
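+/*
+ * Sketch of the package layout assumed by the checks above (field
+ * sizes follow struct i40e_package_header):
+ *
+ *   struct i40e_ddp_version version;     - major must be 0
+ *   u32 segment_count;
+ *   u32 segment_offset[segment_count];   - each offset 16-byte
+ *                                          aligned and within
+ *                                          [header end, size)
+ *   ...segment data...
+ */
+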
+/**
+ * i40e_ddp_load - performs DDP loading
+ * @netdev: net device structure
+ * @data: buffer containing recipe file
+ * @size: size of the buffer
+ * @is_add: true when loading profile, false when rolling back the previous one
+ *
+ * Checks correctness and loads DDP profile to the NIC. The function is
+ * also used for rolling back previously loaded profile.
+ **/
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add)
+{
+ u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info)];
+ struct i40e_metadata_segment *metadata_hdr;
+ struct i40e_profile_segment *profile_hdr;
+ struct i40e_profile_info pinfo;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 track_id;
+ int istatus;
+ int status;
+
+ pkg_hdr = (struct i40e_package_header *)data;
+ if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
+ return -EINVAL;
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP recipe size.");
+ return -EINVAL;
+ }
+
+ /* Find beginning of segment data in buffer */
+ metadata_hdr = (struct i40e_metadata_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr);
+ if (!metadata_hdr) {
+ netdev_err(netdev, "Failed to find metadata segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ track_id = metadata_hdr->track_id;
+ profile_hdr = (struct i40e_profile_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+ if (!profile_hdr) {
+ netdev_err(netdev, "Failed to find profile segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ pinfo.track_id = track_id;
+ pinfo.version = profile_hdr->version;
+ if (is_add)
+ pinfo.op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo.op = I40E_DDP_REMOVE_TRACKID;
+
+ memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE);
+
+ /* Check if profile data already exists */
+ istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (is_add) {
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile already loaded.");
+ return -EINVAL;
+ }
+ istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile overlaps with existing one.");
+ return -EINVAL;
+ }
+ } else {
+ if (istatus == 0) {
+ netdev_err(netdev,
+ "DDP profile for deletion does not exist.");
+ return -EINVAL;
+ }
+ }
+
+ /* Load profile data */
+ if (is_add) {
+ status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+ netdev_err(netdev,
+ "Profile is not supported by the device.");
+ return -EPERM;
+ }
+ netdev_err(netdev, "Failed to write DDP profile.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to remove DDP profile.");
+ return -EIO;
+ }
+ }
+
+ /* Add/remove profile to/from profile list in FW */
+ if (is_add) {
+ status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to add DDP profile info.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to restore DDP profile info.");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
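+/*
+ * Note that loading is two-phase: the profile contents are written
+ * first (i40e_write_profile()), and only then is the profile
+ * registered in the firmware's loaded-profile list
+ * (i40e_add_pinfo()).  Rollback mirrors this with
+ * i40e_rollback_profile() and i40e_del_pinfo().
+ */
+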
+/**
+ * i40e_ddp_restore - restore previously loaded profile and remove from list
+ * @pf: PF data struct
+ *
+ * Restores the previously loaded profile stored on the list in driver
+ * memory. After rolling back, removes the entry from the list.
+ **/
+static int i40e_ddp_restore(struct i40e_pf *pf)
+{
+ struct i40e_ddp_old_profile_list *entry;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ int status = 0;
+
+ if (!list_empty(&pf->ddp_old_prof)) {
+ entry = list_first_entry(&pf->ddp_old_prof,
+ struct i40e_ddp_old_profile_list,
+ list);
+ status = i40e_ddp_load(netdev, entry->old_ddp_buf,
+ entry->old_ddp_size, false);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return status;
+}
+
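+/*
+ * Example user-space invocation (interface name is illustrative; the
+ * recipe file is looked up via request_firmware() under the kernel
+ * firmware search path, in the I40E_DDP_PROFILE_PATH subdirectory):
+ *
+ *   ethtool -f eth0 myprofile.pkg <region>    load a DDP profile
+ *   ethtool -f eth0 - <region>                roll back the last one
+ *
+ * where <region> must match I40_DDP_FLASH_REGION, as checked below.
+ */
+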
+/**
+ * i40e_ddp_flash - callback function for ethtool flash feature
+ * @netdev: net device structure
+ * @flash: kernel flash structure
+ *
+ * Ethtool callback function used for loading and unloading DDP profiles.
+ **/
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
+{
+ const struct firmware *ddp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int status = 0;
+
+ /* Check for valid region first */
+ if (flash->region != I40_DDP_FLASH_REGION) {
+ netdev_err(netdev, "Requested firmware region is not recognized by this driver.");
+ return -EINVAL;
+ }
+ if (pf->hw.bus.func != 0) {
+ netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface");
+ return -EINVAL;
+ }
+
+ /* If the user supplied "-" instead of a file name, roll back the
+ * previously stored profile.
+ */
+ if (strncmp(flash->data, "-", 2) != 0) {
+ struct i40e_ddp_old_profile_list *list_entry;
+ char profile_name[sizeof(I40E_DDP_PROFILE_PATH)
+ + I40E_DDP_PROFILE_NAME_MAX];
+
+ profile_name[sizeof(profile_name) - 1] = 0;
+ strncpy(profile_name, I40E_DDP_PROFILE_PATH,
+ sizeof(profile_name) - 1);
+ strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX);
+ /* Load DDP recipe. */
+ status = request_firmware(&ddp_config, profile_name,
+ &netdev->dev);
+ if (status) {
+ netdev_err(netdev, "DDP recipe file request failed.");
+ return status;
+ }
+
+ status = i40e_ddp_load(netdev, ddp_config->data,
+ ddp_config->size, true);
+
+ if (!status) {
+ list_entry =
+ kzalloc(sizeof(struct i40e_ddp_old_profile_list) +
+ ddp_config->size, GFP_KERNEL);
+ if (!list_entry) {
+ netdev_info(netdev, "Failed to allocate memory for previous DDP profile data.");
+ netdev_info(netdev, "New profile loaded but roll-back will be impossible.");
+ } else {
+ memcpy(list_entry->old_ddp_buf,
+ ddp_config->data, ddp_config->size);
+ list_entry->old_ddp_size = ddp_config->size;
+ list_add(&list_entry->list, &pf->ddp_old_prof);
+ }
+ }
+
+ release_firmware(ddp_config);
+ } else {
+ if (!list_empty(&pf->ddp_old_prof)) {
+ status = i40e_ddp_restore(pf);
+ } else {
+ netdev_warn(netdev, "There is no DDP profile to restore.");
+ status = -ENOENT;
+ }
+ }
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
new file mode 100644
index 000000000..62497f556
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -0,0 +1,1855 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#include "i40e.h"
+
+static struct dentry *i40e_dbg_root;
+
+enum ring_type {
+ RING_TYPE_RX,
+ RING_TYPE_TX,
+ RING_TYPE_XDP
+};
+
+/**
+ * i40e_dbg_find_vsi - searches for the vsi with the given seid
+ * @pf: the PF structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ if (seid < 0)
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ else
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * i40e_dbg_find_veb - searches for the veb with the given seid
+ * @pf: the PF structure to search for the veb
+ * @seid: seid of the veb it is searching for
+ **/
+static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == seid)
+ return pf->veb[i];
+ return NULL;
+}
+
+/**************************************************************
+ * command
+ * The command entry in debugfs is for giving the driver commands
+ * to be executed - these may be for changing the internal switch
+ * setup, adding or removing filters, or other things. Many of
+ * these will be useful for some forms of unit testing.
+ **************************************************************/
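+
+/*
+ * Example usage (PCI address and SEID are illustrative; debugfs is
+ * assumed to be mounted at /sys/kernel/debug):
+ *
+ *   echo "dump vsi 390"       > /sys/kernel/debug/i40e/0000:01:00.0/command
+ *   echo "dump desc rx 390 0" > /sys/kernel/debug/i40e/0000:01:00.0/command
+ *
+ * Output is emitted to the kernel log via dev_info().
+ */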
+static char i40e_dbg_command_buf[256] = "";
+
+/**
+ * i40e_dbg_command_read - read for command datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOSPC;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_command_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = len;
+ return len;
+}
+
+static char *i40e_filter_state_string[] = {
+ "INVALID",
+ "NEW",
+ "ACTIVE",
+ "FAILED",
+ "REMOVE",
+};
+
+/**
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+{
+ struct rtnl_link_stats64 *nstat;
+ struct i40e_mac_filter *f;
+ struct i40e_vsi *vsi;
+ int i, bkt;
+
+ vsi = i40e_dbg_find_vsi(pf, seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "dump %d: seid not found\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
+ if (vsi->netdev) {
+ struct net_device *nd = vsi->netdev;
+
+ dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
+ nd->name, nd->state, nd->flags);
+ dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
+ (unsigned long int)nd->features);
+ dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
+ (unsigned long int)nd->hw_features);
+ dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
+ (unsigned long int)nd->vlan_features);
+ }
+ dev_info(&pf->pdev->dev,
+ " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
+ vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
+ for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
+ dev_info(&pf->pdev->dev,
+ " state[%d] = %08lx\n",
+ i, vsi->state[i]);
+ if (vsi == pf->vsi[pf->lan_vsi])
+ dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+ pf->hw.mac.addr,
+ pf->hw.mac.san_addr,
+ pf->hw.mac.port_addr);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ dev_info(&pf->pdev->dev,
+ " mac_filter_hash: %pM vid=%d, state %s\n",
+ f->macaddr, f->vlan,
+ i40e_filter_state_string[f->state]);
+ }
+ dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
+ vsi->active_filters, vsi->promisc_threshold,
+ (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
+ "ON" : "OFF"));
+ nstat = i40e_get_vsi_stats_struct(vsi);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (unsigned long int)nstat->rx_packets,
+ (unsigned long int)nstat->rx_bytes,
+ (unsigned long int)nstat->rx_errors,
+ (unsigned long int)nstat->rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (unsigned long int)nstat->tx_packets,
+ (unsigned long int)nstat->tx_bytes,
+ (unsigned long int)nstat->tx_errors,
+ (unsigned long int)nstat->tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: multicast = %lu, collisions = %lu\n",
+ (unsigned long int)nstat->multicast,
+ (unsigned long int)nstat->collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (unsigned long int)nstat->rx_length_errors,
+ (unsigned long int)nstat->rx_over_errors,
+ (unsigned long int)nstat->rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (unsigned long int)nstat->rx_frame_errors,
+ (unsigned long int)nstat->rx_fifo_errors,
+ (unsigned long int)nstat->rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (unsigned long int)nstat->tx_aborted_errors,
+ (unsigned long int)nstat->tx_carrier_errors,
+ (unsigned long int)nstat->tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (unsigned long int)nstat->tx_heartbeat_errors,
+ (unsigned long int)nstat->tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
+ (unsigned long int)nstat->rx_compressed,
+ (unsigned long int)nstat->tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.rx_packets,
+ (unsigned long int)vsi->net_stats_offsets.rx_bytes,
+ (unsigned long int)vsi->net_stats_offsets.rx_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.tx_packets,
+ (unsigned long int)vsi->net_stats_offsets.tx_bytes,
+ (unsigned long int)vsi->net_stats_offsets.tx_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: multicast = %lu, collisions = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.multicast,
+ (unsigned long int)vsi->net_stats_offsets.collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
+ (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+ (unsigned long int)vsi->net_stats_offsets.tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
+ vsi->tx_restart, vsi->tx_busy,
+ vsi->rx_buf_failed, vsi->rx_page_failed);
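+ /* Ring pointers can be swapped out concurrently; hold the RCU read
+ * lock and READ_ONCE() each ring pointer before dereferencing it.
+ */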
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
+
+ if (!rx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *rx_ring->state,
+ rx_ring->queue_index,
+ rx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_buf_len = %d\n",
+ i, rx_ring->rx_buf_len);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
+ rx_ring->next_to_use,
+ rx_ring->next_to_clean,
+ rx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, rx_ring->stats.packets,
+ rx_ring->stats.bytes,
+ rx_ring->rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
+ i,
+ rx_ring->rx_stats.alloc_page_failed,
+ rx_ring->rx_stats.alloc_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
+ i,
+ rx_ring->rx_stats.page_reuse_count);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i\n",
+ i, rx_ring->size);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: itr_setting = %d (%s)\n",
+ i, rx_ring->itr_setting,
+ ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
+
+ if (!tx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *tx_ring->state,
+ tx_ring->queue_index,
+ tx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, tx_ring->stats.packets,
+ tx_ring->stats.bytes,
+ tx_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
+ i,
+ tx_ring->tx_stats.tx_busy,
+ tx_ring->tx_stats.tx_done_old,
+ tx_ring->tx_stats.tx_stopped);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i\n",
+ i, tx_ring->size);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, tx_ring->dcb_tc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: itr_setting = %d (%s)\n",
+ i, tx_ring->itr_setting,
+ ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
+ }
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
+
+ if (!xdp_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *xdp_ring->state,
+ xdp_ring->queue_index,
+ xdp_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
+ xdp_ring->next_to_use,
+ xdp_ring->next_to_clean,
+ xdp_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, xdp_ring->stats.packets,
+ xdp_ring->stats.bytes,
+ xdp_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ xdp_ring->tx_stats.tx_busy,
+ xdp_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: size = %i\n",
+ i, xdp_ring->size);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: DCB tc = %d\n",
+ i, xdp_ring->dcb_tc);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: itr_setting = %d (%s)\n",
+ i, xdp_ring->itr_setting,
+ ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
+ "dynamic" : "fixed");
+ }
+ }
+ rcu_read_unlock();
+ dev_info(&pf->pdev->dev,
+ " work_limit = %d\n",
+ vsi->work_limit);
+ dev_info(&pf->pdev->dev,
+ " max_frame = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_buf_len, 0);
+ dev_info(&pf->pdev->dev,
+ " num_q_vectors = %i, base_vector = %i\n",
+ vsi->num_q_vectors, vsi->base_vector);
+ dev_info(&pf->pdev->dev,
+ " seid = %d, id = %d, uplink_seid = %d\n",
+ vsi->seid, vsi->id, vsi->uplink_seid);
+ dev_info(&pf->pdev->dev,
+ " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
+ vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
+ vsi->num_rx_desc);
+ dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
+ if (vsi->type == I40E_VSI_SRIOV)
+ dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
+ dev_info(&pf->pdev->dev,
+ " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
+ vsi->info.valid_sections, vsi->info.switch_id);
+ dev_info(&pf->pdev->dev,
+ " info: sw_reserved[] = 0x%02x 0x%02x\n",
+ vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
+ dev_info(&pf->pdev->dev,
+ " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
+ vsi->info.sec_flags, vsi->info.sec_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
+ vsi->info.pvid, vsi->info.fcoe_pvid,
+ vsi->info.port_vlan_flags);
+ dev_info(&pf->pdev->dev,
+ " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
+ vsi->info.pvlan_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
+ vsi->info.ingress_table, vsi->info.egress_table);
+ dev_info(&pf->pdev->dev,
+ " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
+ vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
+ vsi->info.cas_pv_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
+ vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
+ vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
+ vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
+ vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
+ vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
+ vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
+ dev_info(&pf->pdev->dev,
+ " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
+ vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
+ vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
+ vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.queueing_opt_flags,
+ vsi->info.queueing_opt_reserved[0],
+ vsi->info.queueing_opt_reserved[1],
+ vsi->info.queueing_opt_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: up_enable_bits = 0x%02x\n",
+ vsi->info.up_enable_bits);
+ dev_info(&pf->pdev->dev,
+ " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
+ vsi->info.sched_reserved, vsi->info.outer_up_table);
+ dev_info(&pf->pdev->dev,
+ " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
+ vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
+ vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
+ vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
+ vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
+ dev_info(&pf->pdev->dev,
+ " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.qs_handle[0], vsi->info.qs_handle[1],
+ vsi->info.qs_handle[2], vsi->info.qs_handle[3],
+ vsi->info.qs_handle[4], vsi->info.qs_handle[5],
+ vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
+ dev_info(&pf->pdev->dev,
+ " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
+ vsi->info.stat_counter_idx, vsi->info.sched_id);
+ dev_info(&pf->pdev->dev,
+ " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
+ vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
+ vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
+ vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
+ vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
+ vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
+ dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
+ dev_info(&pf->pdev->dev,
+ " tc_config: numtc = %d, enabled_tc = 0x%x\n",
+ vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
+ i, vsi->tc_config.tc_info[i].qoffset,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].netdev_tc);
+ }
+ dev_info(&pf->pdev->dev,
+ " bw: bw_limit = %d, bw_max_quanta = %d\n",
+ vsi->bw_limit, vsi->bw_max_quanta);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
+ i, vsi->bw_ets_share_credits[i],
+ vsi->bw_ets_limit_credits[i],
+ vsi->bw_ets_max_quanta[i]);
+ }
+}
+
+/**
+ * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
+{
+ struct i40e_adminq_ring *ring;
+ struct i40e_hw *hw = &pf->hw;
+ char hdr[32];
+ int i;
+
+ snprintf(hdr, sizeof(hdr), "%s %s: ",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
+ /* first the send (command) ring, then the receive (event) ring */
+ dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
+ ring = &(hw->aq.asq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
+ dev_info(&pf->pdev->dev,
+ " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
+ }
+
+ dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
+ ring = &(hw->aq.arq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
+ dev_info(&pf->pdev->dev,
+ " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
+ }
+}
+
+/**
+ * i40e_dbg_dump_desc - handles dump desc write into command datum
+ * @cnt: number of arguments that the user supplied
+ * @vsi_seid: vsi id entered by user
+ * @ring_id: ring id entered by user
+ * @desc_n: descriptor number entered by user
+ * @pf: the i40e_pf created in command write
+ * @type: enum describing whether ring is RX, TX or XDP
+ **/
+static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+ struct i40e_pf *pf, enum ring_type type)
+{
+ bool is_rx_ring = type == RING_TYPE_RX;
+ struct i40e_tx_desc *txd;
+ union i40e_rx_desc *rxd;
+ struct i40e_ring *ring;
+ struct i40e_vsi *vsi;
+ int i;
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
+ return;
+ }
+ if (vsi->type != I40E_VSI_MAIN &&
+ vsi->type != I40E_VSI_FDIR &&
+ vsi->type != I40E_VSI_VMDQ2) {
+ dev_info(&pf->pdev->dev,
+ "vsi %d type %d descriptor rings not available\n",
+ vsi_seid, vsi->type);
+ return;
+ }
+ if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
+ dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
+ return;
+ }
+ if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
+ dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
+ return;
+ }
+ if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
+ dev_info(&pf->pdev->dev,
+ "descriptor rings have not been allocated for vsi %d\n",
+ vsi_seid);
+ return;
+ }
+
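+ /* Dump from a kmemdup() copy of the ring structure so its fields
+ * stay consistent even while the ring itself is live.
+ */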
+ switch (type) {
+ case RING_TYPE_RX:
+ ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+ break;
+ case RING_TYPE_TX:
+ ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+ break;
+ case RING_TYPE_XDP:
+ ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+ break;
+ default:
+ ring = NULL;
+ break;
+ }
+ if (!ring)
+ return;
+
+ if (cnt == 2) {
+ switch (type) {
+ case RING_TYPE_RX:
+ dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
+ break;
+ case RING_TYPE_TX:
+ dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
+ break;
+ case RING_TYPE_XDP:
+ dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
+ break;
+ }
+ for (i = 0; i < ring->count; i++) {
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(ring, i);
+ dev_info(&pf->pdev->dev,
+ " d[%03x] = 0x%016llx 0x%016llx\n",
+ i, txd->buffer_addr,
+ txd->cmd_type_offset_bsz);
+ } else {
+ rxd = I40E_RX_DESC(ring, i);
+ dev_info(&pf->pdev->dev,
+ " d[%03x] = 0x%016llx 0x%016llx\n",
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr);
+ }
+ }
+ } else if (cnt == 3) {
+ if (desc_n >= ring->count || desc_n < 0) {
+ dev_info(&pf->pdev->dev,
+ "descriptor %d not found\n", desc_n);
+ goto out;
+ }
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(ring, desc_n);
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ txd->buffer_addr, txd->cmd_type_offset_bsz);
+ } else {
+ rxd = I40E_RX_DESC(ring, desc_n);
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
+ }
+ } else {
+ dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
+ }
+
+out:
+ kfree(ring);
+}
+
+/**
+ * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i])
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
+ i, pf->vsi[i]->seid);
+}
+
+/**
+ * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
+ * @pf: the i40e_pf created in command write
+ * @estats: the eth stats structure to be dumped
+ **/
+static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
+ struct i40e_eth_stats *estats)
+{
+ dev_info(&pf->pdev->dev, " ethstats:\n");
+ dev_info(&pf->pdev->dev,
+ " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
+ estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
+ dev_info(&pf->pdev->dev,
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
+ estats->rx_broadcast, estats->rx_discards);
+ dev_info(&pf->pdev->dev,
+ " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_unknown_protocol, estats->tx_bytes);
+ dev_info(&pf->pdev->dev,
+ " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
+ estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
+ dev_info(&pf->pdev->dev,
+ " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
+ estats->tx_discards, estats->tx_errors);
+}
+
+/**
+ * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
+{
+ struct i40e_veb *veb;
+
+ veb = i40e_dbg_find_veb(pf, seid);
+ if (!veb) {
+ dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev,
+ "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
+ veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+ veb->uplink_seid,
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ i40e_dbg_dump_eth_stats(pf, &veb->stats);
+}
+
+/**
+ * i40e_dbg_dump_veb_all - dumps all known veb's stats
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ veb = pf->veb[i];
+ if (veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
+ }
+}
+
+/**
+ * i40e_dbg_dump_vf - dump VF info
+ * @pf: the i40e_pf created in command write
+ * @vf_id: the vf_id from the user
+ **/
+static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
+{
+ struct i40e_vf *vf;
+ struct i40e_vsi *vsi;
+
+ if (!pf->num_alloc_vfs) {
+ dev_info(&pf->pdev->dev, "no VFs allocated\n");
+ } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
+ vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
+ dev_info(&pf->pdev->dev, " num MDD=%lld\n",
+ vf->num_mdd_events);
+ } else {
+ dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
+ }
+}
+
+/**
+ * i40e_dbg_dump_vf_all - dump VF info for all VFs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
+{
+ int i;
+
+ if (!pf->num_alloc_vfs)
+ dev_info(&pf->pdev->dev, "no VFs enabled!\n");
+ else
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ i40e_dbg_dump_vf(pf, i);
+}
+
+/**
+ * i40e_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ int vsi_seid;
+ int veb_seid;
+ int vf_id;
+ int cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+ if (bytes_not_copied) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ if (strncmp(cmd_buf, "add vsi", 7) == 0) {
+ vsi_seid = -1;
+ cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ if (cnt == 0) {
+ /* default to PF VSI */
+ vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ } else if (vsi_seid < 0) {
+ dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ /* By default we are in VEPA mode; if this is the first VF/VMDq
+ * VSI to be added, switch to VEB mode.
+ */
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ }
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
+ if (vsi)
+ dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
+ vsi->seid, vsi->uplink_seid);
+ else
+ dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
+
+ } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
+ cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del vsi: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
+ i40e_vsi_release(vsi);
+
+ } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
+ struct i40e_veb *veb;
+ int uplink_seid, i;
+
+ cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (uplink_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "add relay %d: bad uplink seid\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
+ break;
+ if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
+ uplink_seid != pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: relay uplink %d not found\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
+ vsi->tc_config.enabled_tc);
+ if (veb)
+ dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
+ else
+ dev_info(&pf->pdev->dev, "add relay failed\n");
+
+ } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ int i;
+
+ cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (veb_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "del relay %d: bad relay seid\n", veb_seid);
+ goto command_write_done;
+ }
+
+ /* find the veb */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ break;
+ if (i >= I40E_MAX_VEB) {
+ dev_info(&pf->pdev->dev,
+ "del relay: relay %d not found\n", veb_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+ i40e_veb_release(pf->veb[i]);
+ } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+ unsigned int v;
+ int ret;
+ u16 vid;
+
+ cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add pvid: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ vid = v;
+ ret = i40e_vsi_add_pvid(vsi, vid);
+ if (!ret)
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d added to VSI %d\n",
+ vid, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d to VSI %d failed, ret=%d\n",
+ vid, vsi_seid, ret);
+
+ } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
+
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ i40e_vsi_remove_pvid(vsi);
+ dev_info(&pf->pdev->dev,
+ "del pvid: removed from VSI %d\n", vsi_seid);
+
+ } else if (strncmp(cmd_buf, "dump", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
+ i40e_fetch_switch_configuration(pf, true);
+ } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_vsi_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_vsi_no_seid(pf);
+ } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_veb_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_veb_all(pf);
+ } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
+ cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
+ if (cnt > 0)
+ i40e_dbg_dump_vf(pf, vf_id);
+ else
+ i40e_dbg_dump_vf_all(pf);
+ } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
+ int ring_id, desc_n;
+
+ if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, RING_TYPE_RX);
+ } else if (strncmp(&cmd_buf[10], "tx", 2)
+ == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, RING_TYPE_TX);
+ } else if (strncmp(&cmd_buf[10], "xdp", 3)
+ == 0) {
+ cnt = sscanf(&cmd_buf[13], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, RING_TYPE_XDP);
+ } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
+ i40e_dbg_dump_aq_desc(pf);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev,
+ "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev,
+ "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc aq\n");
+ }
+ } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
+ dev_info(&pf->pdev->dev,
+ "core reset count: %d\n", pf->corer_count);
+ dev_info(&pf->pdev->dev,
+ "global reset count: %d\n", pf->globr_count);
+ dev_info(&pf->pdev->dev,
+ "emp reset count: %d\n", pf->empr_count);
+ dev_info(&pf->pdev->dev,
+ "pf reset count: %d\n", pf->pfr_count);
+ dev_info(&pf->pdev->dev,
+ "pf tx sluggish count: %d\n",
+ pf->tx_sluggish_count);
+ } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
+ struct i40e_aqc_query_port_ets_config_resp *bw_data;
+ struct i40e_dcbx_config *cfg =
+ &pf->hw.local_dcbx_config;
+ struct i40e_dcbx_config *r_cfg =
+ &pf->hw.remote_dcbx_config;
+ int i, ret;
+ u16 switch_id;
+
+ bw_data = kzalloc(sizeof(
+ struct i40e_aqc_query_port_ets_config_resp),
+ GFP_KERNEL);
+ if (!bw_data) {
+ ret = -ENOMEM;
+ goto command_write_done;
+ }
+
+ vsi = pf->vsi[pf->lan_vsi];
+ switch_id =
+ le16_to_cpu(vsi->info.switch_id) &
+ I40E_AQ_VSI_SW_ID_MASK;
+
+ ret = i40e_aq_query_port_ets_config(&pf->hw,
+ switch_id,
+ bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Query Port ETS Config AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(bw_data);
+ bw_data = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
+ bw_data->tc_valid_bits,
+ bw_data->tc_strict_priority_bits,
+ le16_to_cpu(bw_data->tc_bw_max[0]),
+ le16_to_cpu(bw_data->tc_bw_max[1]));
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
+ bw_data->tc_bw_share_credits[i],
+ le16_to_cpu(bw_data->tc_bw_limits[i]));
+ }
+
+ kfree(bw_data);
+ bw_data = NULL;
+
+ dev_info(&pf->pdev->dev,
+ "port dcbx_mode=%d\n", cfg->dcbx_mode);
+ dev_info(&pf->pdev->dev,
+ "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ cfg->etscfg.willing, cfg->etscfg.cbs,
+ cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etscfg.prioritytable[i],
+ cfg->etscfg.tcbwtable[i],
+ cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etsrec.prioritytable[i],
+ cfg->etsrec.tcbwtable[i],
+ cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ cfg->pfc.willing, cfg->pfc.mbc,
+ cfg->pfc.pfccap, cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "port app_table: num_apps=%d\n", cfg->numapps);
+ for (i = 0; i < cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, cfg->app[i].priority,
+ cfg->app[i].selector,
+ cfg->app[i].protocolid);
+ }
+ /* Peer TLV DCBX data */
+ dev_info(&pf->pdev->dev,
+ "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ r_cfg->etscfg.willing,
+ r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etscfg.prioritytable[i],
+ r_cfg->etscfg.tcbwtable[i],
+ r_cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etsrec.prioritytable[i],
+ r_cfg->etsrec.tcbwtable[i],
+ r_cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ r_cfg->pfc.willing,
+ r_cfg->pfc.mbc,
+ r_cfg->pfc.pfccap,
+ r_cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "remote port app_table: num_apps=%d\n",
+ r_cfg->numapps);
+ for (i = 0; i < r_cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, r_cfg->app[i].priority,
+ r_cfg->app[i].selector,
+ r_cfg->app[i].protocolid);
+ }
+ } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
+ int cluster_id, table_id;
+ int index, ret;
+ u16 buff_len = 4096;
+ u32 next_index;
+ u8 next_table;
+ u8 *buff;
+ u16 rlen;
+
+ cnt = sscanf(&cmd_buf[18], "%i %i %i",
+ &cluster_id, &table_id, &index);
+ if (cnt != 3) {
+ dev_info(&pf->pdev->dev,
+ "dump debug fwdata <cluster_id> <table_id> <index>\n");
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "AQ debug dump fwdata params %x %x %x %x\n",
+ cluster_id, table_id, index, buff_len);
+ buff = kzalloc(buff_len, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
+ index, buff_len, buff, &rlen,
+ &next_table, &next_index,
+ NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "debug dump fwdata AQ Failed %d 0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
+ rlen, next_table, next_index);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, rlen, true);
+ kfree(buff);
+ buff = NULL;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
+ dev_info(&pf->pdev->dev, "dump switch\n");
+ dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
+ dev_info(&pf->pdev->dev, "dump reset stats\n");
+ dev_info(&pf->pdev->dev, "dump port\n");
+ dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
+ dev_info(&pf->pdev->dev,
+ "dump debug fwdata <cluster_id> <table_id> <index>\n");
+ }
+ } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
+ dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "corer", 5) == 0) {
+ dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
+ i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "globr", 5) == 0) {
+ dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
+ i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "read", 4) == 0) {
+ u32 address;
+ u32 value;
+
+ cnt = sscanf(&cmd_buf[4], "%i", &address);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "read <reg>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address > (pf->ioremap_len - sizeof(u32))) {
+ dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+ address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
+ goto command_write_done;
+ }
+
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
+ address, value);
+
+ } else if (strncmp(cmd_buf, "write", 5) == 0) {
+ u32 address, value;
+
+ cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "write <reg> <value>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address > (pf->ioremap_len - sizeof(u32))) {
+ dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+ address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
+ goto command_write_done;
+ }
+ wr32(&pf->hw, address, value);
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
+ address, value);
+ } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
+ if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
+ if (cnt == 0) {
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ i40e_vsi_reset_stats(pf->vsi[i]);
+ dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
+ } else if (cnt == 1) {
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "clear_stats vsi: bad vsi %d\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ i40e_vsi_reset_stats(vsi);
+ dev_info(&pf->pdev->dev,
+ "vsi clear stats called for vsi %d\n",
+ vsi_seid);
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
+ }
+ } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
+ if (pf->hw.partition_id == 1) {
+ i40e_pf_reset_stats(pf);
+ dev_info(&pf->pdev->dev, "port stats cleared\n");
+ } else {
+ dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
+ }
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
+ }
+ } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ struct i40e_aq_desc *desc;
+ int ret;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
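+ /* Expected input (values illustrative):
+ *   send aq_cmd 0x0 0x0001 0 0 0 0 0 0 0 0
+ * i.e. flags, opcode, datalen, retval, cookie_high, cookie_low
+ * and params 0-3, all parsed by the sscanf() below.
+ */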
+ cnt = sscanf(&cmd_buf[11],
+ "%hi %hi %hi %hi %i %i %i %i %i %i",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "send aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ kfree(desc);
+ desc = NULL;
+ } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ struct i40e_aq_desc *desc;
+ u16 buffer_len;
+ u8 *buff;
+ int ret;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[20],
+ "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3,
+ &buffer_len);
+ if (cnt != 11) {
+ dev_info(&pf->pdev->dev,
+ "send indirect aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ /* Just stub a buffer big enough in case user messed up */
+ if (buffer_len == 0)
+ buffer_len = 1280;
+
+ buff = kzalloc(buffer_len, GFP_KERNEL);
+ if (!buff) {
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ ret = i40e_asq_send_command(&pf->hw, desc, buff,
+ buffer_len, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, buffer_len, true);
+ kfree(buff);
+ buff = NULL;
+ kfree(desc);
+ desc = NULL;
+ } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
+ dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
+ i40e_get_current_fd_count(pf));
+ } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
+ int ret;
+
+ ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Stop LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, true, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Add Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
+ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
+ int ret;
+
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, false, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ /* Continue and start FW LLDP anyway */
+ }
+
+ ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Start LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
+ } else if (strncmp(&cmd_buf[5],
+ "get local", 9) == 0) {
+ u16 llen, rlen;
+ int ret;
+ u8 *buff;
+
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
+ I40E_AQ_LLDP_MIB_LOCAL,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (local) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+ u16 llen, rlen;
+ int ret;
+ u8 *buff;
+
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ I40E_AQ_LLDP_MIB_REMOTE,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (remote) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
+ int ret;
+
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ true, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
+ int ret;
+
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ }
+ } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
+ u16 buffer_len, bytes;
+ u16 module;
+ u32 offset;
+ u16 *buff;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
+ &module, &offset, &buffer_len);
+ if (cnt == 0) {
+ module = 0;
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 1) {
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 2) {
+ buffer_len = 0;
+ } else if (cnt > 3) {
+ dev_info(&pf->pdev->dev,
+ "nvm read: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+ /* set the max length */
+ buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
+
+ bytes = 2 * buffer_len;
+
+ /* read at least 1kB, no more than 4kB */
+ bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
+ buff = kzalloc(bytes, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(buff);
+ goto command_write_done;
+ }
+
+ ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
+ bytes, (u8 *)buff, true, NULL);
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Read NVM AQ failed err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Read NVM module=0x%x offset=0x%x words=%d\n",
+ module, offset, buffer_len);
+ if (bytes)
+ print_hex_dump(KERN_INFO, "NVM Dump: ",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ buff, bytes, true);
+ }
+ kfree(buff);
+ buff = NULL;
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
+ dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
+ dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
+ dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
+ dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " dump switch\n");
+ dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc aq\n");
+ dev_info(&pf->pdev->dev, " dump reset stats\n");
+ dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
+ dev_info(&pf->pdev->dev, " read <reg>\n");
+ dev_info(&pf->pdev->dev, " write <reg> <value>\n");
+ dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " clear_stats port\n");
+ dev_info(&pf->pdev->dev, " pfr\n");
+ dev_info(&pf->pdev->dev, " corer\n");
+ dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+ dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
+ dev_info(&pf->pdev->dev, " fd current cnt");
+ dev_info(&pf->pdev->dev, " lldp start\n");
+ dev_info(&pf->pdev->dev, " lldp stop\n");
+ dev_info(&pf->pdev->dev, " lldp get local\n");
+ dev_info(&pf->pdev->dev, " lldp get remote\n");
+ dev_info(&pf->pdev->dev, " lldp event on\n");
+ dev_info(&pf->pdev->dev, " lldp event off\n");
+ dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
+ }
+
+command_write_done:
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+ return count;
+}
+
+static const struct file_operations i40e_dbg_command_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_command_read,
+ .write = i40e_dbg_command_write,
+};
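+
+/* Example usage (illustrative only; assumes debugfs is mounted at
+ * /sys/kernel/debug and the PF sits at PCI address 0000:01:00.0):
+ *
+ *   # echo "lldp get local" > /sys/kernel/debug/i40e/0000:01:00.0/command
+ *   # dmesg | tail
+ *
+ * The commands report through dev_info(), so their output lands in the
+ * kernel log rather than being read back from the file itself.
+ */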
+
+/**************************************************************
+ * netdev_ops
+ * The netdev_ops entry in debugfs lets the user invoke the driver's
+ * netdev operations (MTU change, rx_mode refresh, NAPI scheduling).
+ **************************************************************/
+static char i40e_dbg_netdev_ops_buf[256] = "";
+
+/**
+ * i40e_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_netdev_ops_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * i40e_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ char *buf_tmp;
+ int vsi_seid;
+ int i, cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(i40e_dbg_netdev_ops_buf))
+ return -ENOSPC;
+
+ memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
+ bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
+ buffer, count);
+ if (bytes_not_copied)
+ return -EFAULT;
+ i40e_dbg_netdev_ops_buf[count] = '\0';
+
+ buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ if (buf_tmp) {
+ *buf_tmp = '\0';
+ count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ }
+
+ if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ int mtu;
+
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+ &vsi_seid, &mtu);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "change_mtu: VSI %d not found\n", vsi_seid);
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
+ mtu);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "change_mtu called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "set_rx_mode: VSI %d not found\n", vsi_seid);
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "set_rx_mode called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
+ vsi_seid);
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
+ vsi_seid);
+ } else {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ napi_schedule(&vsi->q_vectors[i]->napi);
+ dev_info(&pf->pdev->dev, "napi called\n");
+ }
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+ i40e_dbg_netdev_ops_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
+ dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
+ }
+netdev_ops_write_done:
+ return count;
+}
+
+static const struct file_operations i40e_dbg_netdev_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_netdev_ops_read,
+ .write = i40e_dbg_netdev_ops_write,
+};
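+
+/* Example usage (illustrative; the VSI SEID value is hypothetical):
+ *
+ *   # echo "napi 390" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
+ *
+ * which schedules NAPI on every queue vector of the VSI with SEID 390.
+ */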
+
+/**
+ * i40e_dbg_pf_init - setup the debugfs directory for the PF
+ * @pf: the PF that is starting up
+ **/
+void i40e_dbg_pf_init(struct i40e_pf *pf)
+{
+ const char *name = pci_name(pf->pdev);
+
+ pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
+
+ debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
+
+ debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
+}
+
+/**
+ * i40e_dbg_pf_exit - clear out the PF's debugfs entries
+ * @pf: the PF that is stopping
+ **/
+void i40e_dbg_pf_exit(struct i40e_pf *pf)
+{
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ pf->i40e_dbg_pf = NULL;
+}
+
+/**
+ * i40e_dbg_init - start up debugfs for the driver
+ **/
+void i40e_dbg_init(void)
+{
+ i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
+ if (IS_ERR(i40e_dbg_root))
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * i40e_dbg_exit - clean out the driver's debugfs entries
+ **/
+void i40e_dbg_exit(void)
+{
+ debugfs_remove_recursive(i40e_dbg_root);
+ i40e_dbg_root = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
new file mode 100644
index 000000000..d9c51a238
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_X710_N3000 0x0CF8
+#define I40E_DEV_ID_XXV710_N3000 0x0D58
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
+#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
+#define I40E_DEV_ID_10G_B 0x104F
+#define I40E_DEV_ID_10G_SFP 0x104E
+#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2
+#define I40E_IS_X710TL_DEVICE(d) \
+ (((d) == I40E_DEV_ID_1G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_10G_BASE_T_BC))
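+/* Example: a (hypothetical) caller can branch on the X710-TL family with
+ *
+ *   if (I40E_IS_X710TL_DEVICE(hw->device_id))
+ *           ...
+ */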
+#define I40E_DEV_ID_KX_X722 0x37CE
+#define I40E_DEV_ID_QSFP_X722 0x37CF
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_SFP_X722_A 0x0DDA
+
+#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
new file mode 100644
index 000000000..97fe1787a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
+{
+ static const u32 patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
+ u32 pat, val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ pat = patterns[i];
+ wr32(hw, reg, (pat & mask));
+ val = rd32(hw, reg);
+ if ((val & mask) != (pat & mask)) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
+ __func__, reg, pat, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
+ __func__, reg, orig_val, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return 0;
+}
+
+const struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ /* offset mask elements stride */
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1,
+ I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3,
+ I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
+ I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1,
+ I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1,
+ I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ { 0 }
+};
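+
+/* Note: each stride above is derived from adjacent register macros, e.g.
+ * I40E_QTX_CTL(1) - I40E_QTX_CTL(0) is the byte distance between two
+ * consecutive per-queue QTX_CTL registers, so element j is tested at
+ * offset + j * stride.
+ */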
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+int i40e_diag_reg_test(struct i40e_hw *hw)
+{
+ int ret_code = 0;
+ u32 reg, mask;
+ u32 elements;
+ u32 i, j;
+
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ !ret_code; i++) {
+
+ elements = i40e_reg_list[i].elements;
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ elements = hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
+ mask = i40e_reg_list[i].mask;
+ for (j = 0; j < elements && !ret_code; j++) {
+ reg = i40e_reg_list[i].offset +
+ (j * i40e_reg_list[i].stride);
+ ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+int i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+ int ret_code;
+ u16 reg_val;
+
+ /* read NVM control word and if NVM valid, validate EEPROM checksum */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+ if (!ret_code &&
+ ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+ BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+ return i40e_validate_nvm_checksum(hw, NULL);
+ else
+ return I40E_ERR_DIAG_TEST_FAILED;
+}
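+
+/* Both diagnostics above back the ethtool self-test path: "Register test
+ * (offline)" maps to i40e_diag_reg_test() and "Eeprom test (offline)" to
+ * i40e_diag_eeprom_test(), run via `ethtool -t <ifname>`.
+ */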
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
new file mode 100644
index 000000000..c3ce5f352
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+ u32 offset; /* the base register */
+ u32 mask; /* bits that can be tested */
+ u32 elements; /* number of elements if array */
+ u32 stride; /* bytes between each element */
+};
+
+extern const struct i40e_diag_reg_test_info i40e_reg_list[];
+
+int i40e_diag_reg_test(struct i40e_hw *hw);
+int i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
new file mode 100644
index 000000000..107bcca7d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -0,0 +1,5818 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+/* ethtool support for i40e */
+
+#include "i40e.h"
+#include "i40e_diag.h"
+#include "i40e_txrx_common.h"
+
+/* ethtool statistics helpers */
+
+/**
+ * struct i40e_stats - definition for an ethtool statistic
+ * @stat_string: statistic name to display in ethtool -S output
+ * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
+ * @stat_offset: offsetof() the stat from a base pointer
+ *
+ * This structure defines a statistic to be added to the ethtool stats buffer.
+ * It defines a statistic as offset from a common base pointer. Stats should
+ * be defined in constant arrays using the I40E_STAT macro, with every element
+ * of the array using the same _type for calculating the sizeof_stat and
+ * stat_offset.
+ *
+ * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
+ * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
+ * the i40e_add_ethtool_stat() helper function.
+ *
+ * The @stat_string is interpreted as a format string, allowing formatted
+ * values to be inserted while looping over multiple structures for a given
+ * statistics array. Thus, every statistic string in an array should have the
+ * same type and number of format specifiers, to be formatted by variadic
+ * arguments to the i40e_add_stat_string() helper function.
+ **/
+struct i40e_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+/* Helper macro to define an i40e_stat structure with proper size and type.
+ * Use this when defining constant statistics arrays. Note that @_type expects
+ * only a type name and is used multiple times.
+ */
+#define I40E_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = sizeof_field(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
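+
+/* For example,
+ *
+ *   I40E_STAT(struct i40e_ring, "tx-0.packets", stats.packets)
+ *
+ * expands to { .stat_string = "tx-0.packets",
+ *              .sizeof_stat = sizeof_field(struct i40e_ring, stats.packets),
+ *              .stat_offset = offsetof(struct i40e_ring, stats.packets) }.
+ */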
+
+/* Helper macro for defining some statistics directly copied from the netdev
+ * stats structure.
+ */
+#define I40E_NETDEV_STAT(_net_stat) \
+ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
+
+/* Helper macro for defining some statistics related to queues */
+#define I40E_QUEUE_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_ring, _name, _stat)
+
+/* Stats associated with a Tx or Rx ring */
+static const struct i40e_stats i40e_gstrings_queue_stats[] = {
+ I40E_QUEUE_STAT("%s-%u.packets", stats.packets),
+ I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes),
+};
+
+/**
+ * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pointer: basis for where to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. Used to implement i40e_add_ethtool_stats and
+ * i40e_add_queue_stats. If the pointer is null, data will be zero'd.
+ */
+static void
+i40e_add_one_ethtool_stat(u64 *data, void *pointer,
+ const struct i40e_stats *stat)
+{
+ char *p;
+
+ if (!pointer) {
+ /* ensure that the ethtool data buffer is zero'd for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pointer + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location to copy stats from
+ * @stats: array of stats to copy
+ * @size: the size of the stats definition
+ *
+ * Copy the stats defined by the stats array using the pointer as a base into
+ * the data buffer supplied by ethtool. Updates the data pointer to point to
+ * the next empty location for successive calls to __i40e_add_ethtool_stats.
+ * If pointer is null, set the data values to zero and update the pointer to
+ * skip these stats.
+ **/
+static void
+__i40e_add_ethtool_stats(u64 **data, void *pointer,
+ const struct i40e_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
+}
+
+/**
+ * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location where stats are stored
+ * @stats: static const array of stat definitions
+ *
+ * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
+ * ensuring that we pass the size associated with the given stats array.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided.
+ **/
+#define i40e_add_ethtool_stats(data, pointer, stats) \
+ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
+/**
+ * i40e_add_queue_stats - copy queue statistics into supplied buffer
+ * @data: ethtool stats buffer
+ * @ring: the ring to copy
+ *
+ * Queue statistics must be copied while protected by
+ * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
+ * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
+ * ring pointer is null, zero out the queue stat values and update the data
+ * pointer. Otherwise safely copy the stats from the ring into the supplied
+ * buffer and update the data pointer when finished.
+ *
+ * This function expects to be called while under rcu_read_lock().
+ **/
+static void
+i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
+{
+ const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats);
+ const struct i40e_stats *stats = i40e_gstrings_queue_stats;
+ unsigned int start;
+ unsigned int i;
+
+ /* To avoid invalid statistics values, ensure that we keep retrying
+ * the copy until we get a consistent value according to
+ * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * non-null before attempting to access its syncp.
+ */
+ do {
+ start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ for (i = 0; i < size; i++) {
+ i40e_add_one_ethtool_stat(&(*data)[i], ring,
+ &stats[i]);
+ }
+ } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ /* Once we successfully copy the stats in, update the data pointer */
+ *data += size;
+}
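+
+/* Sketch of a typical caller: snapshot the ring pointer under RCU and
+ * hand it in, e.g.
+ *
+ *   rcu_read_lock();
+ *   i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i]));
+ *   rcu_read_unlock();
+ */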
+
+/**
+ * __i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ **/
+static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
+ const unsigned int size, ...)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
+ *p += ETH_GSTRING_LEN;
+ va_end(args);
+ }
+}
+
+/**
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided. Additionally, stats must be an array such that
+ * ARRAY_SIZE can be called on it.
+ **/
+#define i40e_add_stat_strings(p, stats, ...) \
+ __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
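+
+/* Illustrative call: i40e_add_stat_strings(&p, i40e_gstrings_queue_stats,
+ * "tx", 3) formats the "%s-%u" patterns above into "tx-3.packets" and
+ * "tx-3.bytes".
+ */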
+
+#define I40E_PF_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pf, _name, _stat)
+#define I40E_VSI_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_vsi, _name, _stat)
+#define I40E_VEB_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
+#define I40E_PFC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pfc_stats, _name, _stat)
+
+static const struct i40e_stats i40e_gstrings_net_stats[] = {
+ I40E_NETDEV_STAT(rx_packets),
+ I40E_NETDEV_STAT(tx_packets),
+ I40E_NETDEV_STAT(rx_bytes),
+ I40E_NETDEV_STAT(tx_bytes),
+ I40E_NETDEV_STAT(rx_errors),
+ I40E_NETDEV_STAT(tx_errors),
+ I40E_NETDEV_STAT(rx_dropped),
+ I40E_NETDEV_STAT(tx_dropped),
+ I40E_NETDEV_STAT(collisions),
+ I40E_NETDEV_STAT(rx_length_errors),
+ I40E_NETDEV_STAT(rx_crc_errors),
+};
+
+static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+ I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes),
+ I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes),
+ I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast),
+ I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast),
+ I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast),
+ I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast),
+ I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast),
+ I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast),
+ I40E_VEB_STAT("veb.rx_discards", stats.rx_discards),
+ I40E_VEB_STAT("veb.tx_discards", stats.tx_discards),
+ I40E_VEB_STAT("veb.tx_errors", stats.tx_errors),
+ I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+struct i40e_cp_veb_tc_stats {
+ u64 tc_rx_packets;
+ u64 tc_rx_bytes;
+ u64 tc_tx_packets;
+ u64 tc_tx_bytes;
+};
+
+static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
+};
+
+static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+ I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+ I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+ I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+ I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+ I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+ I40E_VSI_STAT("tx_linearize", tx_linearize),
+ I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+ I40E_VSI_STAT("tx_busy", tx_busy),
+ I40E_VSI_STAT("tx_stopped", tx_stopped),
+ I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
+ I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+ I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
+ I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+ I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
+ I40E_VSI_STAT("tx_restart", tx_restart),
+};
+
+/* These PF_STATs might look like duplicates of some NETDEV_STATs,
+ * but they are separate. This device supports Virtualization, and
+ * as such might have several netdevs supporting VMDq and FCoE going
+ * through a single port. The NETDEV_STATs are for individual netdevs
+ * seen at the top of the stack, and the PF_STATs are for the physical
+ * function at the bottom of the stack hosting those netdevs.
+ *
+ * The PF_STATs are appended to the netdev stats only when ethtool -S
+ * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
+ */
+static const struct i40e_stats i40e_gstrings_stats[] = {
+ I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
+ I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
+ I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
+ I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
+ I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
+ I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
+ I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
+ I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
+ I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
+ I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
+ I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
+ I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
+ I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
+ I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("port.tx_timeout", tx_timeout_count),
+ I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
+ I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors),
+ I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
+ I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
+ I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
+ I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("port.rx_size_64", stats.rx_size_64),
+ I40E_PF_STAT("port.rx_size_127", stats.rx_size_127),
+ I40E_PF_STAT("port.rx_size_255", stats.rx_size_255),
+ I40E_PF_STAT("port.rx_size_511", stats.rx_size_511),
+ I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
+ I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
+ I40E_PF_STAT("port.rx_size_big", stats.rx_size_big),
+ I40E_PF_STAT("port.tx_size_64", stats.tx_size_64),
+ I40E_PF_STAT("port.tx_size_127", stats.tx_size_127),
+ I40E_PF_STAT("port.tx_size_255", stats.tx_size_255),
+ I40E_PF_STAT("port.tx_size_511", stats.tx_size_511),
+ I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
+ I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
+ I40E_PF_STAT("port.tx_size_big", stats.tx_size_big),
+ I40E_PF_STAT("port.rx_undersize", stats.rx_undersize),
+ I40E_PF_STAT("port.rx_fragments", stats.rx_fragments),
+ I40E_PF_STAT("port.rx_oversize", stats.rx_oversize),
+ I40E_PF_STAT("port.rx_jabber", stats.rx_jabber),
+ I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("port.arq_overflows", arq_overflows),
+ I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt),
+ I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+ I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status),
+ I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match),
+ I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status),
+
+ /* LPI stats */
+ I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status),
+ I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status),
+ I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count),
+ I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
+};
+
+struct i40e_pfc_stats {
+ u64 priority_xon_rx;
+ u64 priority_xoff_rx;
+ u64 priority_xon_tx;
+ u64 priority_xoff_tx;
+ u64 priority_xon_2_xoff;
+};
+
+static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
+ I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx),
+ I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
+};
+
+#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+
+#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
+
+#define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN)
+
+#define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
+ I40E_MAX_USER_PRIORITY)
+
+#define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \
+ (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
+ I40E_MAX_TRAFFIC_CLASS))
+
+#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
+
+#define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \
+ I40E_PFC_STATS_LEN + \
+ I40E_VEB_STATS_LEN + \
+ I40E_VSI_STATS_LEN)
+
+/* Length of stats for a single queue */
+#define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats)
+
+enum i40e_ethtool_test_id {
+ I40E_ETH_TEST_REG = 0,
+ I40E_ETH_TEST_EEPROM,
+ I40E_ETH_TEST_INTR,
+ I40E_ETH_TEST_LINK,
+};
+
+static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Eeprom test (offline)",
+ "Interrupt test (offline)",
+ "Link test (on/offline)"
+};
+
+#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+
+struct i40e_priv_flags {
+ char flag_string[ETH_GSTRING_LEN];
+ u64 flag;
+ bool read_only;
+};
+
+#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+ .flag_string = _name, \
+ .flag = _flag, \
+ .read_only = _read_only, \
+}
+
+static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
+ /* NOTE: MFP setting cannot be changed */
+ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+ I40E_PRIV_FLAG("total-port-shutdown",
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1),
+ I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
+ I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
+ I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
+ I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
+ I40E_PRIV_FLAG("link-down-on-close",
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
+ I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+ I40E_PRIV_FLAG("disable-source-pruning",
+ I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+ I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
+ I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
+ I40E_PRIV_FLAG("vf-vlan-pruning",
+ I40E_FLAG_VF_VLAN_PRUNING, 0),
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
+
+/* Private flags with a global effect, restricted to PF 0 */
+static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
+ I40E_PRIV_FLAG("vf-true-promisc-support",
+ I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
+};
+
+#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
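+
+/* The flag_string names above are what ethtool shows and sets, e.g.
+ * (interface name illustrative):
+ *
+ *   # ethtool --show-priv-flags eth0
+ *   # ethtool --set-priv-flags eth0 disable-fw-lldp on
+ */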
+
+/**
+ * i40e_partition_setting_complaint - generic complaint for MFP restriction
+ * @pf: the PF struct
+ **/
+static void i40e_partition_setting_complaint(struct i40e_pf *pf)
+{
+ dev_info(&pf->pdev->dev,
+ "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
+}
+
+/**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @pf: PF struct with phy_types
+ * @ks: ethtool link ksettings struct to fill out
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
+ struct ethtool_link_ksettings *ks)
+{
+ struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
+ u64 phy_types = pf->hw.phy.phy_types;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ }
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 20000baseKR2_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 20000baseKR2_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKX4_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKX4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
+ !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
+ !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ }
+ /* 25G PHY types */
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseSR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
+ /* additional 10G PHY types */
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseCR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseCR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseSR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseLR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseX_Full);
+ }
+ /* Autoneg PHY types */
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_5GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX ||
+ phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
+}
+
+/**
+ * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
+ * @req_fec_info: mask request FEC info
+ * @ks: ethtool ksettings to fill in
+ **/
+static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+ struct ethtool_link_ksettings *ks)
+{
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+
+ if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
+ (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ } else {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ }
+}
+
+/**
+ * i40e_get_settings_link_up - Get the Link settings for when link is up
+ * @hw: hw structure
+ * @ks: ethtool ksettings to fill in
+ * @netdev: network interface device structure
+ * @pf: pointer to physical function struct
+ **/
+static void i40e_get_settings_link_up(struct i40e_hw *hw,
+ struct ethtool_link_ksettings *ks,
+ struct net_device *netdev,
+ struct i40e_pf *pf)
+{
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct ethtool_link_ksettings cap_ksettings;
+ u32 link_speed = hw_link_info->link_speed;
+
+ /* Initialize supported and advertised settings based on phy settings */
+ switch (hw_link_info->phy_type) {
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ break;
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ break;
+ case I40E_PHY_TYPE_25GBASE_SR:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseSR_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->module_type[2] &
+ I40E_MODULE_TYPE_1000BASE_SX ||
+ hw_link_info->module_type[2] &
+ I40E_MODULE_TYPE_1000BASE_LX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ if (hw_link_info->requested_speeds &
+ I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(
+ ks, advertising, 1000baseT_Full);
+ }
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
+ case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ break;
+ case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ break;
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_SFI:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_10GBASE_AOC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ if (hw_link_info->requested_speeds &
+ I40E_LINK_SPEED_100MB)
+ ethtool_link_ksettings_add_link_mode(
+ ks, advertising, 100baseT_Full);
+ }
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_25GBASE_KR:
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 20000baseKR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKX4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 20000baseKR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKX4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ break;
+ case I40E_PHY_TYPE_25GBASE_CR:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
+ break;
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseCR_Full);
+ break;
+ default:
+ /* if we got here and link is up, something bad is afoot */
+ netdev_info(netdev,
+ "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
+ hw_link_info->phy_type);
+ }
+
+ /* Now that we've worked out everything that could be supported by the
+ * current PHY type, get what is supported by the NVM and intersect
+ * them to get what is truly supported
+ */
+ memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
+ i40e_phy_type_to_ethtool(pf, &cap_ksettings);
+ ethtool_intersect_link_masks(ks, &cap_ksettings);
+
+ /* Set speed and duplex */
+ switch (link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ ks->base.speed = SPEED_40000;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ ks->base.speed = SPEED_25000;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ ks->base.speed = SPEED_20000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ks->base.speed = SPEED_10000;
+ break;
+ case I40E_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ ks->base.speed = SPEED_2500;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ ks->base.speed = SPEED_1000;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ ks->base.speed = SPEED_100;
+ break;
+ default:
+ ks->base.speed = SPEED_UNKNOWN;
+ break;
+ }
+ ks->base.duplex = DUPLEX_FULL;
+}
+
+/**
+ * i40e_get_settings_link_down - Get the Link settings for when link is down
+ * @hw: hw structure
+ * @ks: ethtool ksettings to fill in
+ * @pf: pointer to physical function struct
+ *
+ * Reports link settings that can be determined when link is down
+ **/
+static void i40e_get_settings_link_down(struct i40e_hw *hw,
+ struct ethtool_link_ksettings *ks,
+ struct i40e_pf *pf)
+{
+ /* link is down and the driver needs to fall back on
+ * supported phy types to figure out what info to display
+ */
+ i40e_phy_type_to_ethtool(pf, ks);
+
+	/* With no link, speed and duplex are unknown */
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * i40e_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+
+ if (link_up)
+ i40e_get_settings_link_up(hw, ks, netdev, pf);
+ else
+ i40e_get_settings_link_down(hw, ks, pf);
+
+ /* Now set the settings that don't rely on link being up/down */
+ /* Set autoneg settings */
+ ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ /* Set media type settings */
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BACKPLANE:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Backplane);
+ ks->base.port = PORT_NONE;
+ break;
+ case I40E_MEDIA_TYPE_BASET:
+ ethtool_link_ksettings_add_link_mode(ks, supported, TP);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
+ ks->base.port = PORT_TP;
+ break;
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_CX4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
+ ks->base.port = PORT_DA;
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
+ ks->base.port = PORT_FIBRE;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ ks->base.port = PORT_OTHER;
+ break;
+ }
+
+ /* Set flow control settings */
+ ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
+
+ switch (hw->fc.requested_mode) {
+ case I40E_FC_FULL:
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+ break;
+ case I40E_FC_TX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+ break;
+ case I40E_FC_RX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+ break;
+ default:
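+		/* I40E_FC_NONE or an unexpected mode: advertise no pause */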
+ ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
+ ethtool_link_ksettings_del_link_mode(ks, advertising,
+ Asym_Pause);
+ break;
+ }
+
+ return 0;
+}
+
+#define I40E_LBIT_SIZE 8
+/**
+ * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
+ * @speed: speed in decimal
+ * @ks: ethtool ksettings
+ *
+ * Return i40e_aq_link_speed based on speed
+ **/
+static enum i40e_aq_link_speed
+i40e_speed_to_link_speed(__u32 speed, const struct ethtool_link_ksettings *ks)
+{
+ enum i40e_aq_link_speed link_speed = I40E_LINK_SPEED_UNKNOWN;
+ bool speed_changed = false;
+ int i, j;
+
+ static const struct {
+ __u32 speed;
+ enum i40e_aq_link_speed link_speed;
+ __u8 bit[I40E_LBIT_SIZE];
+ } i40e_speed_lut[] = {
+#define I40E_LBIT(mode) ETHTOOL_LINK_MODE_ ## mode ##_Full_BIT
+ {SPEED_100, I40E_LINK_SPEED_100MB, {I40E_LBIT(100baseT)} },
+ {SPEED_1000, I40E_LINK_SPEED_1GB,
+ {I40E_LBIT(1000baseT), I40E_LBIT(1000baseX),
+ I40E_LBIT(1000baseKX)} },
+ {SPEED_10000, I40E_LINK_SPEED_10GB,
+ {I40E_LBIT(10000baseT), I40E_LBIT(10000baseKR),
+ I40E_LBIT(10000baseLR), I40E_LBIT(10000baseCR),
+ I40E_LBIT(10000baseSR), I40E_LBIT(10000baseKX4)} },
+
+ {SPEED_25000, I40E_LINK_SPEED_25GB,
+ {I40E_LBIT(25000baseCR), I40E_LBIT(25000baseKR),
+ I40E_LBIT(25000baseSR)} },
+ {SPEED_40000, I40E_LINK_SPEED_40GB,
+ {I40E_LBIT(40000baseKR4), I40E_LBIT(40000baseCR4),
+ I40E_LBIT(40000baseSR4), I40E_LBIT(40000baseLR4)} },
+ {SPEED_20000, I40E_LINK_SPEED_20GB,
+ {I40E_LBIT(20000baseKR2)} },
+ {SPEED_2500, I40E_LINK_SPEED_2_5GB, {I40E_LBIT(2500baseT)} },
+		{SPEED_5000, I40E_LINK_SPEED_5GB, {I40E_LBIT(5000baseT)} }
+#undef I40E_LBIT
+};
+
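+	/* scan for a LUT row matching the requested speed; each row's bit[]
+	 * array is zero-terminated, so stop at the first empty slot
+	 */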
+ for (i = 0; i < ARRAY_SIZE(i40e_speed_lut); i++) {
+ if (i40e_speed_lut[i].speed == speed) {
+ for (j = 0; j < I40E_LBIT_SIZE; j++) {
+ if (test_bit(i40e_speed_lut[i].bit[j],
+ ks->link_modes.supported)) {
+ speed_changed = true;
+ break;
+ }
+ if (!i40e_speed_lut[i].bit[j])
+ break;
+ }
+ if (speed_changed) {
+ link_speed = i40e_speed_lut[i].link_speed;
+ break;
+ }
+ }
+ }
+ return link_speed;
+}
+
+#undef I40E_LBIT_SIZE
+
+/**
+ * i40e_set_link_ksettings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Set speed/duplex per media_types advertised/forced
+ **/
+static int i40e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct ethtool_link_ksettings safe_ks;
+ struct ethtool_link_ksettings copy_ks;
+ struct i40e_aq_set_phy_config config;
+ struct i40e_pf *pf = np->vsi->back;
+ enum i40e_aq_link_speed link_speed;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ bool autoneg_changed = false;
+ int timeout = 50;
+ int status = 0;
+ int err = 0;
+ __u32 speed;
+ u8 autoneg;
+
+ /* Changing port settings is not supported if this isn't the
+ * port's controlling PF
+ */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return -EOPNOTSUPP;
+ if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
+ hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
+ return -EOPNOTSUPP;
+ if (hw->device_id == I40E_DEV_ID_KX_B ||
+ hw->device_id == I40E_DEV_ID_KX_C ||
+ hw->device_id == I40E_DEV_ID_20G_KR2 ||
+ hw->device_id == I40E_DEV_ID_20G_KR2_A ||
+ hw->device_id == I40E_DEV_ID_25G_B ||
+ hw->device_id == I40E_DEV_ID_KX_X722) {
+ netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+ return -EOPNOTSUPP;
+ }
+
+	/* copy the ksettings to copy_ks to avoid modifying the original */
+ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
+
+ /* save autoneg out of ksettings */
+ autoneg = copy_ks.base.autoneg;
+ speed = copy_ks.base.speed;
+
+ /* get our own copy of the bits to check against */
+ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
+ safe_ks.base.cmd = copy_ks.base.cmd;
+ safe_ks.base.link_mode_masks_nwords =
+ copy_ks.base.link_mode_masks_nwords;
+ i40e_get_link_ksettings(netdev, &safe_ks);
+
+ /* Get link modes supported by hardware and check against modes
+ * requested by the user. Return an error if unsupported mode was set.
+ */
+ if (!bitmap_subset(copy_ks.link_modes.advertising,
+ safe_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+	/* set autoneg and speed back to what they currently are */
+ copy_ks.base.autoneg = safe_ks.base.autoneg;
+ copy_ks.base.speed = safe_ks.base.speed;
+
+ /* If copy_ks.base and safe_ks.base are not the same now, then they are
+ * trying to set something that we do not support.
+ */
+ if (memcmp(&copy_ks.base, &safe_ks.base,
+ sizeof(struct ethtool_link_settings)))
+ return -EOPNOTSUPP;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ /* Get the current phy config */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Copy abilities to config in case autoneg is not
+ * set below
+ */
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ config.abilities = abilities.abilities;
+
+ /* Check autoneg */
+ if (autoneg == AUTONEG_ENABLE) {
+ /* If autoneg was not already enabled */
+ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+ /* If autoneg is not supported, return error */
+ if (!ethtool_link_ksettings_test_link_mode(&safe_ks,
+ supported,
+ Autoneg)) {
+ netdev_info(netdev, "Autoneg not supported on this phy\n");
+ err = -EINVAL;
+ goto done;
+ }
+ /* Autoneg is allowed to change */
+ config.abilities = abilities.abilities |
+ I40E_AQ_PHY_ENABLE_AN;
+ autoneg_changed = true;
+ }
+ } else {
+ /* If autoneg is currently enabled */
+ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+			/* If autoneg is supported, 10GBASE-T is the only PHY
+			 * that can disable it, so return an error for any
+			 * other PHY type
+			 */
+ if (ethtool_link_ksettings_test_link_mode(&safe_ks,
+ supported,
+ Autoneg) &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ err = -EINVAL;
+ goto done;
+ }
+ /* Autoneg is allowed to change */
+ config.abilities = abilities.abilities &
+ ~I40E_AQ_PHY_ENABLE_AN;
+ autoneg_changed = true;
+ }
+ }
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_100MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ config.link_speed |= I40E_LINK_SPEED_1GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKX4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseCR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseLR_Full))
+ config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_2_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 5000baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 20000baseKR2_Full))
+ config.link_speed |= I40E_LINK_SPEED_20GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseCR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseKR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseSR_Full))
+ config.link_speed |= I40E_LINK_SPEED_25GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseKR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseCR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseSR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseLR4_Full))
+ config.link_speed |= I40E_LINK_SPEED_40GB;
+
+ /* Autonegotiation must be disabled to change speed */
+ if ((speed != SPEED_UNKNOWN && safe_ks.base.speed != speed) &&
+ (autoneg == AUTONEG_DISABLE ||
+ (safe_ks.base.autoneg == AUTONEG_DISABLE && !autoneg_changed))) {
+ link_speed = i40e_speed_to_link_speed(speed, ks);
+ if (link_speed == I40E_LINK_SPEED_UNKNOWN) {
+ netdev_info(netdev, "Given speed is not supported\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ } else {
+ config.link_speed = link_speed;
+ }
+ } else {
+ if (safe_ks.base.speed != speed) {
+ netdev_info(netdev,
+ "Unable to set speed, disable autoneg\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ }
+
+ /* If speed didn't get set, set it to what it currently is.
+ * This is needed because if advertise is 0 (as it is when autoneg
+ * is disabled) then speed won't get set.
+ */
+ if (!config.link_speed)
+ config.link_speed = abilities.link_speed;
+ if (autoneg_changed || abilities.link_speed != config.link_speed) {
+ /* copy over the rest of the abilities */
+ config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ /* save the requested speeds */
+ hw->phy.link_info.requested_speeds = config.link_speed;
+ /* set link and auto negotiation so changes take effect */
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* If link is up put link down */
+ if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
+ /* Tell the OS link is going down, the link will go
+ * back up when fw says it is ready asynchronously
+ */
+ i40e_print_link_message(vsi, false);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+
+ /* make the aq call */
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status) {
+ netdev_info(netdev,
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ goto done;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ netdev_dbg(netdev,
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+
+ } else {
+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+ }
+
+done:
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
+
+ return err;
+}
+
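+/**
+ * i40e_set_fec_cfg - apply a requested FEC configuration to the PHY
+ * @netdev: network interface device structure
+ * @fec_cfg: requested FEC mode bits (I40E_AQ_SET_FEC_*)
+ *
+ * Re-reads the PHY abilities and, only if the FEC configuration differs,
+ * writes the new PHY config and refreshes the link info.
+ **/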
+static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int status = 0;
+ u32 flags = 0;
+ int err = 0;
+
+ flags = READ_ONCE(pf->flags);
+ i40e_set_fec_in_flags(fec_cfg, &flags);
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) {
+ struct i40e_aq_set_phy_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities |
+ I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status) {
+ netdev_info(netdev,
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ goto done;
+ }
+ pf->flags = flags;
+ status = i40e_update_link_info(hw);
+ if (status)
+ /* debug level message only due to relation to the link
+ * itself rather than to the FEC settings
+ * (e.g. no physical connection etc.)
+ */
+ netdev_dbg(netdev,
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+
+done:
+ return err;
+}
+
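+/**
+ * i40e_get_fec_param - report the FEC settings via ethtool
+ * @netdev: network interface device structure
+ * @fecparam: buffer for the configured and active FEC modes
+ *
+ * The configured modes are taken from the PHY abilities; the active mode
+ * comes from the current link status.
+ **/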
+static int i40e_get_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int status = 0;
+ int err = 0;
+ u8 fec_cfg;
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ fecparam->fec = 0;
+ fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS))
+ fecparam->fec |= ETHTOOL_FEC_RS;
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR))
+ fecparam->fec |= ETHTOOL_FEC_BASER;
+ if (fec_cfg == 0)
+ fecparam->fec |= ETHTOOL_FEC_OFF;
+
+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+done:
+ return err;
+}
+
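+/**
+ * i40e_set_fec_param - set the FEC encoding requested via ethtool
+ * @netdev: network interface device structure
+ * @fecparam: requested FEC mode, e.g. from "ethtool --set-fec <dev> encoding rs"
+ *
+ * Only 25G-capable devices (and X722 parts with FEC-request-capable
+ * firmware) support changing the FEC encoding.
+ **/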
+static int i40e_set_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 fec_cfg = 0;
+
+ if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
+ hw->device_id != I40E_DEV_ID_25G_B &&
+ hw->device_id != I40E_DEV_ID_KX_X722)
+ return -EPERM;
+
+ if (hw->mac.type == I40E_MAC_X722 &&
+ !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
+ netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (fecparam->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ break;
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_NONE:
+ fec_cfg = 0;
+ break;
+ default:
+		dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d\n",
+			 fecparam->fec);
+ return -EINVAL;
+ }
+
+ return i40e_set_fec_cfg(netdev, fec_cfg);
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+ /* restart autonegotiation */
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+ int ret = 0;
+
+ ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+ if (ret) {
+ netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_pauseparam - Get Flow Control status
+ * @netdev: netdevice structure
+ * @pause: buffer to return pause parameters
+ *
+ * Return tx/rx-pause status
+ **/
+static void i40e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+
+ pause->autoneg =
+ ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ /* PFC enabled so report LFC as off */
+ if (dcbx_cfg->pfc.pfcenable) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ return;
+ }
+
+ if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_FULL) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+/**
+ * i40e_set_pauseparam - Set Flow Control parameter
+ * @netdev: network interface device structure
+ * @pause: requested tx/rx flow control parameters
+ **/
+static int i40e_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+ u8 aq_failures;
+ int err = 0;
+ int status;
+ u32 is_an;
+
+ /* Changing the port's flow control is not supported if this isn't the
+ * port's controlling PF
+ */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return -EOPNOTSUPP;
+
+ is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
+ if (pause->autoneg != is_an) {
+ netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If we have link and don't have autoneg */
+ if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
+		/* Send message that it might not necessarily work */
+ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
+ }
+
+ if (dcbx_cfg->pfc.pfcenable) {
+ netdev_info(netdev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = I40E_FC_FULL;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = I40E_FC_RX_PAUSE;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = I40E_FC_TX_PAUSE;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = I40E_FC_NONE;
+ else
+ return -EINVAL;
+
+ /* Tell the OS link is going down, the link will go back up when fw
+ * says it is ready asynchronously
+ */
+ i40e_print_link_message(vsi, false);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+	/* Set the FC mode and only restart AN if link is up */
+ status = i40e_set_fc(hw, &aq_failures, link_up);
+
+ if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ }
+ if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ }
+ if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ }
+
+ if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
+ /* Give it a little more time to try to come back */
+ msleep(75);
+ if (!test_bit(__I40E_DOWN, pf->state))
+ return i40e_nway_reset(netdev);
+ }
+
+ return err;
+}
+
+static u32 i40e_get_msglevel(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ u32 debug_mask = pf->hw.debug_mask;
+
+ if (debug_mask)
+ netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);
+
+ return pf->msg_enable;
+}
+
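+/* msglevel values with the I40E_DEBUG_USER bit set program the HW debug
+ * mask instead of the standard netif message level
+ */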
+static void i40e_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (I40E_DEBUG_USER & data)
+ pf->hw.debug_mask = data;
+ else
+ pf->msg_enable = data;
+}
+
+static int i40e_get_regs_len(struct net_device *netdev)
+{
+ int reg_count = 0;
+ int i;
+
+ for (i = 0; i40e_reg_list[i].offset != 0; i++)
+ reg_count += i40e_reg_list[i].elements;
+
+ return reg_count * sizeof(u32);
+}
+
+static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 *reg_buf = p;
+ unsigned int i, j, ri;
+ u32 reg;
+
+ /* Tell ethtool which driver-version-specific regs output we have.
+ *
+ * At some point, if we have ethtool doing special formatting of
+ * this data, it will rely on this version number to know how to
+ * interpret things. Hence, this needs to be updated if/when the
+ * diags register table is changed.
+ */
+ regs->version = 1;
+
+ /* loop through the diags reg table for what to print */
+ ri = 0;
+ for (i = 0; i40e_reg_list[i].offset != 0; i++) {
+ for (j = 0; j < i40e_reg_list[i].elements; j++) {
+ reg = i40e_reg_list[i].offset
+ + (j * i40e_reg_list[i].stride);
+ reg_buf[ri++] = rd32(hw, reg);
+ }
+ }
+}
+
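+/* Serves both the normal "ethtool -e" read path and the NVMUpdate tool's
+ * pass-through commands; the two are distinguished by the magic value.
+ */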
+static int i40e_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ struct i40e_pf *pf = np->vsi->back;
+ int ret_val = 0, len, offset;
+ u8 *eeprom_buff;
+ u16 i, sectors;
+ bool last;
+ u32 magic;
+
+#define I40E_NVM_SECTOR_SIZE 4096
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ /* check for NVMUpdate access method */
+ magic = hw->vendor_id | (hw->device_id << 16);
+ if (eeprom->magic && eeprom->magic != magic) {
+ struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
+ int errno = 0;
+
+ /* make sure it is the right magic for NVMUpdate */
+ if ((eeprom->magic >> 16) != hw->device_id)
+ errno = -EINVAL;
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+ errno = -EBUSY;
+ else
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
+
+ if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
+ dev_info(&pf->pdev->dev,
+ "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+ ret_val, hw->aq.asq_last_status, errno,
+ (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+ cmd->offset, cmd->data_size);
+
+ return errno;
+ }
+
+ /* normal ethtool get_eeprom support */
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto free_buff;
+ }
+
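+	/* read the NVM in sector-sized chunks, flagging the final (possibly
+	 * short) read as the last one
+	 */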
+ sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+ sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+ len = I40E_NVM_SECTOR_SIZE;
+ last = false;
+ for (i = 0; i < sectors; i++) {
+ if (i == (sectors - 1)) {
+ len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+ last = true;
+ }
+		offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
+ ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
+ (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ last, NULL);
+ if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed, invalid offset 0x%x\n",
+ offset);
+ break;
+ } else if (ret_val &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed, access, offset 0x%x\n",
+ offset);
+ break;
+ } else if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed offset %d err=%d status=0x%x\n",
+ offset, ret_val, hw->aq.asq_last_status);
+ break;
+ }
+ }
+
+ i40e_release_nvm(hw);
+ memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static int i40e_get_eeprom_len(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ u32 val;
+
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
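+	/* the X722 NVM scope is fixed; other MACs report the flash size
+	 * through GLPCI_LBARCTRL
+	 */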
+ if (hw->mac.type == I40E_MAC_X722) {
+ val = X722_EEPROM_SCOPE_LIMIT + 1;
+ return val;
+ }
+ val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+ & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+ >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+	/* the register field gives the flash size as 64KB * 2^val */
+ val = (64 * 1024) * BIT(val);
+ return val;
+}
+
+static int i40e_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
+ int ret_val = 0;
+ int errno = 0;
+ u32 magic;
+
+ /* normal ethtool set_eeprom is not supported */
+ magic = hw->vendor_id | (hw->device_id << 16);
+ if (eeprom->magic == magic)
+ errno = -EOPNOTSUPP;
+ /* check for NVMUpdate access method */
+ else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
+ errno = -EINVAL;
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+ errno = -EBUSY;
+ else
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
+
+ if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
+ dev_info(&pf->pdev->dev,
+ "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+ ret_val, hw->aq.asq_last_status, errno,
+ (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+ cmd->offset, cmd->data_size);
+
+ return errno;
+}
+
+static void i40e_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
+ sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
+ if (pf->hw.pf_id == 0)
+ drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
+}
+
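+/* report ring sizes for "ethtool -g"; descriptor counts are tracked on the
+ * main LAN VSI's rings
+ */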
+static void i40e_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
+{
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ return index < vsi->num_queue_pairs ||
+ (index >= vsi->alloc_queue_pairs &&
+ index < vsi->alloc_queue_pairs + vsi->num_queue_pairs);
+ }
+
+ return index < vsi->num_queue_pairs;
+}
+
+static int i40e_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 new_rx_count, new_tx_count;
+ u16 tx_alloc_queue_pairs;
+ int timeout = 50;
+ int i, err = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+ ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+ netdev_info(netdev,
+ "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ ring->tx_pending, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ return -EINVAL;
+ }
+
+ new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == vsi->tx_rings[0]->count) &&
+ (new_rx_count == vsi->rx_rings[0]->count))
+ return 0;
+
+	/* If there is an AF_XDP page pool attached to any of the Rx rings,
+	 * disallow changing the number of descriptors -- regardless of
+	 * whether the netdev is running or not.
+	 */
+ if (i40e_xsk_any_rx_ring_enabled(vsi))
+ return -EBUSY;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ if (!netif_running(vsi->netdev)) {
+ /* simple case - set for the next time the netdev is started */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ vsi->tx_rings[i]->count = new_tx_count;
+ vsi->rx_rings[i]->count = new_rx_count;
+ if (i40e_enabled_xdp_vsi(vsi))
+ vsi->xdp_rings[i]->count = new_tx_count;
+ }
+ vsi->num_tx_desc = new_tx_count;
+ vsi->num_rx_desc = new_rx_count;
+ goto done;
+ }
+
+ /* We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the Tx and Rx ring structs.
+ */
+
+ /* alloc updated Tx and XDP Tx resources */
+ tx_alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+ if (new_tx_count != vsi->tx_rings[0]->count) {
+ netdev_info(netdev,
+ "Changing Tx descriptor count from %d to %d.\n",
+ vsi->tx_rings[0]->count, new_tx_count);
+ tx_rings = kcalloc(tx_alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!tx_rings) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
+
+ tx_rings[i] = *vsi->tx_rings[i];
+ tx_rings[i].count = new_tx_count;
+ /* the desc and bi pointers will be reallocated in the
+ * setup call
+ */
+ tx_rings[i].desc = NULL;
+ tx_rings[i].rx_bi = NULL;
+ err = i40e_setup_tx_descriptors(&tx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
+ i40e_free_tx_resources(&tx_rings[i]);
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+
+ goto done;
+ }
+ }
+ }
+
+ /* alloc updated Rx resources */
+ if (new_rx_count != vsi->rx_rings[0]->count) {
+ netdev_info(netdev,
+ "Changing Rx descriptor count from %d to %d\n",
+ vsi->rx_rings[0]->count, new_rx_count);
+ rx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!rx_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ u16 unused;
+
+ /* clone ring and setup updated count */
+ rx_rings[i] = *vsi->rx_rings[i];
+ rx_rings[i].count = new_rx_count;
+ /* the desc and bi pointers will be reallocated in the
+ * setup call
+ */
+ rx_rings[i].desc = NULL;
+ rx_rings[i].rx_bi = NULL;
+ /* Clear cloned XDP RX-queue info before setup call */
+ memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+ */
+ rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
+ err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+
+ /* now allocate the Rx buffers to make sure the OS
+ * has enough memory, any failure here means abort
+ */
+ unused = I40E_DESC_UNUSED(&rx_rings[i]);
+ err = i40e_alloc_rx_buffers(&rx_rings[i], unused);
+rx_unwind:
+ if (err) {
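+				/* unwind every ring set up so far, including
+				 * the partially initialized one that failed
+				 */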
+ do {
+ i40e_free_rx_resources(&rx_rings[i]);
+ } while (i--);
+ kfree(rx_rings);
+ rx_rings = NULL;
+
+ goto free_tx;
+ }
+ }
+ }
+
+ /* Bring interface down, copy in the new ring info,
+ * then restore the interface
+ */
+ i40e_down(vsi);
+
+ if (tx_rings) {
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i)) {
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
+ }
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+ if (rx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+ /* get the real tail offset */
+ rx_rings[i].tail = vsi->rx_rings[i]->tail;
+ /* this is to fake out the allocation routine
+ * into thinking it has to realloc everything
+ * but the recycling logic will let us re-use
+ * the buffers allocated above
+ */
+ rx_rings[i].next_to_use = 0;
+ rx_rings[i].next_to_clean = 0;
+ rx_rings[i].next_to_alloc = 0;
+ /* do a struct copy */
+ *vsi->rx_rings[i] = rx_rings[i];
+ }
+ kfree(rx_rings);
+ rx_rings = NULL;
+ }
+
+ vsi->num_tx_desc = new_tx_count;
+ vsi->num_rx_desc = new_rx_count;
+ i40e_up(vsi);
+
+free_tx:
+ /* error cleanup if the Rx allocations failed after getting Tx */
+ if (tx_rings) {
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i))
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+done:
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
+
+ return err;
+}
+
+/**
+ * i40e_get_stats_count - return the stats count for a device
+ * @netdev: the netdev to return the count for
+ *
+ * Returns the total number of statistics for this netdev. Note that even
+ * though this is a function, the count for a specific netdev must never
+ * change. Basing the count on static values such as the maximum number of
+ * queues or the device type is fine. However, the API for
+ * obtaining stats is *not* safe against changes based on non-static
+ * values such as the *current* number of queues, or runtime flags.
+ *
+ * If a statistic is not always enabled, return it as part of the count
+ * anyway, always return its string, and report its value as zero.
+ **/
+static int i40e_get_stats_count(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int stats_len;
+
+ if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+ stats_len = I40E_PF_STATS_LEN;
+ else
+ stats_len = I40E_VSI_STATS_LEN;
+
+ /* The number of stats reported for a given net_device must remain
+ * constant throughout the life of that device.
+ *
+ * This is because the API for obtaining the size, strings, and stats
+ * is spread out over three separate ethtool ioctls. There is no safe
+ * way to lock the number of stats across these calls, so we must
+ * assume that they will never change.
+ *
+ * Due to this, we report the maximum number of queues, even if not
+ * every queue is currently configured. Since we always allocate
+ * queues in pairs, we'll just use netdev->num_tx_queues * 2. This
+ * works because the num_tx_queues is set at device creation and never
+ * changes.
+ */
+ stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues;
+
+ return stats_len;
+}
+
+static int i40e_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return I40E_TEST_LEN;
+ case ETH_SS_STATS:
+ return i40e_get_stats_count(netdev);
+ case ETH_SS_PRIV_FLAGS:
+ return I40E_PRIV_FLAGS_STR_LEN +
+ (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
+ * one dimensional structure i40e_cp_veb_tc_stats.
+ * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
+ * statistics for the given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+ struct i40e_cp_veb_tc_stats veb_tc = {
+ .tc_rx_packets = tc->tc_rx_packets[i],
+ .tc_rx_bytes = tc->tc_rx_bytes[i],
+ .tc_tx_packets = tc->tc_tx_packets[i],
+ .tc_tx_bytes = tc->tc_tx_bytes[i],
+ };
+
+ return veb_tc;
+}
+
+/**
+ * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
+ * @pf: the PF device structure
+ * @i: the priority value to copy
+ *
+ * The PFC stats are found as arrays in pf->stats, which is not easy to pass
+ * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure
+ * of the PFC stats for the given priority.
+ **/
+static inline struct i40e_pfc_stats
+i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i)
+{
+#define I40E_GET_PFC_STAT(stat, priority) \
+ .stat = pf->stats.stat[priority]
+
+ struct i40e_pfc_stats pfc = {
+ I40E_GET_PFC_STAT(priority_xon_rx, i),
+ I40E_GET_PFC_STAT(priority_xoff_rx, i),
+ I40E_GET_PFC_STAT(priority_xon_tx, i),
+ I40E_GET_PFC_STAT(priority_xoff_tx, i),
+ I40E_GET_PFC_STAT(priority_xon_2_xoff, i),
+ };
+ return pfc;
+}
+
+/**
+ * i40e_get_ethtool_stats - copy stat values into supplied buffer
+ * @netdev: the netdev to collect stats for
+ * @stats: ethtool stats command structure
+ * @data: ethtool supplied buffer
+ *
+ * Copy the stats values for this netdev into the buffer. Expects data to be
+ * pre-allocated to the size returned by i40e_get_stats_count. Note that all
+ * statistics must be copied in a static order, and the count must not change
+ * for a given netdev. See i40e_get_stats_count for more details.
+ *
+ * If a statistic is not currently valid (such as a disabled queue), this
+ * function reports its value as zero.
+ **/
+static void i40e_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ unsigned int i;
+ bool veb_stats;
+ u64 *p = data;
+
+ i40e_update_stats(vsi);
+
+ i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi),
+ i40e_gstrings_net_stats);
+
+ i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
+
+ rcu_read_lock();
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i]));
+ i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i]));
+ }
+ rcu_read_unlock();
+
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ goto check_data_pointer;
+
+ veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->lan_veb < I40E_MAX_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+
+ if (veb_stats) {
+ veb = pf->veb[pf->lan_veb];
+ i40e_update_veb_stats(veb);
+ }
+
+ /* If veb stats aren't enabled, pass NULL instead of the veb so that
+ * we initialize stats to zero and update the data pointer
+ * intelligently
+ */
+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+ i40e_gstrings_veb_stats);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ if (veb_stats) {
+ struct i40e_cp_veb_tc_stats veb_tc =
+ i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+ i40e_add_ethtool_stats(&data, &veb_tc,
+ i40e_gstrings_veb_tc_stats);
+ } else {
+ i40e_add_ethtool_stats(&data, NULL,
+ i40e_gstrings_veb_tc_stats);
+ }
+
+ i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i);
+
+ i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats);
+ }
+
+check_data_pointer:
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev),
+ "ethtool stats count mismatch!");
+}
+
+/**
+ * i40e_get_stat_strings - copy stat strings into supplied buffer
+ * @netdev: the netdev to collect strings for
+ * @data: supplied buffer to copy strings into
+ *
+ * Copy the strings related to stats for this netdev. Expects data to be
+ * pre-allocated with the size reported by i40e_get_stats_count. Note that the
+ * strings must be copied in a static order and the total count must not
+ * change for a given netdev. See i40e_get_stats_count for more details.
+ **/
+static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ unsigned int i;
+ u8 *p = data;
+
+ i40e_add_stat_strings(&data, i40e_gstrings_net_stats);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
+
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
+ "tx", i);
+ i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
+ "rx", i);
+ }
+
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ goto check_data_pointer;
+
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_stats);
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
+
+check_data_pointer:
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+ "stat strings count mismatch!");
+}
+
+static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ unsigned int i;
+ u8 *p = data;
+
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
+ ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string);
+ if (pf->hw.pf_id != 0)
+ return;
+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
+ ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, i40e_gstrings_test,
+ I40E_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ i40e_get_stat_strings(netdev, data);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ i40e_get_priv_flag_strings(netdev, data);
+ break;
+ default:
+ break;
+ }
+}
+
+static int i40e_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ /* only report HW timestamping if PTP is enabled */
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pf->ptp_clock)
+ info->phc_index = ptp_clock_index(pf->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE)
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
+}
+
+static u64 i40e_link_test(struct net_device *netdev, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ bool link_up = false;
+ int status;
+
+ netif_info(pf, hw, netdev, "link test\n");
+ status = i40e_get_link_status(&pf->hw, &link_up);
+ if (status) {
+ netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
+ *data = 1;
+ return *data;
+ }
+
+ if (link_up)
+ *data = 0;
+ else
+ *data = 1;
+
+ return *data;
+}
+
+static u64 i40e_reg_test(struct net_device *netdev, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "register test\n");
+ *data = i40e_diag_reg_test(&pf->hw);
+
+ return *data;
+}
+
+static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "eeprom test\n");
+ *data = i40e_diag_eeprom_test(&pf->hw);
+
+	/* forcibly clear the NVM Update state machine */
+ pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+ return *data;
+}
+
+static u64 i40e_intr_test(struct net_device *netdev, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ u16 swc_old = pf->sw_int_count;
+
+ netif_info(pf, hw, netdev, "interrupt test\n");
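+	/* fire a software interrupt on vector 0 and give the ISR a moment
+	 * to run and increment sw_int_count
+	 */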
+ wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
+ usleep_range(1000, 2000);
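+	/* the test fails (non-zero result) if the interrupt count did not
+	 * change
+	 */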
+ *data = (swc_old == pf->sw_int_count);
+
+ return *data;
+}
+
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+ struct i40e_vf *vfs = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states))
+ return true;
+ return false;
+}
+
+static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
+{
+ return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
+}
+
+static void i40e_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+ netif_info(pf, drv, netdev, "offline testing starting\n");
+
+ set_bit(__I40E_TESTING, pf->state);
+
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ dev_warn(&pf->pdev->dev,
+ "Cannot start offline testing when PF is in reset state.\n");
+ goto skip_ol_tests;
+ }
+
+ if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+ goto skip_ol_tests;
+ }
+
+ /* If the device is online then take it offline */
+ if (if_running)
+ /* indicate we're in test mode */
+ i40e_close(netdev);
+ else
+ /* This reset does not affect link - if it is
+ * changed to a type of reset that does affect
+ * link then the following link test would have
+ * to be moved to before the reset
+ */
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
+
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* run reg test last, a reset is required after it */
+ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__I40E_TESTING, pf->state);
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
+
+ if (if_running)
+ i40e_open(netdev);
+ } else {
+ /* Online tests */
+ netif_info(pf, drv, netdev, "online testing starting\n");
+
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Offline only tests, not run in online; pass by default */
+ data[I40E_ETH_TEST_REG] = 0;
+ data[I40E_ETH_TEST_EEPROM] = 0;
+ data[I40E_ETH_TEST_INTR] = 0;
+ }
+
+ netif_info(pf, drv, netdev, "testing finished\n");
+ return;
+
+skip_ol_tests:
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, pf->state);
+ netif_info(pf, drv, netdev, "testing failed\n");
+}
+
+static void i40e_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+ }
+}
+
+/**
+ * i40e_set_wol - set the WakeOnLAN configuration
+ * @netdev: the netdev in question
+ * @wol: the ethtool WoL setting data
+ **/
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* WoL not supported if this isn't the controlling PF on the port */
+ if (hw->partition_id != 1) {
+ i40e_partition_setting_complaint(pf);
+ return -EOPNOTSUPP;
+ }
+
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return -EOPNOTSUPP;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if (BIT(hw->port) & wol_nvm_bits)
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ /* is this a new value? */
+ if (pf->wol_en != !!wol->wolopts) {
+ pf->wol_en = !!wol->wolopts;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+ }
+
+ return 0;
+}
+
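+/**
+ * i40e_set_phys_id - blink the port LED for identification ("ethtool -p")
+ * @netdev: network interface device structure
+ * @state: LED state requested by the ethtool core
+ *
+ * Depending on the part, the LED is driven either directly by the MAC or
+ * through the PHY, so both paths are handled here.
+ **/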
+static int i40e_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int blink_freq = 2;
+ u16 temp_status;
+ int ret = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ pf->led_status = i40e_led_get(hw);
+ } else {
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
+ NULL);
+ ret = i40e_led_get_phy(hw, &temp_status,
+ &pf->phy_led_val);
+ pf->led_status = temp_status;
+ }
+ return blink_freq;
+ case ETHTOOL_ID_ON:
+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ i40e_led_set(hw, 0xf, false);
+ else
+ ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
+ break;
+ case ETHTOOL_ID_OFF:
+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ i40e_led_set(hw, 0x0, false);
+ else
+ ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ i40e_led_set(hw, pf->led_status, false);
+ } else {
+ ret = i40e_led_set_phy(hw, false, pf->led_status,
+ (pf->phy_led_val |
+ I40E_PHY_LED_MODE_ORIG));
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ i40e_aq_set_phy_debug(hw, 0, NULL);
+ }
+ break;
+ default:
+ break;
+ }
+ if (ret)
+ return -ENOENT;
+ else
+ return 0;
+}
+
+/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
+ * Throttle Rate (ITR), i.e. ITR(1) = 2 us, ITR(10) = 20 us; likewise
+ * 125 us (8000 interrupts per second) == ITR(62)
+ */
+
+/**
+ * __i40e_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue interrupt coalescing settings. Rx and Tx usecs are per
+ * queue; if queue is <0 we default to queue 0 as the representative value.
+ **/
+static int __i40e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *rx_ring, *tx_ring;
+ struct i40e_vsi *vsi = np->vsi;
+
+ ec->tx_max_coalesced_frames_irq = vsi->work_limit;
+ ec->rx_max_coalesced_frames_irq = vsi->work_limit;
+
+	/* rx and tx usecs have per-queue values. If the user doesn't specify
+	 * a queue, return queue 0's values as the representative.
+	 */
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= vsi->num_queue_pairs)
+ return -EINVAL;
+
+ rx_ring = vsi->rx_rings[queue];
+ tx_ring = vsi->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
+ ec->use_adaptive_rx_coalesce = 1;
+
+ if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
+
+	/* we use the _usecs_high fields to store/set the interrupt rate
+	 * limit that the hardware supports, which almost but not quite
+	 * fits the original intent of the ethtool variable: the
+	 * rx_coalesce_usecs_high limits total interrupts per second from
+	 * both Tx and Rx sources.
+	 */
+ ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
+ ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
+
+ return 0;
+}
+
+/**
+ * i40e_get_coalesce - get a netdev's coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
+ *
+ * Gets the coalesce settings for a particular netdev. Note that if the user
+ * has modified per-queue settings, this only guarantees to represent queue 0.
+ * See __i40e_get_coalesce for more details.
+ **/
+static int i40e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return __i40e_get_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
+ * @netdev: netdev structure
+ * @ec: ethtool's coalesce settings
+ * @queue: the particular queue to read
+ *
+ * Will read a specific queue's coalesce settings
+ **/
+static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_get_coalesce(netdev, ec, queue);
+}
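+
+/* For example (eth0 being a placeholder name), the per-queue path above
+ * services a query such as
+ *
+ *   ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce
+ *
+ * while a plain "ethtool -c eth0" reports queue 0 as the representative
+ * value via __i40e_get_coalesce().
+ */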
+
+/**
+ * i40e_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40e_ring *rx_ring = vsi->rx_rings[queue];
+ struct i40e_ring *tx_ring = vsi->tx_rings[queue];
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_q_vector *q_vector;
+ u16 intrl;
+
+ intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
+
+ rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+ tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
+
+ if (ec->use_adaptive_rx_coalesce)
+ rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
+
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
+
+ /* The interrupt handler itself will take care of programming
+ * the Tx and Rx ITR values based on the values we have entered
+ * into the q_vector, no need to write the values now.
+ */
+
+ wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl);
+ i40e_flush(hw);
+}
+
+/**
+ * __i40e_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
+static int __i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ u16 intrl_reg, cur_rx_itr, cur_tx_itr;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if (queue < 0) {
+ cur_rx_itr = vsi->rx_rings[0]->itr_setting;
+ cur_tx_itr = vsi->tx_rings[0]->itr_setting;
+ } else if (queue < vsi->num_queue_pairs) {
+ cur_rx_itr = vsi->rx_rings[queue]->itr_setting;
+ cur_tx_itr = vsi->tx_rings[queue]->itr_setting;
+ } else {
+ netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ vsi->num_queue_pairs - 1);
+ return -EINVAL;
+ }
+
+ cur_tx_itr &= ~I40E_ITR_DYNAMIC;
+ cur_rx_itr &= ~I40E_ITR_DYNAMIC;
+
+ /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
+ if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
+ netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
+ INTRL_REG_TO_USEC(I40E_MAX_INTRL));
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs != cur_rx_itr &&
+ ec->use_adaptive_rx_coalesce) {
+ netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n");
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs > I40E_MAX_ITR) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+ return -EINVAL;
+ }
+
+ if (ec->tx_coalesce_usecs != cur_tx_itr &&
+ ec->use_adaptive_tx_coalesce) {
+ netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n");
+ return -EINVAL;
+ }
+
+ if (ec->tx_coalesce_usecs > I40E_MAX_ITR) {
+ netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+ return -EINVAL;
+ }
+
+ if (ec->use_adaptive_rx_coalesce && !cur_rx_itr)
+ ec->rx_coalesce_usecs = I40E_MIN_ITR;
+
+ if (ec->use_adaptive_tx_coalesce && !cur_tx_itr)
+ ec->tx_coalesce_usecs = I40E_MIN_ITR;
+
+ intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
+ vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
+ if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
+ netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
+ vsi->int_rate_limit);
+ }
+
+	/* Rx and Tx usecs have per-queue values. If the user doesn't specify
+	 * a queue, apply the settings to all queues.
+	 */
+ if (queue < 0) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ i40e_set_itr_per_queue(vsi, ec, i);
+ } else {
+ i40e_set_itr_per_queue(vsi, ec, queue);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_set_coalesce - set coalesce settings for every queue on the netdev
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
+ *
+ * This will set each queue to the same coalesce settings.
+ **/
+static int i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return __i40e_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the specified queue's coalesce settings.
+ **/
+static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_set_coalesce(netdev, ec, queue);
+}
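+
+/* Example usage (interface name is a placeholder): applying moderation to
+ * all queues versus only queue 2 might look like
+ *
+ *   ethtool -C eth0 adaptive-rx off rx-usecs 50
+ *   ethtool --per-queue eth0 queue_mask 0x4 --coalesce rx-usecs 100
+ *
+ * The first form takes the queue < 0 path in __i40e_set_coalesce() and
+ * loops over every queue pair; the second modifies a single queue.
+ */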
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 if the flow is supported, -EINVAL otherwise.
+ **/
+static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 flow_pctype = 0;
+ u64 i_set = 0;
+
+ cmd->data = 0;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ break;
+ case UDP_V4_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ break;
+ case TCP_V6_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ break;
+ case UDP_V6_FLOW:
+ flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ /* Default is src/dest for IP, no matter the L4 hashing */
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Read flow based hash input set register */
+ if (flow_pctype) {
+ i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+ flow_pctype)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+ flow_pctype)) << 32);
+ }
+
+ /* Process bits of hash input set */
+ if (i_set) {
+ if (i_set & I40E_L4_SRC_MASK)
+ cmd->data |= RXH_L4_B_0_1;
+ if (i_set & I40E_L4_DST_MASK)
+ cmd->data |= RXH_L4_B_2_3;
+
+ if (cmd->flow_type == TCP_V4_FLOW ||
+ cmd->flow_type == UDP_V4_FLOW) {
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i_set & I40E_X722_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_X722_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
+ } else if (cmd->flow_type == TCP_V6_FLOW ||
+ cmd->flow_type == UDP_V6_FLOW) {
+ if (i_set & I40E_L3_V6_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_V6_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
+ }
+
+ return 0;
+}
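+
+/* For reference, the RXH_* bits reported above are what ethtool renders
+ * when showing the hash fields, e.g. "ethtool -n eth0 rx-flow-hash tcp4"
+ * (eth0 being a placeholder) prints "IP SA"/"IP DA" for RXH_IP_SRC and
+ * RXH_IP_DST, and "L4 bytes 0 & 1"/"L4 bytes 2 & 3" for the port bits.
+ */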
+
+/**
+ * i40e_check_mask - Check whether a mask field is set
+ * @mask: the full mask value
+ * @field: mask of the field to check
+ *
+ * If the field is fully covered by the mask, return a positive value. If the
+ * mask for the field is fully unset, return zero. Otherwise return a negative
+ * error code.
+ **/
+static int i40e_check_mask(u64 mask, u64 field)
+{
+ u64 value = mask & field;
+
+ if (value == field)
+ return 1;
+ else if (!value)
+ return 0;
+ else
+ return -1;
+}
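+
+/* For instance, with field == I40E_L4_SRC_MASK: a mask covering every bit
+ * of that field returns 1, a mask covering none of its bits returns 0, and
+ * a mask covering only some of them returns -1, letting callers reject
+ * partially-masked fields.
+ */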
+
+/**
+ * i40e_parse_rx_flow_user_data - Deconstruct user-defined data
+ * @fsp: pointer to rx flow specification
+ * @data: pointer to userdef data structure for storage
+ *
+ * Read the user-defined data and deconstruct the value into a structure. No
+ * other code should read the user-defined data, ensuring that the value is
+ * read consistently in exactly one place.
+ *
+ * The user-defined field is a 64-bit big-endian value, which we
+ * deconstruct by reading bits or bit fields from it. Single-bit flags shall
+ * be defined starting from the highest bits, while small bit field values
+ * shall be defined starting from the lowest bits.
+ *
+ * Returns 0 if the data is valid, and non-zero if the userdef data is invalid
+ * and the filter should be rejected. The data structure will always be
+ * modified even if FLOW_EXT is not set.
+ **/
+static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *data)
+{
+ u64 value, mask;
+ int valid;
+
+ /* Zero memory first so it's always consistent. */
+ memset(data, 0, sizeof(*data));
+
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ value = be64_to_cpu(*((__be64 *)fsp->h_ext.data));
+ mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data));
+
+#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0)
+#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16)
+#define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0)
+
+ valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
+ if (valid < 0) {
+ return -EINVAL;
+ } else if (valid) {
+ data->flex_word = value & I40E_USERDEF_FLEX_WORD;
+ data->flex_offset =
+ (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+ data->flex_filter = true;
+ }
+
+ return 0;
+}
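+
+/* The userdef layout decoded above, most significant bit on the left:
+ *
+ *   63             32 31           16 15            0
+ *  +-----------------+---------------+---------------+
+ *  |    (unused)     |  flex offset  |   flex word   |
+ *  +-----------------+---------------+---------------+
+ *
+ * For example, a user-def value of 0x4FFFF (a made-up value) requests a
+ * flex filter matching the word 0xFFFF at byte offset 4 of the payload.
+ */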
+
+/**
+ * i40e_fill_rx_flow_user_data - Fill in user-defined data field
+ * @fsp: pointer to rx_flow specification
+ * @data: pointer to return userdef data
+ *
+ * Reads the userdef data structure and properly fills in the user defined
+ * fields of the rx_flow_spec.
+ **/
+static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *data)
+{
+ u64 value = 0, mask = 0;
+
+ if (data->flex_filter) {
+ value |= data->flex_word;
+ value |= (u64)data->flex_offset << 16;
+ mask |= I40E_USERDEF_FLEX_FILTER;
+ }
+
+ if (value || mask)
+ fsp->flow_type |= FLOW_EXT;
+
+ *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value);
+ *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask);
+}
+
+/**
+ * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ * @rule_locs: Array of used rule locations
+ *
+ * This function populates both the total and actual rule count of
+ * the ethtool flow classification command
+ *
+ * Returns 0 on success, or -EMSGSIZE if rule_locs is too small for all rules
+ **/
+static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = i40e_get_fd_cnt_all(pf);
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = rule->fd_id;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+/**
+ * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function looks up a filter based on the Rx flow classification
+ * command and fills the flow spec info for it if found
+ *
+ * Returns 0 on success or -EINVAL if filter not found
+ **/
+static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct i40e_rx_flow_userdef userdef = {0};
+ struct i40e_fdir_filter *rule = NULL;
+ struct hlist_node *node2;
+ u64 input_set;
+ u16 index;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->fd_id)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->fd_id)
+ return -EINVAL;
+
+ fsp->flow_type = rule->flow_type;
+ if (fsp->flow_type == IP_USER_FLOW) {
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ }
+
+ if (fsp->flow_type == IPV6_USER_FLOW ||
+ fsp->flow_type == UDP_V6_FLOW ||
+ fsp->flow_type == TCP_V6_FLOW ||
+ fsp->flow_type == SCTP_V6_FLOW) {
+ /* Reverse the src and dest notion, since the HW views them
+		 * from the Tx perspective whereas the user expects it from
+		 * the Rx filter view.
+ */
+ fsp->h_u.tcp_ip6_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->src_port;
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->src_ip6,
+ sizeof(__be32) * 4);
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->dst_ip6,
+ sizeof(__be32) * 4);
+ } else {
+ /* Reverse the src and dest notion, since the HW views them
+		 * from the Tx perspective whereas the user expects it from
+		 * the Rx filter view.
+ */
+ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+ }
+
+ switch (rule->flow_type) {
+ case SCTP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ break;
+ case TCP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ break;
+ case UDP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ break;
+ case SCTP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ break;
+ case TCP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ break;
+ case UDP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ break;
+ case IP_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ break;
+ case IPV6_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ break;
+ default:
+ /* If we have stored a filter with a flow type not listed here
+ * it is almost certainly a driver bug. WARN(), and then
+ * assign the input_set as if all fields are enabled to avoid
+ * reading unassigned memory.
+ */
+ WARN(1, "Missing input set index for flow_type %d\n",
+ rule->flow_type);
+ input_set = 0xFFFFFFFFFFFFFFFFULL;
+ goto no_input_set;
+ }
+
+ input_set = i40e_read_fd_input_set(pf, index);
+
+no_input_set:
+ if (input_set & I40E_L3_V6_SRC_MASK) {
+ fsp->m_u.tcp_ip6_spec.ip6src[0] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[1] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[2] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[3] = htonl(0xFFFFFFFF);
+ }
+
+ if (input_set & I40E_L3_V6_DST_MASK) {
+ fsp->m_u.tcp_ip6_spec.ip6dst[0] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[1] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[2] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[3] = htonl(0xFFFFFFFF);
+ }
+
+ if (input_set & I40E_L3_SRC_MASK)
+ fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
+
+ if (input_set & I40E_L3_DST_MASK)
+ fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
+
+ if (input_set & I40E_L4_SRC_MASK)
+ fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
+
+ if (input_set & I40E_L4_DST_MASK)
+ fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
+
+ if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+ if (rule->vlan_tag) {
+ fsp->h_ext.vlan_etype = rule->vlan_etype;
+ fsp->m_ext.vlan_etype = htons(0xFFFF);
+ fsp->h_ext.vlan_tci = rule->vlan_tag;
+ fsp->m_ext.vlan_tci = htons(0xFFFF);
+ fsp->flow_type |= FLOW_EXT;
+ }
+
+ if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
+ struct i40e_vsi *vsi;
+
+ vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
+ if (vsi && vsi->type == I40E_VSI_SRIOV) {
+ /* VFs are zero-indexed by the driver, but ethtool
+ * expects them to be one-indexed, so add one here
+ */
+ u64 ring_vf = vsi->vf_id + 1;
+
+ ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fsp->ring_cookie |= ring_vf;
+ }
+ }
+
+ if (rule->flex_filter) {
+ userdef.flex_filter = true;
+ userdef.flex_word = be16_to_cpu(rule->flex_word);
+ userdef.flex_offset = rule->flex_offset;
+ }
+
+ i40e_fill_rx_flow_user_data(fsp, &userdef);
+
+ return 0;
+}
+
+/**
+ * i40e_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule data
+ *
+ * Returns 0 if the command is supported, a negative error code otherwise.
+ **/
+static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = vsi->rss_size;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = i40e_get_rss_hash_opts(pf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = pf->fdir_pf_active_filters;
+ /* report total rule count */
+ cmd->data = i40e_get_fd_cnt_all(pf);
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = i40e_get_ethtool_fdir_entry(pf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @hw: hw structure
+ * @nfc: pointer to user request
+ * @i_setc: bits currently set
+ *
+ * Returns the value of the bits to be set per the user request
+ **/
+static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+ struct ethtool_rxnfc *nfc,
+ u64 i_setc)
+{
+ u64 i_set = i_setc;
+ u64 src_l3 = 0, dst_l3 = 0;
+
+ if (nfc->data & RXH_L4_B_0_1)
+ i_set |= I40E_L4_SRC_MASK;
+ else
+ i_set &= ~I40E_L4_SRC_MASK;
+ if (nfc->data & RXH_L4_B_2_3)
+ i_set |= I40E_L4_DST_MASK;
+ else
+ i_set &= ~I40E_L4_DST_MASK;
+
+ if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) {
+ src_l3 = I40E_L3_V6_SRC_MASK;
+ dst_l3 = I40E_L3_V6_DST_MASK;
+ } else if (nfc->flow_type == TCP_V4_FLOW ||
+ nfc->flow_type == UDP_V4_FLOW) {
+ if (hw->mac.type == I40E_MAC_X722) {
+ src_l3 = I40E_X722_L3_SRC_MASK;
+ dst_l3 = I40E_X722_L3_DST_MASK;
+ } else {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ }
+ } else {
+		/* Any other flow types are not supported here */
+ return i_set;
+ }
+
+ if (nfc->data & RXH_IP_SRC)
+ i_set |= src_l3;
+ else
+ i_set &= ~src_l3;
+ if (nfc->data & RXH_IP_DST)
+ i_set |= dst_l3;
+ else
+ i_set &= ~dst_l3;
+
+ return i_set;
+}
+
+#define FLOW_PCTYPES_SIZE 64
+/**
+ * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @pf: pointer to the physical function struct
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns 0 if the flow input set is supported, negative otherwise.
+ **/
+static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+ DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
+ u64 i_set, i_setc;
+
+ bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "Change of RSS hash input set is not supported when MFP mode is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ flow_pctypes);
+ break;
+ case TCP_V6_FLOW:
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ flow_pctypes);
+ break;
+ case UDP_V4_FLOW:
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ flow_pctypes);
+ }
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case UDP_V6_FLOW:
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ flow_pctypes);
+ }
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
+ u8 flow_id;
+
+ for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
+ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_id);
+ }
+ }
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
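+
+/* Example (device name is a placeholder): enabling 4-tuple hashing for
+ * IPv4 UDP, which exercises the UDP_V4_FLOW case above:
+ *
+ *   ethtool -N eth0 rx-flow-hash udp4 sdfn
+ *
+ * where s/d request the IP source/destination (RXH_IP_SRC/RXH_IP_DST) and
+ * f/n request L4 bytes 0 & 1 / 2 & 3 (the UDP source/destination port).
+ */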
+
+/**
+ * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @input: The filter to update or NULL to indicate deletion
+ * @sw_idx: Software index to the filter
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function updates (or deletes) a Flow Director entry from
+ * the hlist of the corresponding PF
+ *
+ * Returns 0 on success
+ **/
+static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input,
+ u16 sw_idx,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_fdir_filter *rule, *parent;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node2;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->fd_id >= sw_idx)
+ break;
+ parent = rule;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->fd_id == sw_idx)) {
+ /* Remove this rule, since we're either deleting it, or
+ * replacing it.
+ */
+ err = i40e_add_del_fdir(vsi, rule, false);
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ pf->fdir_pf_active_filters--;
+ }
+
+ /* If we weren't given an input, this is a delete, so just return the
+ * error code indicating if there was an entry at the requested slot
+ */
+ if (!input)
+ return err;
+
+ /* Otherwise, install the new rule as requested */
+ INIT_HLIST_NODE(&input->fdir_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->fdir_node, &parent->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &pf->fdir_filter_list);
+
+ /* update counts */
+ pf->fdir_pf_active_filters++;
+
+ return 0;
+}
+
+/**
+ * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table
+ * @pf: pointer to PF structure
+ *
+ * This function searches the list of filters and determines which FLX_PIT
+ * entries are still required. It will prune any entries which are no longer
+ * in use after the deletion.
+ **/
+static void i40e_prune_flex_pit_list(struct i40e_pf *pf)
+{
+ struct i40e_flex_pit *entry, *tmp;
+ struct i40e_fdir_filter *rule;
+
+ /* First, we'll check the l3 table */
+ list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) {
+ bool found = false;
+
+ hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+ if (rule->flow_type != IP_USER_FLOW)
+ continue;
+ if (rule->flex_filter &&
+ rule->flex_offset == entry->src_offset) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If we didn't find the filter, then we can prune this entry
+ * from the list.
+ */
+ if (!found) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ /* Followed by the L4 table */
+ list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) {
+ bool found = false;
+
+ hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+ /* Skip this filter if it's L3, since we already
+ * checked those in the above loop
+ */
+ if (rule->flow_type == IP_USER_FLOW)
+ continue;
+ if (rule->flex_filter &&
+ rule->flex_offset == entry->src_offset) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If we didn't find the filter, then we can prune this entry
+ * from the list.
+ */
+ if (!found) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+}
+
+/**
+ * i40e_del_fdir_entry - Deletes a Flow Director filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * The function removes a Flow Director filter entry from the
+ * hlist of the corresponding PF
+ *
+ * Returns 0 on success
+ **/
+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+ return -EBUSY;
+
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
+ return -EBUSY;
+
+ ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
+
+ i40e_prune_flex_pit_list(pf);
+
+ i40e_fdir_check_and_reenable(pf);
+ return ret;
+}
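+
+/* Example (eth0 is a placeholder): "ethtool -N eth0 delete 10" removes the
+ * filter programmed at location 10; fsp->location carries that value into
+ * i40e_update_ethtool_fdir_entry() above.
+ */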
+
+/**
+ * i40e_unused_pit_index - Find an unused PIT index for given list
+ * @pf: the PF data structure
+ *
+ * Find the first unused flexible PIT index entry. We search both the L3 and
+ * L4 flexible PIT lists so that the returned index is unique and unused by
+ * either currently programmed L3 or L4 filters. We use a bit field as storage
+ * to track which indexes are already used.
+ **/
+static u8 i40e_unused_pit_index(struct i40e_pf *pf)
+{
+ unsigned long available_index = 0xFF;
+ struct i40e_flex_pit *entry;
+
+ /* We need to make sure that the new index isn't in use by either L3
+ * or L4 filters so that IP_USER_FLOW filters can program both L3 and
+ * L4 to use the same index.
+ */
+
+ list_for_each_entry(entry, &pf->l4_flex_pit_list, list)
+ clear_bit(entry->pit_index, &available_index);
+
+ list_for_each_entry(entry, &pf->l3_flex_pit_list, list)
+ clear_bit(entry->pit_index, &available_index);
+
+ return find_first_bit(&available_index, 8);
+}
+
+/**
+ * i40e_find_flex_offset - Find an existing flex src_offset
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to find
+ *
+ * Searches the flex_pit_list for the given src_offset. Returns the matching
+ * entry if the offset is already programmed. Otherwise, returns an ERR_PTR
+ * if there is no space to add a new offset, or NULL if the offset can still
+ * be added.
+ **/
+static
+struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list,
+ u16 src_offset)
+{
+ struct i40e_flex_pit *entry;
+ int size = 0;
+
+ /* Search for the src_offset first. If we find a matching entry
+ * already programmed, we can simply re-use it.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ size++;
+ if (entry->src_offset == src_offset)
+ return entry;
+ }
+
+ /* If we haven't found an entry yet, then the provided src offset has
+ * not yet been programmed. We will program the src offset later on,
+ * but we need to indicate whether there is enough space to do so
+ * here. We'll make use of ERR_PTR for this purpose.
+ */
+ if (size >= I40E_FLEX_PIT_TABLE_SIZE)
+ return ERR_PTR(-ENOSPC);
+
+ return NULL;
+}
+
+/**
+ * i40e_add_flex_offset - Add src_offset to flex PIT table list
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to add
+ * @pit_index: the PIT index to program
+ *
+ * This function programs the new src_offset to the list. It is expected that
+ * i40e_find_flex_offset has already been tried and returned NULL, indicating
+ * that this offset is not programmed, and that the list has enough space to
+ * store another offset.
+ *
+ * Returns 0 on success, and negative value on error.
+ **/
+static int i40e_add_flex_offset(struct list_head *flex_pit_list,
+ u16 src_offset,
+ u8 pit_index)
+{
+ struct i40e_flex_pit *new_pit, *entry;
+
+ new_pit = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!new_pit)
+ return -ENOMEM;
+
+ new_pit->src_offset = src_offset;
+ new_pit->pit_index = pit_index;
+
+ /* We need to insert this item such that the list is sorted by
+ * src_offset in ascending order.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ if (new_pit->src_offset < entry->src_offset) {
+ list_add_tail(&new_pit->list, &entry->list);
+ return 0;
+ }
+
+ /* If we found an entry with our offset already programmed we
+ * can simply return here, after freeing the memory. However,
+ * if the pit_index does not match we need to report an error.
+ */
+ if (new_pit->src_offset == entry->src_offset) {
+ int err = 0;
+
+ /* If the PIT index is not the same we can't re-use
+ * the entry, so we must report an error.
+ */
+ if (new_pit->pit_index != entry->pit_index)
+ err = -EINVAL;
+
+ kfree(new_pit);
+ return err;
+ }
+ }
+
+ /* If we reached here, then we haven't yet added the item. This means
+ * that we should add the item at the end of the list.
+ */
+ list_add_tail(&new_pit->list, flex_pit_list);
+ return 0;
+}
+
+/**
+ * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
+ * @pf: Pointer to the PF structure
+ * @flex_pit_list: list of flexible src offsets in use
+ * @flex_pit_start: index to first entry for this section of the table
+ *
+ * In order to handle flexible data, the hardware uses a table of values
+ * called the FLX_PIT table. This table is used to indicate which sections of
+ * the input correspond to what PIT index values. Unfortunately, hardware is
+ * very restrictive about programming this table. Entries must be ordered by
+ * src_offset in ascending order, without duplicates. Additionally, unused
+ * entries must be set to the unused index value, and must have valid size and
+ * length according to the src_offset ordering.
+ *
+ * This function will reprogram the FLX_PIT register from a book-keeping
+ * structure that we guarantee is already ordered correctly, and has no more
+ * than 3 entries.
+ *
+ * To make things easier, we only support flexible values of one word length,
+ * rather than allowing variable length flexible values.
+ **/
+static void __i40e_reprogram_flex_pit(struct i40e_pf *pf,
+ struct list_head *flex_pit_list,
+ int flex_pit_start)
+{
+ struct i40e_flex_pit *entry = NULL;
+ u16 last_offset = 0;
+ int i = 0, j = 0;
+
+ /* First, loop over the list of flex PIT entries, and reprogram the
+ * registers.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ /* We have to be careful when programming values for the
+ * largest SRC_OFFSET value. It is possible that adding
+ * additional empty values at the end would overflow the space
+ * for the SRC_OFFSET in the FLX_PIT register. To avoid this,
+ * we check here and add the empty values prior to adding the
+ * largest value.
+ *
+ * To determine this, we will use a loop from i+1 to 3, which
+ * will determine whether the unused entries would have valid
+ * SRC_OFFSET. Note that there cannot be extra entries past
+ * this value, because the only valid values would have been
+ * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not
+ * have been added to the list in the first place.
+ */
+ for (j = i + 1; j < 3; j++) {
+ u16 offset = entry->src_offset + j;
+ int index = flex_pit_start + i;
+ u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+ 1,
+ offset - 3);
+
+ if (offset > I40E_MAX_FLEX_SRC_OFFSET) {
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(index),
+ value);
+ i++;
+ }
+ }
+
+ /* Now, we can program the actual value into the table */
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+ I40E_FLEX_PREP_VAL(entry->pit_index + 50,
+ 1,
+ entry->src_offset));
+ i++;
+ }
+
+ /* In order to program the last entries in the table, we need to
+ * determine the valid offset. If the list is empty, we'll just start
+ * with 0. Otherwise, we'll start with the last item offset and add 1.
+ * This ensures that all entries have valid sizes. If we don't do this
+ * correctly, the hardware will disable flexible field parsing.
+ */
+ if (!list_empty(flex_pit_list))
+ last_offset = list_prev_entry(entry, list)->src_offset + 1;
+
+ for (; i < 3; i++, last_offset++) {
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+ I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+ 1,
+ last_offset));
+ }
+}
+
+/**
+ * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change
+ * @pf: pointer to the PF structure
+ *
+ * This function reprograms both the L3 and L4 FLX_PIT tables. See the
+ * internal helper function for implementation details.
+ **/
+static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
+{
+ __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list,
+ I40E_FLEX_PIT_IDX_START_L3);
+
+ __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list,
+ I40E_FLEX_PIT_IDX_START_L4);
+
+ /* We also need to program the L3 and L4 GLQF ORT register */
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX),
+ I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3,
+ 3, 1));
+
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX),
+ I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4,
+ 3, 1));
+}
+
+/**
+ * i40e_flow_str - Converts a flow_type into a human readable string
+ * @fsp: the flow specification
+ *
+ * Currently, only the flow types we support are included here, and the string
+ * value attempts to match what ethtool would use to configure this flow type.
+ **/
+static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ return "tcp4";
+ case UDP_V4_FLOW:
+ return "udp4";
+ case SCTP_V4_FLOW:
+ return "sctp4";
+ case IP_USER_FLOW:
+ return "ip4";
+ case TCP_V6_FLOW:
+ return "tcp6";
+ case UDP_V6_FLOW:
+ return "udp6";
+ case SCTP_V6_FLOW:
+ return "sctp6";
+ case IPV6_USER_FLOW:
+ return "ip6";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index
+ * @pit_index: PIT index to convert
+ *
+ * Returns the mask for a given PIT index. Will return 0 if the pit_index is
+ * out of range.
+ **/
+static u64 i40e_pit_index_to_mask(int pit_index)
+{
+ switch (pit_index) {
+ case 0:
+ return I40E_FLEX_50_MASK;
+ case 1:
+ return I40E_FLEX_51_MASK;
+ case 2:
+ return I40E_FLEX_52_MASK;
+ case 3:
+ return I40E_FLEX_53_MASK;
+ case 4:
+ return I40E_FLEX_54_MASK;
+ case 5:
+ return I40E_FLEX_55_MASK;
+ case 6:
+ return I40E_FLEX_56_MASK;
+ case 7:
+ return I40E_FLEX_57_MASK;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * i40e_print_input_set - Show changes between two input sets
+ * @vsi: the vsi being configured
+ * @old: the old input set
+ * @new: the new input set
+ *
+ * Print the difference between old and new input sets by showing which series
+ * of words are toggled on or off. Only displays the bits we actually support
+ * changing.
+ **/
+static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new)
+{
+ struct i40e_pf *pf = vsi->back;
+ bool old_value, new_value;
+ int i;
+
+ old_value = !!(old & I40E_L3_SRC_MASK);
+ new_value = !!(new & I40E_L3_SRC_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L3_DST_MASK);
+ new_value = !!(new & I40E_L3_DST_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L4_SRC_MASK);
+ new_value = !!(new & I40E_L4_SRC_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L4_DST_MASK);
+ new_value = !!(new & I40E_L4_DST_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_VERIFY_TAG_MASK);
+ new_value = !!(new & I40E_VERIFY_TAG_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ /* Show change of flexible filter entries */
+ for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) {
+ u64 flex_mask = i40e_pit_index_to_mask(i);
+
+ old_value = !!(old & flex_mask);
+ new_value = !!(new & flex_mask);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n",
+ i,
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+ }
+
+ netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n",
+ old);
+ netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n",
+ new);
+}
+
+/**
+ * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid
+ * @vsi: pointer to the targeted VSI
+ * @fsp: pointer to Rx flow specification
+ * @userdef: userdefined data from flow specification
+ *
+ * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support
+ * for partial matches exists with a few limitations. First, hardware only
+ * supports masking by word boundary (2 bytes) and not per individual bit.
+ * Second, hardware is limited to using one mask for a flow type and cannot
+ * use a separate mask for each filter.
+ *
+ * To support these limitations, if we already have a configured filter for
+ * the specified type, this function enforces that new filters of the type
+ * match the configured input set. Otherwise, if we do not have a filter of
+ * the specified type, we allow the input set to be updated to match the
+ * desired filter.
+ *
+ * To help administrators understand why a filter was not accepted, we print
+ * a diagnostic message showing how the input set would change, and warn that
+ * preexisting filters must be deleted first if required.
+ *
+ * Returns 0 on successful input set match, and a negative return code on
+ * failure.
+ **/
+static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
+ struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *userdef)
+{
+ static const __be32 ipv6_full_mask[4] = {cpu_to_be32(0xffffffff),
+ cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff),
+ cpu_to_be32(0xffffffff)};
+ struct ethtool_tcpip6_spec *tcp_ip6_spec;
+ struct ethtool_usrip6_spec *usr_ip6_spec;
+ struct ethtool_tcpip4_spec *tcp_ip4_spec;
+ struct ethtool_usrip4_spec *usr_ip4_spec;
+ struct i40e_pf *pf = vsi->back;
+ u64 current_mask, new_mask;
+ bool new_flex_offset = false;
+ bool flex_l3 = false;
+ u16 *fdir_filter_count;
+ u16 index, src_offset = 0;
+ u8 pit_index = 0;
+ int err;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case SCTP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ fdir_filter_count = &pf->fd_sctp4_filter_cnt;
+ break;
+ case TCP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ fdir_filter_count = &pf->fd_tcp4_filter_cnt;
+ break;
+ case UDP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ fdir_filter_count = &pf->fd_udp4_filter_cnt;
+ break;
+ case SCTP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ fdir_filter_count = &pf->fd_sctp6_filter_cnt;
+ break;
+ case TCP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ fdir_filter_count = &pf->fd_tcp6_filter_cnt;
+ break;
+ case UDP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ fdir_filter_count = &pf->fd_udp6_filter_cnt;
+ break;
+ case IP_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ fdir_filter_count = &pf->fd_ip4_filter_cnt;
+ flex_l3 = true;
+ break;
+ case IPV6_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ fdir_filter_count = &pf->fd_ip6_filter_cnt;
+ flex_l3 = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Read the current input set from register memory. */
+ current_mask = i40e_read_fd_input_set(pf, index);
+ new_mask = current_mask;
+
+ /* Determine, if any, the required changes to the input set in order
+ * to support the provided mask.
+ *
+ * Hardware only supports masking at word (2 byte) granularity and does
+ * not support full bitwise masking. This implementation simplifies
+ * even further and only supports fully enabled or fully disabled
+ * masks for each field, even though we could split the ip4src and
+ * ip4dst fields.
+ */
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case SCTP_V4_FLOW:
+ new_mask &= ~I40E_VERIFY_TAG_MASK;
+ fallthrough;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
+
+ /* IPv4 source address */
+ if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_SRC_MASK;
+ else if (!tcp_ip4_spec->ip4src)
+ new_mask &= ~I40E_L3_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* IPv4 destination address */
+ if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_DST_MASK;
+ else if (!tcp_ip4_spec->ip4dst)
+ new_mask &= ~I40E_L3_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 source port */
+ if (tcp_ip4_spec->psrc == htons(0xFFFF))
+ new_mask |= I40E_L4_SRC_MASK;
+ else if (!tcp_ip4_spec->psrc)
+ new_mask &= ~I40E_L4_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 destination port */
+ if (tcp_ip4_spec->pdst == htons(0xFFFF))
+ new_mask |= I40E_L4_DST_MASK;
+ else if (!tcp_ip4_spec->pdst)
+ new_mask &= ~I40E_L4_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Type of Service is not supported. */
+ if (tcp_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ break;
+ case SCTP_V6_FLOW:
+ new_mask &= ~I40E_VERIFY_TAG_MASK;
+ fallthrough;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ tcp_ip6_spec = &fsp->m_u.tcp_ip6_spec;
+
+ /* Check if user provided IPv6 source address. */
+ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6src,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_SRC_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &tcp_ip6_spec->ip6src))
+ new_mask &= ~I40E_L3_V6_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Check if user provided destination address. */
+ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6dst,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_DST_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &tcp_ip6_spec->ip6dst))
+ new_mask &= ~I40E_L3_V6_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 source port */
+ if (tcp_ip6_spec->psrc == htons(0xFFFF))
+ new_mask |= I40E_L4_SRC_MASK;
+ else if (!tcp_ip6_spec->psrc)
+ new_mask &= ~I40E_L4_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 destination port */
+ if (tcp_ip6_spec->pdst == htons(0xFFFF))
+ new_mask |= I40E_L4_DST_MASK;
+ else if (!tcp_ip6_spec->pdst)
+ new_mask &= ~I40E_L4_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Traffic Classes is not supported. */
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+ break;
+ case IP_USER_FLOW:
+ usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
+
+ /* IPv4 source address */
+ if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_SRC_MASK;
+ else if (!usr_ip4_spec->ip4src)
+ new_mask &= ~I40E_L3_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* IPv4 destination address */
+ if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_DST_MASK;
+ else if (!usr_ip4_spec->ip4dst)
+ new_mask &= ~I40E_L3_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* First 4 bytes of L4 header */
+ if (usr_ip4_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ /* Filtering on Type of Service is not supported. */
+ if (usr_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ /* Filtering on IP version is not supported */
+ if (usr_ip4_spec->ip_ver)
+ return -EINVAL;
+
+ /* Filtering on L4 protocol is not supported */
+ if (usr_ip4_spec->proto)
+ return -EINVAL;
+
+ break;
+ case IPV6_USER_FLOW:
+ usr_ip6_spec = &fsp->m_u.usr_ip6_spec;
+
+ /* Check if user provided IPv6 source address. */
+ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6src,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_SRC_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &usr_ip6_spec->ip6src))
+ new_mask &= ~I40E_L3_V6_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Check if user provided destination address. */
+ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6dst,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_DST_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &usr_ip6_spec->ip6dst))
+ new_mask &= ~I40E_L3_V6_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ if (usr_ip6_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ /* Filtering on Traffic class is not supported. */
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ /* Filtering on L4 protocol is not supported */
+ if (usr_ip6_spec->l4_proto)
+ return -EINVAL;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->flow_type & FLOW_EXT) {
+		/* Allow only 802.1Q or no etype at all, since an empty
+		 * etype is later treated as 0x8100 (802.1Q).
+		 */
+ if (fsp->h_ext.vlan_etype != htons(ETH_P_8021Q) &&
+ fsp->h_ext.vlan_etype != 0)
+ return -EOPNOTSUPP;
+ if (fsp->m_ext.vlan_tci == htons(0xFFFF))
+ new_mask |= I40E_VLAN_SRC_MASK;
+ else
+ new_mask &= ~I40E_VLAN_SRC_MASK;
+ }
+
+ /* First, clear all flexible filter entries */
+ new_mask &= ~I40E_FLEX_INPUT_MASK;
+
+ /* If we have a flexible filter, try to add this offset to the correct
+ * flexible filter PIT list. Once finished, we can update the mask.
+ * If the src_offset changed, we will get a new mask value which will
+ * trigger an input set change.
+ */
+ if (userdef->flex_filter) {
+ struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL;
+
+		/* The flexible offset must be even, since the flexible
+		 * payload must be aligned on a 2-byte boundary.
+		 */
+ if (userdef->flex_offset & 0x1) {
+ dev_warn(&pf->pdev->dev,
+ "Flexible data offset must be 2-byte aligned\n");
+ return -EINVAL;
+ }
+
+ src_offset = userdef->flex_offset >> 1;
+
+ /* FLX_PIT source offset value is only so large */
+ if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) {
+ dev_warn(&pf->pdev->dev,
+ "Flexible data must reside within first 64 bytes of the packet payload\n");
+ return -EINVAL;
+ }
+
+ /* See if this offset has already been programmed. If we get
+ * an ERR_PTR, then the filter is not safe to add. Otherwise,
+ * if we get a NULL pointer, this means we will need to add
+ * the offset.
+ */
+ flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list,
+ src_offset);
+ if (IS_ERR(flex_pit))
+ return PTR_ERR(flex_pit);
+
+ /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown)
+ * packet types, and thus we need to program both L3 and L4
+ * flexible values. These must have identical flexible index,
+ * as otherwise we can't correctly program the input set. So
+ * we'll find both an L3 and L4 index and make sure they are
+ * the same.
+ */
+ if (flex_l3) {
+ l3_flex_pit =
+ i40e_find_flex_offset(&pf->l3_flex_pit_list,
+ src_offset);
+ if (IS_ERR(l3_flex_pit))
+ return PTR_ERR(l3_flex_pit);
+
+ if (flex_pit) {
+ /* If we already had a matching L4 entry, we
+ * need to make sure that the L3 entry we
+ * obtained uses the same index.
+ */
+ if (l3_flex_pit) {
+ if (l3_flex_pit->pit_index !=
+ flex_pit->pit_index) {
+ return -EINVAL;
+ }
+ } else {
+ new_flex_offset = true;
+ }
+ } else {
+ flex_pit = l3_flex_pit;
+ }
+ }
+
+ /* If we didn't find an existing flex offset, we need to
+ * program a new one. However, we don't immediately program it
+ * here because we will wait to program until after we check
+ * that it is safe to change the input set.
+ */
+ if (!flex_pit) {
+ new_flex_offset = true;
+ pit_index = i40e_unused_pit_index(pf);
+ } else {
+ pit_index = flex_pit->pit_index;
+ }
+
+ /* Update the mask with the new offset */
+ new_mask |= i40e_pit_index_to_mask(pit_index);
+ }
+
+ /* If the mask and flexible filter offsets for this filter match the
+ * currently programmed values we don't need any input set change, so
+ * this filter is safe to install.
+ */
+ if (new_mask == current_mask && !new_flex_offset)
+ return 0;
+
+ netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n",
+ i40e_flow_str(fsp));
+ i40e_print_input_set(vsi, current_mask, new_mask);
+ if (new_flex_offset) {
+ netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d",
+ pit_index, src_offset);
+ }
+
+ /* Hardware input sets are global across multiple ports, so even the
+ * main port cannot change them when in MFP mode as this would impact
+ * any filters on the other ports.
+ */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* This filter requires us to update the input set. However, hardware
+ * only supports one input set per flow type, and does not support
+ * separate masks for each filter. This means that we can only support
+ * a single mask for all filters of a specific type.
+ *
+ * If we have preexisting filters, they obviously depend on the
+ * current programmed input set. Display a diagnostic message in this
+ * case explaining why the filter could not be accepted.
+ */
+ if (*fdir_filter_count) {
+ netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n",
+ i40e_flow_str(fsp),
+ *fdir_filter_count);
+ return -EOPNOTSUPP;
+ }
+
+ i40e_write_fd_input_set(pf, index, new_mask);
+
+ /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
+ * frames. If we're programming the input set for IPv4/Other, we also
+ * need to program the IPv4/Fragmented input set. Since we don't have
+ * separate support, we'll always assume and enforce that the two flow
+ * types must have matching input sets.
+ */
+ if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ new_mask);
+
+ /* Add the new offset and update table, if necessary */
+ if (new_flex_offset) {
+ err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
+ pit_index);
+ if (err)
+ return err;
+
+ if (flex_l3) {
+ err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
+ src_offset,
+ pit_index);
+ if (err)
+ return err;
+ }
+
+ i40e_reprogram_flex_pit(pf);
+ }
+
+ return 0;
+}
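+
+/* A sketch of the input-set rule enforced above (interface name and
+ * addresses are illustrative): once a filter such as
+ *
+ *   ethtool -N eth0 flow-type tcp4 dst-ip 10.0.0.1 dst-port 80 action 2
+ *
+ * is installed, every further tcp4 filter must match on exactly the same
+ * fields (here dst-ip and dst-port). A later tcp4 filter that also
+ * specifies src-ip implies an input set change and is rejected until the
+ * preexisting tcp4 filters are removed.
+ */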
+
+/**
+ * i40e_match_fdir_filter - Return true if two filters match
+ * @a: pointer to filter struct
+ * @b: pointer to filter struct
+ *
+ * Returns true if the two filters match exactly the same criteria, i.e. they
+ * match the same flow type and have the same parameters. We don't need to
+ * check any input-set since all filters of the same flow type must use the
+ * same input set.
+ **/
+static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
+ struct i40e_fdir_filter *b)
+{
+	/* The filters do not match if any of these criteria differ. */
+ if (a->dst_ip != b->dst_ip ||
+ a->src_ip != b->src_ip ||
+ a->dst_port != b->dst_port ||
+ a->src_port != b->src_port ||
+ a->flow_type != b->flow_type ||
+ a->ipl4_proto != b->ipl4_proto ||
+ a->vlan_tag != b->vlan_tag ||
+ a->vlan_etype != b->vlan_etype)
+ return false;
+
+ return true;
+}
+
+/**
+ * i40e_disallow_matching_filters - Check that new filters differ
+ * @vsi: pointer to the targeted VSI
+ * @input: new filter to check
+ *
+ * Due to hardware limitations, it is not possible for two filters that match
+ * similar criteria to be programmed at the same time. This is true for a few
+ * reasons:
+ *
+ * (a) all filters matching a particular flow type must use the same input
+ * set, that is they must match the same criteria.
+ * (b) different flow types will never match the same packet, as the flow type
+ * is decided by hardware before checking which rules apply.
+ * (c) hardware has no way to distinguish which order filters apply in.
+ *
+ * Due to this, we can't really support using the location data to order
+ * filters in the hardware parsing. It is technically possible for the user to
+ * request two filters matching the same criteria but which select different
+ * queues. In this case, rather than keep both filters in the list, we reject
+ * the 2nd filter when the user requests adding it.
+ *
+ * This avoids needing to track location for programming the filter to
+ * hardware, and ensures that we avoid some strange scenarios involving
+ * deleting filters which match the same criteria.
+ **/
+static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+
+ /* Loop through every filter, and check that it doesn't match */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* Don't check the filters match if they share the same fd_id,
+ * since the new filter is actually just updating the target
+ * of the old filter.
+ */
+ if (rule->fd_id == input->fd_id)
+ continue;
+
+ /* If any filters match, then print a warning message to the
+ * kernel message buffer and bail out.
+ */
+ if (i40e_match_fdir_filter(rule, input)) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d already matches this flow.\n",
+ rule->fd_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
+ * @vsi: pointer to the targeted VSI
+ * @cmd: command to get or set RX flow classification rules
+ *
+ * Add a Flow Director filter for a specific flow spec based on its
+ * protocol. Returns 0 if the filter was successfully added.
+ **/
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_rx_flow_userdef userdef;
+ struct ethtool_rx_flow_spec *fsp;
+ struct i40e_fdir_filter *input;
+ u16 dest_vsi = 0, q_index = 0;
+ struct i40e_pf *pf;
+ int ret = -EINVAL;
+ u8 dest_ctl;
+
+ if (!vsi)
+ return -EINVAL;
+ pf = vsi->back;
+
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return -EOPNOTSUPP;
+
+ if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
+ return -ENOSPC;
+
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+ return -EBUSY;
+
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
+ return -EBUSY;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ /* Parse the user-defined field */
+ if (i40e_parse_rx_flow_user_data(fsp, &userdef))
+ return -EINVAL;
+
+ /* Extended MAC field is not supported */
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ ret = i40e_check_fdir_input_set(vsi, fsp, &userdef);
+ if (ret)
+ return ret;
+
+ if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
+ pf->hw.func_caps.fd_filters_guaranteed)) {
+ return -EINVAL;
+ }
+
+	/* ring_cookie is either the drop index, or an encoding of the queue
+	 * index and VF id we wish to target.
+	 */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (!vf) {
+ if (ring >= vsi->num_queue_pairs)
+ return -EINVAL;
+ dest_vsi = vsi->id;
+ } else {
+			/* The ring_cookie stores VF index + 1 (0 means the
+			 * main VSI), so convert to a zero-based VF index.
+			 */
+ vf--;
+
+ if (vf >= pf->num_alloc_vfs)
+ return -EINVAL;
+ if (ring >= pf->vf[vf].num_queue_pairs)
+ return -EINVAL;
+ dest_vsi = pf->vf[vf].lan_vsi_id;
+ }
+ dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ q_index = ring;
+ }
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+
+ if (!input)
+ return -ENOMEM;
+
+ input->fd_id = fsp->location;
+ input->q_index = q_index;
+ input->dest_vsi = dest_vsi;
+ input->dest_ctl = dest_ctl;
+ input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->flow_type = fsp->flow_type & ~FLOW_EXT;
+
+ input->vlan_etype = fsp->h_ext.vlan_etype;
+ if (!fsp->m_ext.vlan_etype && fsp->h_ext.vlan_tci)
+ input->vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ if (fsp->m_ext.vlan_tci && input->vlan_etype)
+ input->vlan_tag = fsp->h_ext.vlan_tci;
+ if (input->flow_type == IPV6_USER_FLOW ||
+ input->flow_type == UDP_V6_FLOW ||
+ input->flow_type == TCP_V6_FLOW ||
+ input->flow_type == SCTP_V6_FLOW) {
+		/* Reverse the src and dest notion, since the HW expects them
+		 * to be from the Tx perspective, whereas the user input is
+		 * from the Rx filter view.
+		 */
+ input->ipl4_proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ input->dst_port = fsp->h_u.tcp_ip6_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip6_spec.pdst;
+ memcpy(input->dst_ip6, fsp->h_u.ah_ip6_spec.ip6src,
+ sizeof(__be32) * 4);
+ memcpy(input->src_ip6, fsp->h_u.ah_ip6_spec.ip6dst,
+ sizeof(__be32) * 4);
+ } else {
+		/* Reverse the src and dest notion, since the HW expects them
+		 * to be from the Tx perspective, whereas the user input is
+		 * from the Rx filter view.
+		 */
+ input->ipl4_proto = fsp->h_u.usr_ip4_spec.proto;
+ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ }
+
+ if (userdef.flex_filter) {
+ input->flex_filter = true;
+ input->flex_word = cpu_to_be16(userdef.flex_word);
+ input->flex_offset = userdef.flex_offset;
+ }
+
+ /* Avoid programming two filters with identical match criteria. */
+ ret = i40e_disallow_matching_filters(vsi, input);
+ if (ret)
+ goto free_filter_memory;
+
+ /* Add the input filter to the fdir_input_list, possibly replacing
+ * a previous filter. Do not free the input structure after adding it
+ * to the list as this would cause a use-after-free bug.
+ */
+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+ ret = i40e_add_del_fdir(vsi, input, true);
+ if (ret)
+ goto remove_sw_rule;
+ return 0;
+
+remove_sw_rule:
+ hlist_del(&input->fdir_node);
+ pf->fdir_pf_active_filters--;
+free_filter_memory:
+ kfree(input);
+ return ret;
+}
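+
+/* A minimal sketch of the ring_cookie decoding used above, assuming the
+ * standard ethtool encoding (queue index in the low 32 bits, VF index + 1
+ * shifted by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF). With hypothetical values:
+ *
+ *   u64 cookie = (3ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5;
+ *
+ *   ethtool_get_flow_spec_ring(cookie)    -> 5 (queue index)
+ *   ethtool_get_flow_spec_ring_vf(cookie) -> 3, decremented above to
+ *                                            target VF 2's LAN VSI
+ */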
+
+/**
+ * i40e_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 on success, or -EOPNOTSUPP if the command is not supported.
+ **/
+static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40e_set_rss_hash_opt(pf, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = i40e_add_fdir_ethtool(vsi, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = i40e_del_fdir_entry(vsi, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_max_channels - get the maximum number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+	/* TODO: This code assumes DCB and FD are disabled for now. */
+ return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current and maximum supported channels
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are used for control. max_combined counts how
+ * many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support many more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ /* report maximum channels */
+ ch->max_combined = i40e_max_channels(vsi);
+
+ /* report info for other vector */
+ ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->max_other = ch->other_count;
+
+ /* Note: This code assumes DCB is disabled for now. */
+ ch->combined_count = vsi->num_queue_pairs;
+}
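+
+/* Illustration (hypothetical output): with 8 queue pairs in use and the
+ * sideband flow director enabled, "ethtool -l <dev>" would report
+ * "Combined: 8" and "Other: 1"; the rx and tx counts stay 0 because queues
+ * are only exposed as combined channels.
+ */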
+
+/**
+ * i40e_set_channels - Set the new channel count
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channel count may differ from what the user requested, since it
+ * is rounded down to a power of two.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+ int new_count;
+ int err = 0;
+
+ /* We do not support setting channels for any other VSI at present */
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ /* We do not support setting channels via ethtool when TCs are
+ * configured through mqprio
+ */
+ if (i40e_is_tc_mqprio_enabled(pf))
+ return -EINVAL;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > i40e_max_channels(vsi))
+ return -EINVAL;
+
+ /* verify that the number of channels does not invalidate any current
+ * flow director rules
+ */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (rule->dest_ctl != drop && count <= rule->q_index) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d assigns flow to queue %d\n",
+ rule->fd_id, rule->q_index);
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Existing filter rules must be deleted to reduce combined channel count to %d\n",
+ count);
+ return err;
+ }
+
+ /* update feature limits from largest to smallest supported values */
+ /* TODO: Flow director limit, DCB etc */
+
+ /* use rss_reconfig to rebuild with new queue count and update traffic
+ * class queue mapping
+ */
+ new_count = i40e_reconfig_rss_queues(pf, count);
+ if (new_count > 0)
+ return 0;
+ else
+ return -EINVAL;
+}
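+
+/* Example of the rounding noted above (hypothetical request): asking for
+ * 6 combined channels may come back as 4, since the RSS reconfiguration
+ * rounds the effective queue count down to a power of two.
+ */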
+
+/**
+ * i40e_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the hash key size.
+ **/
+static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
+{
+ return I40E_HKEY_ARRAY_SIZE;
+}
+
+/**
+ * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return I40E_HLUT_ARRAY_SIZE;
+}
+
+/**
+ * i40e_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware. Returns 0 on
+ * success.
+ **/
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ u8 *lut, *seed = NULL;
+ int ret;
+ u16 i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ seed = key;
+ lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+ ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE);
+ if (ret)
+ goto out;
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+ indir[i] = (u32)(lut[i]);
+
+out:
+ kfree(lut);
+
+ return ret;
+}
+
+/**
+ * i40e_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function to use
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 *seed = NULL;
+ u16 i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (key) {
+ if (!vsi->rss_hkey_user) {
+ vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
+ GFP_KERNEL);
+ if (!vsi->rss_hkey_user)
+ return -ENOMEM;
+ }
+ memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
+ seed = vsi->rss_hkey_user;
+ }
+ if (!vsi->rss_lut_user) {
+ vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
+ if (!vsi->rss_lut_user)
+ return -ENOMEM;
+ }
+
+	/* Each 32-bit entry in 'indir' is narrowed to an 8-bit LUT entry */
+ if (indir)
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+ vsi->rss_lut_user[i] = (u8)(indir[i]);
+ else
+ i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
+ vsi->rss_size);
+
+ return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
+ I40E_HLUT_ARRAY_SIZE);
+}
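+
+/* A minimal sketch of the LUT copy above, with hypothetical values: for
+ * vsi->rss_size == 4, a user-supplied table of
+ *
+ *   indir = { 0, 1, 2, 3, 0, 1, 2, 3, ... }  (I40E_HLUT_ARRAY_SIZE entries)
+ *
+ * is narrowed to u8 and stored one byte per LUT slot, so packets hashing
+ * to bucket n are steered to queue indir[n].
+ */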
+
+/**
+ * i40e_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The string set count and the strings themselves must match the flags
+ * reported here. Add a new string to the i40e_gstrings_priv_flags array
+ * for each new flag.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40e_get_priv_flags(struct net_device *dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 i, j, ret_flags = 0;
+
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gstrings_priv_flags[i];
+
+ if (priv_flags->flag & pf->flags)
+ ret_flags |= BIT(i);
+ }
+
+ if (pf->hw.pf_id != 0)
+ return ret_flags;
+
+ for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+ if (priv_flags->flag & pf->flags)
+ ret_flags |= BIT(i + j);
+ }
+
+ return ret_flags;
+}
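+
+/* Bit-layout sketch: after the first loop above, i equals
+ * I40E_PRIV_FLAGS_STR_LEN, so per-PF flags occupy bits 0..i-1 and, on
+ * PF 0 only, global flag j is reported as BIT(i + j). The same layout is
+ * assumed by the string tables and by i40e_set_priv_flags() below.
+ */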
+
+/**
+ * i40e_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ u64 orig_flags, new_flags, changed_flags;
+ enum i40e_admin_queue_err adq_err;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 reset_needed = 0;
+ int status;
+ u32 i, j;
+
+ orig_flags = READ_ONCE(pf->flags);
+ new_flags = orig_flags;
+
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gstrings_priv_flags[i];
+
+ if (flags & BIT(i))
+ new_flags |= priv_flags->flag;
+ else
+ new_flags &= ~(priv_flags->flag);
+
+ /* If this is a read-only flag, it can't be changed */
+ if (priv_flags->read_only &&
+ ((orig_flags ^ new_flags) & ~BIT(i)))
+ return -EOPNOTSUPP;
+ }
+
+ if (pf->hw.pf_id != 0)
+ goto flags_complete;
+
+ for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+ if (flags & BIT(i + j))
+ new_flags |= priv_flags->flag;
+ else
+ new_flags &= ~(priv_flags->flag);
+
+ /* If this is a read-only flag, it can't be changed */
+ if (priv_flags->read_only &&
+ ((orig_flags ^ new_flags) & ~BIT(i)))
+ return -EOPNOTSUPP;
+ }
+
+flags_complete:
+ changed_flags = orig_flags ^ new_flags;
+
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+ reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
+ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+ reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
+
+ /* Before we finalize any flag changes, we need to perform some
+ * checks to ensure that the changes are supported and safe.
+ */
+
+ /* ATR eviction is not supported on all devices */
+ if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) &&
+ !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
+ return -EOPNOTSUPP;
+
+	/* If the driver detected FW LLDP was disabled on init, this flag could
+	 * be set; however, we do not support _changing_ the flag:
+	 * - on XL710 if NPAR is enabled or FW API version < 1.7
+	 * - on X722 with FW API version < 1.6
+	 * There are situations where older FW versions/NPAR-enabled PFs could
+	 * disable LLDP, but we _must_ not allow the user to enable/disable
+	 * LLDP with this flag on unsupported FW versions.
+ */
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FW LLDP\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (changed_flags & I40E_FLAG_RS_FEC &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (changed_flags & I40E_FLAG_BASE_R_FEC &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B &&
+ pf->hw.device_id != I40E_DEV_ID_KX_X722) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Process any additional changes needed as a result of flag changes.
+ * The changed_flags value reflects the list of bits that were
+ * changed in the code above.
+ */
+
+ /* Flush current ATR settings if ATR was disabled */
+ if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
+ set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
+ }
+
+ if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
+ u16 sw_flags = 0, valid_flags = 0;
+ int ret;
+
+ if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+ 0, NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
+ if ((changed_flags & I40E_FLAG_RS_FEC) ||
+ (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+ u8 fec_cfg = 0;
+
+ if (new_flags & I40E_FLAG_RS_FEC &&
+ new_flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ } else if (new_flags & I40E_FLAG_RS_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ } else if (new_flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ }
+ if (i40e_set_fec_cfg(dev, fec_cfg))
+ dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
+ }
+
+ if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
+ (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) {
+ dev_err(&pf->pdev->dev,
+ "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ pf->num_alloc_vfs) {
+ dev_warn(&pf->pdev->dev,
+ "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((changed_flags & new_flags &
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
+ (new_flags & I40E_FLAG_MFP_ENABLED))
+ dev_warn(&pf->pdev->dev,
+ "Turning on link-down-on-close flag may affect other partitions\n");
+
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+#ifdef CONFIG_I40E_DCB
+ i40e_dcb_sw_default_config(pf);
+#endif /* CONFIG_I40E_DCB */
+ i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
+ i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
+ } else {
+ status = i40e_aq_start_lldp(&pf->hw, false, NULL);
+ if (status) {
+ adq_err = pf->hw.aq.asq_last_status;
+ switch (adq_err) {
+ case I40E_AQ_RC_EEXIST:
+ dev_warn(&pf->pdev->dev,
+ "FW LLDP agent is already running\n");
+ reset_needed = 0;
+ break;
+ case I40E_AQ_RC_EPERM:
+ dev_warn(&pf->pdev->dev,
+ "Device configuration forbids SW from starting the LLDP agent.\n");
+ return -EINVAL;
+ case I40E_AQ_RC_EAGAIN:
+ dev_warn(&pf->pdev->dev,
+ "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+ return -EBUSY;
+ default:
+ dev_warn(&pf->pdev->dev,
+ "Starting FW LLDP agent failed: error: %pe, %s\n",
+ ERR_PTR(status),
+ i40e_aq_str(&pf->hw,
+ adq_err));
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
+ /* Now that we've checked to ensure that the new flags are valid, load
+ * them into place. Since we only modify flags either (a) during
+ * initialization or (b) while holding the RTNL lock, we don't need
+ * anything fancy here.
+ */
+ pf->flags = new_flags;
+
+	/* Issue a reset so the changes take effect. As additional flag bits
+	 * are added, we will need to maintain a mask of the bits that
+	 * require a reset.
+	 */
+ if (reset_needed)
+ i40e_do_reset(pf, reset_needed, true);
+
+ return 0;
+}
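+
+/* Usage sketch (hedged; the flag name is taken from the per-PF string
+ * table i40e_gstrings_priv_flags):
+ *
+ *   ethtool --set-priv-flags eth0 disable-fw-lldp on
+ *
+ * walks the path above: the changed bit requests a PF reset-and-rebuild,
+ * the FW LLDP agent is stopped, and the reset is issued once the new
+ * flags have been committed to pf->flags.
+ */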
+
+/**
+ * i40e_get_module_info - get (Q)SFP+ module type info
+ * @netdev: network interface device structure
+ * @modinfo: module EEPROM size and layout information structure
+ **/
+static int i40e_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 sff8472_comp = 0;
+ u32 sff8472_swap = 0;
+ u32 sff8636_rev = 0;
+ u32 type = 0;
+ int status;
+
+ /* Check if firmware supports reading module EEPROM. */
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
+ return -EINVAL;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ return -EIO;
+
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+ netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n");
+ return -EINVAL;
+ }
+
+ type = hw->phy.link_info.module_type[0];
+
+ switch (type) {
+ case I40E_MODULE_TYPE_SFP:
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
+ I40E_MODULE_SFF_8472_COMP,
+ &sff8472_comp, NULL);
+ if (status)
+ return -EIO;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
+ I40E_MODULE_SFF_8472_SWAP,
+ &sff8472_swap, NULL);
+ if (status)
+ return -EIO;
+
+ /* Check if the module requires address swap to access
+ * the other EEPROM memory page.
+ */
+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+ netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n");
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp == 0x00) {
+ /* Module is not SFF-8472 compliant */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) {
+ /* Module is SFF-8472 compliant but doesn't implement
+ * Digital Diagnostic Monitoring (DDM).
+ */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP_PLUS:
+ /* Read from memory page 0. */
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ 0, true,
+ I40E_MODULE_REVISION_ADDR,
+ &sff8636_rev, NULL);
+ if (status)
+ return -EIO;
+ /* Determine revision compliance byte */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ break;
+ default:
+ netdev_err(vsi->netdev, "Module type unrecognized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents
+ * @netdev: network interface device structure
+ * @ee: EEPROM dump request structure
+ * @data: buffer to be filled with EEPROM contents
+ **/
+static int i40e_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ bool is_sfp = false;
+ u32 value = 0;
+ int status;
+ int i;
+
+ if (!ee || !ee->len || !data)
+ return -EINVAL;
+
+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < ee->len; i++) {
+ u32 offset = i + ee->offset;
+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= ETH_MODULE_SFF_8079_LEN) {
+ offset -= ETH_MODULE_SFF_8079_LEN;
+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= ETH_MODULE_SFF_8436_LEN / 2;
+ addr++;
+ }
+ }
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ addr, true, offset, &value, NULL);
+ if (status)
+ return -EIO;
+ data[i] = value;
+ }
+ return 0;
+}
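+
+/* Worked example of the paging above (hypothetical request): for an SFP,
+ * flat offset 300 exceeds ETH_MODULE_SFF_8079_LEN (256), so the read is
+ * issued to the second I2C address (I40E_I2C_EEPROM_DEV_ADDR2) at offset
+ * 44. For a QSFP, flat offset 300 becomes page 1, offset 172, since each
+ * 128-byte upper page is mapped at device offsets 128..255.
+ */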
+
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp phy_cfg;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int status = 0;
+
+ /* Get initial PHY capabilities */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Check whether NIC configuration is compatible with Energy Efficient
+ * Ethernet (EEE) mode.
+ */
+ if (phy_cfg.eee_capability == 0)
+ return -EOPNOTSUPP;
+
+ edata->supported = SUPPORTED_Autoneg;
+ edata->lp_advertised = edata->supported;
+
+ /* Get current configuration */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
+ if (status)
+ return -EAGAIN;
+
+ edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
+ edata->eee_enabled = !!edata->advertised;
+ edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
+
+ edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
+
+ return 0;
+}
+
+static int i40e_is_eee_param_supported(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_ethtool_not_used {
+ u32 value;
+ const char *name;
+ } param[] = {
+ {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
+ {edata->tx_lpi_timer, "tx-timer"},
+ {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(param); i++) {
+ if (param[i].value) {
+ netdev_info(netdev,
+ "EEE setting %s not supported\n",
+ param[i].name);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int status = I40E_SUCCESS;
+ __le16 eee_capability;
+
+ /* Deny parameters we don't support */
+ if (i40e_is_eee_param_supported(netdev, edata))
+ return -EOPNOTSUPP;
+
+ /* Get initial PHY capabilities */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
+ NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Check whether NIC configuration is compatible with Energy Efficient
+ * Ethernet (EEE) mode.
+ */
+ if (abilities.eee_capability == 0)
+ return -EOPNOTSUPP;
+
+ /* Cache initial EEE capability */
+ eee_capability = abilities.eee_capability;
+
+ /* Get current PHY configuration */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Cache current PHY configuration */
+ config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.abilities = abilities.abilities |
+ I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ /* Set desired EEE state */
+ if (edata->eee_enabled) {
+ config.eee_capability = eee_capability;
+ config.eeer |= cpu_to_le32(I40E_PRTPM_EEER_TX_LPI_EN_MASK);
+ } else {
+ config.eee_capability = 0;
+ config.eeer &= cpu_to_le32(~I40E_PRTPM_EEER_TX_LPI_EN_MASK);
+ }
+
+ /* Apply modified PHY configuration */
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+ .get_drvinfo = i40e_get_drvinfo,
+ .set_eeprom = i40e_set_eeprom,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+};
+
+static const struct ethtool_ops i40e_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_RX_USECS_HIGH |
+ ETHTOOL_COALESCE_TX_USECS_HIGH,
+ .get_drvinfo = i40e_get_drvinfo,
+ .get_regs_len = i40e_get_regs_len,
+ .get_regs = i40e_get_regs,
+ .nway_reset = i40e_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_wol = i40e_get_wol,
+ .set_wol = i40e_set_wol,
+ .set_eeprom = i40e_set_eeprom,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+ .get_ringparam = i40e_get_ringparam,
+ .set_ringparam = i40e_set_ringparam,
+ .get_pauseparam = i40e_get_pauseparam,
+ .set_pauseparam = i40e_set_pauseparam,
+ .get_msglevel = i40e_get_msglevel,
+ .set_msglevel = i40e_set_msglevel,
+ .get_rxnfc = i40e_get_rxnfc,
+ .set_rxnfc = i40e_set_rxnfc,
+ .self_test = i40e_diag_test,
+ .get_strings = i40e_get_strings,
+ .get_eee = i40e_get_eee,
+ .set_eee = i40e_set_eee,
+ .set_phys_id = i40e_set_phys_id,
+ .get_sset_count = i40e_get_sset_count,
+ .get_ethtool_stats = i40e_get_ethtool_stats,
+ .get_coalesce = i40e_get_coalesce,
+ .set_coalesce = i40e_set_coalesce,
+ .get_rxfh_key_size = i40e_get_rxfh_key_size,
+ .get_rxfh_indir_size = i40e_get_rxfh_indir_size,
+ .get_rxfh = i40e_get_rxfh,
+ .set_rxfh = i40e_set_rxfh,
+ .get_channels = i40e_get_channels,
+ .set_channels = i40e_set_channels,
+ .get_module_info = i40e_get_module_info,
+ .get_module_eeprom = i40e_get_module_eeprom,
+ .get_ts_info = i40e_get_ts_info,
+ .get_priv_flags = i40e_get_priv_flags,
+ .set_priv_flags = i40e_set_priv_flags,
+ .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
+ .get_link_ksettings = i40e_get_link_ksettings,
+ .set_link_ksettings = i40e_set_link_ksettings,
+ .get_fecparam = i40e_get_fec_param,
+ .set_fecparam = i40e_set_fec_param,
+ .flash_device = i40e_ddp_flash,
+};
+
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ netdev->ethtool_ops = &i40e_ethtool_ops;
+ else
+ netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
new file mode 100644
index 000000000..46f7950a0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_memory_type mem_type __attribute__((unused));
+ struct i40e_hmc_sd_entry *sd_entry;
+ bool dma_mem_alloc_done = false;
+ int ret_code = I40E_SUCCESS;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ sd_entry->u.pd_table.pd_page_addr = mem;
+ } else {
+ sd_entry->u.bp.addr = mem;
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (ret_code)
+ if (dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
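+
+/* Sizing sketch: a direct SD is backed by a single 2M buffer
+ * (I40E_HMC_DIRECT_BP_SIZE by default), while a paged SD gets a 4K
+ * page-descriptor page plus a 512-entry pd_entry array; each of those
+ * 512 PDs later maps its own 4K backing page, also covering 2M of HMC
+ * address space per SD.
+ */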
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating a new one.
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ *    aligned on a 4K boundary, and zeroed.
+ * 2. It should be 4K in size.
+ **/
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
+{
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ int ret_code = 0;
+ u64 page_desc;
+ u64 *pd_addr;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = false;
+ }
+
+ pd_entry->bp.addr = *page;
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = page->pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ memcpy(pd_addr, &page_desc, sizeof(u64));
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
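+
+/* Descriptor-format sketch: the page_desc written above is the backing
+ * page's physical address with bit 0 set as the valid bit. With a
+ * hypothetical pa of 0x1f4000, the descriptor 0x1f4001 lands in slot
+ * rel_pd_idx of the owning SD's page-descriptor page.
+ */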
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the
+ *    sd table (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ *    function returns.
+ **/
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ int ret_code = 0;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = false;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+ /* free memory here */
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
+ if (ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from an sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from an sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
new file mode 100644
index 000000000..9960da07a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+	/* equals the PCI function number for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the segment descriptor index limit (one past
+ *            the last valid index)
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
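+
+/* Worked example (hypothetical object layout): with base = 0, an object
+ * size of 128 bytes, index = 16000 and cnt = 1000, fpm_addr = 2048000
+ * and fpm_limit = 2176000. Then sd_idx = 2048000 / 2M = 0 and sd_limit =
+ * (2176000 - 1) / 2M + 1 = 2, i.e. the range spans SDs 0 and 1.
+ */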
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
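+
+/* Worked example (same hypothetical layout as above): fpm_adr = 2048000
+ * and fpm_limit = 2176000 give pd_index = 2048000 / 4096 = 500 and
+ * pd_limit = (2176000 - 1) / 4096 + 1 = 532, i.e. PDs 500..531.
+ */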
+
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
new file mode 100644
index 000000000..40c101f28
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -0,0 +1,1120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
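+
+/* Worked example, assuming I40E_HMC_L2OBJ_BASE_ALIGNMENT == 512 per the
+ * comment above: an offset of 1000 is padded up to 1024 (1000 % 512 ==
+ * 488, 512 - 488 == 24), while an already-aligned offset such as 1024 is
+ * returned unchanged.
+ */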
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory the function requires, based on
+ * the number of resources it must provide context for.
+ **/
+static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ int ret_code = 0;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
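+
+/* Sizing sketch (hypothetical totals): if l2fpm_size works out to 5 MiB,
+ * sd_cnt = (5M + 2M - 1) / 2M = 3, i.e. three segment descriptors, each
+ * covering 2M of function-private memory.
+ */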
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) invalid
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by the pd after this function
+ *    returns.
+ **/
+static int i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ int ret_code = 0;
+
+ if (!i40e_prep_remove_pd_page(hmc_info, idx))
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the sd table (for direct address mode) invalid
+ * 2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ *    set to 0) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrements the ref count for the sd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ *    function returns.
+ **/
+static int i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ int ret_code = 0;
+
+ if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = false;
+ u32 sd_idx, sd_lmt;
+ int ret_code = 0;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This covers cases where the SD should be smaller than the full 2M
+ * of memory. If no size is filled in, the function defaults the SD
+ * size to 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i, NULL);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1));
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = true;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+ int ret_code = 0;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code)
+ goto configure_lan_hmc_out;
+ /* on success, fall through to the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
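+/* Illustrative usage sketch (not part of the driver): a PF bring-up
+ * path pairs the HMC init/configure calls roughly like this, with the
+ * queue counts (placeholder names here) taken from the function
+ * capabilities:
+ *
+ *	ret = i40e_init_lan_hmc(hw, num_tx_qp, num_rx_qp, 0, 0);
+ *	if (!ret)
+ *		ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ *
+ * and i40e_shutdown_lan_hmc(hw) undoes both on teardown.
+ */
+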
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for the PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about the PDs and backing storage.
+ **/
+static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ int ret_code = 0;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+ if (ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ int ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ sizeof_field(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
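+/* For example, I40E_HMC_STORE(i40e_hmc_obj_txq, head) expands to
+ *
+ *	offsetof(struct i40e_hmc_obj_txq, head),
+ *	sizeof_field(struct i40e_hmc_obj_txq, head)
+ *
+ * i.e. one macro fills both the .offset and .size_of members of a
+ * struct i40e_context_ele initializer.
+ */
+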
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
+ { 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ memcpy(&dest_byte, dest, sizeof(dest_byte));
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_byte, sizeof(dest_byte));
+}
+
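+/* Worked example for the byte case: the Tx-queue "new_context" field
+ * in the table above is 1 bit wide at LSB 30 and stored in a u8, so it
+ * lands here.  shift_width = 30 % 8 = 6, mask = BIT(1) - 1 = 0x01 and
+ * dest = hmc_bits + 30 / 8 = hmc_bits + 3; after the shifts, writing
+ * new_context = 1 ORs 0x40 into byte 3 of the context image while the
+ * other bits of that byte are preserved.
+ */
+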
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u16 src_word, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le16 dest_word;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ memcpy(&dest_word, dest, sizeof(dest_word));
+
+ dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
+ dest_word |= cpu_to_le16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_word, sizeof(dest_word));
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u32 src_dword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le32 dest_dword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's shift count is
+ * masked to 5 bits, so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = ~(u32)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ memcpy(&dest_dword, dest, sizeof(dest_dword));
+
+ dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
+ dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_dword, sizeof(dest_dword));
+}
+
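+/* Concrete case for the width == 32 branch above: the Tx-queue "crc"
+ * field is exactly 32 bits wide, so BIT(32) - 1 would be computed as
+ * (1 << 32) - 1; with the x86 shift count masked to 5 bits the shift
+ * does nothing and the mask would collapse to 0 instead of 0xFFFFFFFF,
+ * hence the explicit ~(u32)0.
+ */
+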
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u64 src_qword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le64 dest_qword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's shift count is
+ * masked to 6 bits, so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = ~(u64)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ memcpy(&dest_qword, dest, sizeof(dest_qword));
+
+ dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
+ dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_qword, sizeof(dest_qword));
+}
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static int i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+
+ return 0;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static int i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+
+ /* we have to deal with each element of the HMC using the
+ * correct size so that we are correct regardless of the
+ * endianness of the machine
+ */
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_write_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_write_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_write_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_write_qword(context_bytes, &ce_info[f], dest);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hw: the hardware struct, from which we obtain the i40e_hmc_info pointer
+ * @object_base: pointer to be filled with the object's virtual address
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+static
+int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ struct i40e_hmc_info *hmc_info = &hw->hmc;
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+ int ret_code = 0;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
+
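+/* Example of the offset math above: Rx queue contexts are
+ * I40E_HMC_OBJ_SIZE_RXQ (32) bytes each, so for queue N
+ * obj_offset_in_fpm = base + 32 * N.  In paged mode, that offset
+ * modulo I40E_HMC_PAGED_BP_SIZE (the 4K page size) then selects the
+ * byte inside the backing page whose PD entry covers the object.
+ */
+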
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ u8 *context_bytes;
+ int err;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ u8 *context_bytes;
+ int err;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ u8 *context_bytes;
+ int err;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ u8 *context_bytes;
+ int err;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
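+
+/* Illustrative per-queue pattern (a sketch of how ring setup would use
+ * the helpers above; the context field values and variable names are
+ * placeholders, with base held in 128-byte units):
+ *
+ *	struct i40e_hmc_obj_txq tx_ctx = {};
+ *
+ *	tx_ctx.base = ring_dma / 128;
+ *	tx_ctx.qlen = ring_count;
+ *	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ *	if (!err)
+ *		err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ */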
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
new file mode 100644
index 000000000..9f960404c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
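+
+/* Typical call order (an illustrative summary, not an API contract):
+ * i40e_init_lan_hmc() sizes the function-private memory map,
+ * i40e_configure_lan_hmc() builds the SD/PD tables and programs the
+ * FPM registers, the per-queue clear/set context helpers run during
+ * ring setup, and i40e_shutdown_lan_hmc() frees everything on remove.
+ */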
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
new file mode 100644
index 000000000..63d43ef86
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -0,0 +1,16740 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/bpf.h>
+#include <generated/utsrelease.h>
+#include <linux/crash_dump.h>
+
+/* Local includes */
+#include "i40e.h"
+#include "i40e_diag.h"
+#include "i40e_xsk.h"
+#include <net/udp_tunnel.h>
+#include <net/xdp_sock_drv.h>
+/* All i40e tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
+
+const char i40e_driver_name[] = "i40e";
+static const char i40e_driver_string[] =
+ "Intel(R) Ethernet Connection XL710 Network Driver";
+
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
+
+/* a bit of forward declarations */
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_add_vsi(struct i40e_vsi *vsi);
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+static int i40e_setup_misc_vector(struct i40e_pf *pf);
+static void i40e_determine_queue_usage(struct i40e_pf *pf);
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
+static bool i40e_check_recovery_mode(struct i40e_pf *pf);
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
+static int i40e_get_capabilities(struct i40e_pf *pf,
+ enum i40e_admin_queue_opc list_type);
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
+
+/* i40e_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id i40e_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
+
+#define I40E_MAX_VF_COUNT 128
+static int debug = -1;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_LICENSE("GPL v2");
+
+static struct workqueue_struct *i40e_wq;
+
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+ struct net_device *netdev, int delta)
+{
+ struct netdev_hw_addr_list *ha_list;
+ struct netdev_hw_addr *ha;
+
+ if (!f || !netdev)
+ return;
+
+ if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
+ ha_list = &netdev->uc;
+ else
+ ha_list = &netdev->mc;
+
+ netdev_hw_addr_list_for_each(ha, ha_list) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ ha->refcount += delta;
+ if (ha->refcount <= 0)
+ ha->refcount = 1;
+ break;
+ }
+ }
+}
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+ GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
+ mem->va = NULL;
+ mem->pa = 0;
+ mem->size = 0;
+
+ return 0;
+}
+
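+/* Usage sketch for the pair of DMA helpers above (illustrative only):
+ *
+ *	struct i40e_dma_mem mem;
+ *
+ *	if (!i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096)) {
+ *		...use mem.va from the CPU, hand mem.pa to the device...
+ *		i40e_free_dma_mem_d(hw, &mem);
+ *	}
+ */
+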
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+ u32 size)
+{
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (!mem->va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+ mem->va = NULL;
+ mem->size = 0;
+
+ return 0;
+}
+
+/**
+ * i40e_get_lump - find a lump of free generic resource
+ * @pf: board private structure
+ * @pile: the pile of resource to search
+ * @needed: the number of items needed
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+ **/
+static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ u16 needed, u16 id)
+{
+ int ret = -ENOMEM;
+ int i, j;
+
+ if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
+ dev_info(&pf->pdev->dev,
+ "param err: pile=%s needed=%d id=0x%04x\n",
+ pile ? "<valid>" : "<null>", needed, id);
+ return -EINVAL;
+ }
+
+ /* Allocate last queue in the pile for FDIR VSI queue
+ * so it doesn't fragment the qp_pile
+ */
+ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
+ pile->num_entries - 1);
+ return -ENOMEM;
+ }
+ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+ return pile->num_entries - 1;
+ }
+
+ i = 0;
+ while (i < pile->num_entries) {
+ /* skip already allocated entries */
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ i++;
+ continue;
+ }
+
+ /* do we have enough in this lump? */
+ for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
+ if (pile->list[i+j] & I40E_PILE_VALID_BIT)
+ break;
+ }
+
+ if (j == needed) {
+ /* there was enough, so assign it to the requestor */
+ for (j = 0; j < needed; j++)
+ pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+ ret = i;
+ break;
+ }
+
+ /* not enough, so skip over it and continue looking */
+ i += j;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_put_lump - return a lump of generic resource
+ * @pile: the pile of resource to search
+ * @index: the base item index
+ * @id: the owner id of the items assigned
+ *
+ * Returns the count of items in the lump
+ **/
+static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+{
+ int valid_id = (id | I40E_PILE_VALID_BIT);
+ int count = 0;
+ u16 i;
+
+ if (!pile || index >= pile->num_entries)
+ return -EINVAL;
+
+ for (i = index;
+ i < pile->num_entries && pile->list[i] == valid_id;
+ i++) {
+ pile->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
+
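+/* Example of the pile encoding (illustrative): if
+ * i40e_get_lump(pf, pile, 4, id) returns base index b, then
+ * pile->list[b]..pile->list[b + 3] each hold (id | I40E_PILE_VALID_BIT).
+ * i40e_put_lump(pile, b, id) clears exactly those entries and returns
+ * 4; it stops at the first entry that does not match the id, so lumps
+ * owned by other ids are never touched.
+ */
+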
+/**
+ * i40e_find_vsi_from_id - searches for the vsi with the given id
+ * @pf: the pf structure to search for the vsi
+ * @id: id of the vsi it is searching for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->id == id))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * i40e_service_event_schedule - Schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue
+ **/
+void i40e_service_event_schedule(struct i40e_pf *pf)
+{
+ if ((!test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state))
+ queue_work(i40e_wq, &pf->service_task);
+}
+
+/**
+ * i40e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number timing out
+ *
+ * If any port has noticed a Tx timeout, it is likely that the whole
+ * device is munged, not just the one netdev port, so go for the full
+ * reset.
+ **/
+static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_ring *tx_ring = NULL;
+ unsigned int i;
+ u32 head, val;
+
+ pf->tx_timeout_count++;
+
+ /* with txqueue index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (txqueue ==
+ vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+
+ if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
+ pf->tx_timeout_recovery_level = 1; /* reset after some time */
+ else if (time_before(jiffies,
+ (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+ return; /* don't do any new action before the next timeout */
+
+ /* don't kick off another recovery if one is already pending */
+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+ return;
+
+ if (tx_ring) {
+ head = i40e_get_head(tx_ring);
+ /* Read interrupt register */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ val = rd32(&pf->hw,
+ I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->base_vector - 1));
+ else
+ val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+ netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ vsi->seid, txqueue, tx_ring->next_to_clean,
+ head, tx_ring->next_to_use,
+ readl(tx_ring->tail), val);
+ }
+
+ pf->tx_timeout_last_recovery = jiffies;
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ pf->tx_timeout_recovery_level, txqueue);
+
+ switch (pf->tx_timeout_recovery_level) {
+ case 1:
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ break;
+ case 2:
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
+ break;
+ case 3:
+ set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
+ break;
+ default:
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
+ break;
+ }
+
+ i40e_service_event_schedule(pf);
+ pf->tx_timeout_recovery_level++;
+}
+
+/**
+ * i40e_get_vsi_stats_struct - Get System Network Statistics
+ * @vsi: the VSI we care about
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+ return &vsi->net_stats;
+}
+
+/**
+ * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
+ * @ring: Tx ring to get statistics from
+ * @stats: statistics entry to be updated
+ **/
+static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+}
+
+/**
+ * i40e_get_netdev_stats_struct - Get statistics for netdev interface
+ * @netdev: network interface device structure
+ * @stats: data structure to store statistics
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+static void i40e_get_netdev_stats_struct(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+ struct i40e_ring *ring;
+ int i;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ if (!vsi->tx_rings)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ u64 bytes, packets;
+ unsigned int start;
+
+ ring = READ_ONCE(vsi->tx_rings[i]);
+ if (!ring)
+ continue;
+ i40e_get_netdev_stats_struct_tx(ring, stats);
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
+ i40e_get_netdev_stats_struct_tx(ring, stats);
+ }
+
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ rcu_read_unlock();
+
+ /* following stats updated by i40e_watchdog_subtask() */
+ stats->multicast = vsi_stats->multicast;
+ stats->tx_errors = vsi_stats->tx_errors;
+ stats->tx_dropped = vsi_stats->tx_dropped;
+ stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_dropped = vsi_stats->rx_dropped;
+ stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+ stats->rx_length_errors = vsi_stats->rx_length_errors;
+}
+
+/**
+ * i40e_vsi_reset_stats - Resets all stats of the given vsi
+ * @vsi: the VSI to have its stats reset
+ **/
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
+{
+ struct rtnl_link_stats64 *ns;
+ int i;
+
+ if (!vsi)
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ memset(ns, 0, sizeof(*ns));
+ memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
+ memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
+ memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
+ if (vsi->rx_rings && vsi->rx_rings[0]) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memset(&vsi->rx_rings[i]->stats, 0,
+ sizeof(vsi->rx_rings[i]->stats));
+ memset(&vsi->rx_rings[i]->rx_stats, 0,
+ sizeof(vsi->rx_rings[i]->rx_stats));
+ memset(&vsi->tx_rings[i]->stats, 0,
+ sizeof(vsi->tx_rings[i]->stats));
+ memset(&vsi->tx_rings[i]->tx_stats, 0,
+ sizeof(vsi->tx_rings[i]->tx_stats));
+ }
+ }
+ vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_pf_reset_stats - Reset all of the stats for the given PF
+ * @pf: the PF to be reset
+ **/
+void i40e_pf_reset_stats(struct i40e_pf *pf)
+{
+ int i;
+
+ memset(&pf->stats, 0, sizeof(pf->stats));
+ memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
+ pf->stat_offsets_loaded = false;
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (pf->veb[i]) {
+ memset(&pf->veb[i]->stats, 0,
+ sizeof(pf->veb[i]->stats));
+ memset(&pf->veb[i]->stats_offsets, 0,
+ sizeof(pf->veb[i]->stats_offsets));
+ memset(&pf->veb[i]->tc_stats, 0,
+ sizeof(pf->veb[i]->tc_stats));
+ memset(&pf->veb[i]->tc_stats_offsets, 0,
+ sizeof(pf->veb[i]->tc_stats_offsets));
+ pf->veb[i]->stat_offsets_loaded = false;
+ }
+ }
+ pf->hw_csum_rx_error = 0;
+}
+
+/**
+ * i40e_compute_pci_to_hw_id - compute an index from the PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+ int pf_count = i40e_get_pf_count(hw);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+ return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ new_data = rd64(hw, loreg);
+
+ if (!offset_loaded || new_data < *offset)
+ *offset = new_data;
+ *stat = new_data - *offset;
+}
+
+/**
+ * i40e_stat_update48 - read and update a 48 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @hireg: the high 32 bit reg to read
+ * @loreg: the low 32 bit reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero. In the process, we also manage
+ * the potential roll-over.
+ **/
+static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ if (hw->device_id == I40E_DEV_ID_QEMU) {
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ } else {
+ new_data = rd64(hw, loreg);
+ }
+ if (!offset_loaded)
+ *offset = new_data;
+ if (likely(new_data >= *offset))
+ *stat = new_data - *offset;
+ else
+ *stat = (new_data + BIT_ULL(48)) - *offset;
+ *stat &= 0xFFFFFFFFFFFFULL;
+}
+
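+/* Rollover example for the 48-bit math above: with *offset =
+ * 0xFFFFFFFFFFF0 and a post-wrap read of new_data = 0x10, the stat is
+ * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 counts, and
+ * the final mask keeps the running value within 48 bits.
+ */
+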
+/**
+ * i40e_stat_update32 - read and update a 32 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+ if (likely(new_data >= *offset))
+ *stat = (u32)(new_data - *offset);
+ else
+ *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
+}
+
+/**
+ * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read and clear
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
+{
+ u32 new_data = rd32(hw, reg);
+
+ wr32(hw, reg, 1); /* must write a nonzero value to clear register */
+ *stat += new_data;
+}
+
+/**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+ int stat_idx, bool offset_loaded,
+ struct i40e_eth_stats *stat_offset,
+ struct i40e_eth_stats *stat)
+{
+ u64 rx_rdpc, rx_rxerr;
+
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+ &stat_offset->rx_discards, &rx_rdpc);
+ i40e_stat_update64(hw,
+ I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+ I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+ offset_loaded, &stat_offset->rx_discards_other,
+ &rx_rxerr);
+
+ stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
+ * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
+ * @vsi: the VSI to be updated
+ **/
+void i40e_update_eth_stats(struct i40e_vsi *vsi)
+{
+ int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_discards, &es->rx_discards);
+ i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
+ i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+ I40E_GLV_GORCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+ I40E_GLV_UPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+ I40E_GLV_MPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+ I40E_GLV_BPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+ I40E_GLV_GOTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+ I40E_GLV_UPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+ I40E_GLV_MPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+ I40E_GLV_BPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+
+ i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+ vsi->stat_offsets_loaded, oes, es);
+
+ vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_veb_stats - Update Switch component statistics
+ * @veb: the VEB being updated
+ **/
+void i40e_update_veb_stats(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ struct i40e_veb_tc_stats *veb_oes;
+ struct i40e_veb_tc_stats *veb_es;
+ int i, idx = 0;
+
+ idx = veb->stats_idx;
+ es = &veb->stats;
+ oes = &veb->stats_offsets;
+ veb_es = &veb->tc_stats;
+ veb_oes = &veb->tc_stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_discards, &es->tx_discards);
+ if (hw->revision_id > 0)
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol,
+ &es->rx_unknown_protocol);
+ i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
+ I40E_GLVEBTC_RPCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_rx_packets[i],
+ &veb_es->tc_rx_packets[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
+ I40E_GLVEBTC_RBCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_rx_bytes[i],
+ &veb_es->tc_rx_bytes[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
+ I40E_GLVEBTC_TPCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_tx_packets[i],
+ &veb_es->tc_tx_packets[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
+ I40E_GLVEBTC_TBCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_tx_bytes[i],
+ &veb_es->tc_tx_bytes[i]);
+ }
+ veb->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_vsi_stats - Update the vsi statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * There are a few instances where we store the same stat in a
+ * couple of different structs. This is partly because we have
+ * the netdev stats that need to be filled out, which is slightly
+ * different from the "eth_stats" defined by the chip and used in
+ * VF communications. We sort it out here.
+ **/
+static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
+{
+ u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
+ struct i40e_pf *pf = vsi->back;
+ struct rtnl_link_stats64 *ons;
+ struct rtnl_link_stats64 *ns; /* netdev stats */
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ u64 tx_restart, tx_busy;
+ struct i40e_ring *p;
+ u64 bytes, packets;
+ unsigned int start;
+ u64 tx_linearize;
+ u64 tx_force_wb;
+ u64 tx_stopped;
+ u64 rx_p, rx_b;
+ u64 tx_p, tx_b;
+ u16 q;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ ons = &vsi->net_stats_offsets;
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the netdev and vsi stats that the driver collects
+ * on the fly during packet processing
+ */
+ rx_b = rx_p = 0;
+ tx_b = tx_p = 0;
+ tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_stopped = 0;
+ rx_page = 0;
+ rx_buf = 0;
+ rx_reuse = 0;
+ rx_alloc = 0;
+ rx_waive = 0;
+ rx_busy = 0;
+ rcu_read_lock();
+ for (q = 0; q < vsi->num_queue_pairs; q++) {
+ /* locate Tx ring */
+ p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
+ tx_restart += p->tx_stats.restart_queue;
+ tx_busy += p->tx_stats.tx_busy;
+ tx_linearize += p->tx_stats.tx_linearize;
+ tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_stopped += p->tx_stats.tx_stopped;
+
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ rx_b += bytes;
+ rx_p += packets;
+ rx_buf += p->rx_stats.alloc_buff_failed;
+ rx_page += p->rx_stats.alloc_page_failed;
+ rx_reuse += p->rx_stats.page_reuse_count;
+ rx_alloc += p->rx_stats.page_alloc_count;
+ rx_waive += p->rx_stats.page_waive_count;
+ rx_busy += p->rx_stats.page_busy_count;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* locate XDP ring */
+ p = READ_ONCE(vsi->xdp_rings[q]);
+ if (!p)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
+ tx_restart += p->tx_stats.restart_queue;
+ tx_busy += p->tx_stats.tx_busy;
+ tx_linearize += p->tx_stats.tx_linearize;
+ tx_force_wb += p->tx_stats.tx_force_wb;
+ }
+ }
+ rcu_read_unlock();
+ vsi->tx_restart = tx_restart;
+ vsi->tx_busy = tx_busy;
+ vsi->tx_linearize = tx_linearize;
+ vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_stopped = tx_stopped;
+ vsi->rx_page_failed = rx_page;
+ vsi->rx_buf_failed = rx_buf;
+ vsi->rx_page_reuse = rx_reuse;
+ vsi->rx_page_alloc = rx_alloc;
+ vsi->rx_page_waive = rx_waive;
+ vsi->rx_page_busy = rx_busy;
+
+ ns->rx_packets = rx_p;
+ ns->rx_bytes = rx_b;
+ ns->tx_packets = tx_p;
+ ns->tx_bytes = tx_b;
+
+ /* update netdev stats from eth stats */
+ i40e_update_eth_stats(vsi);
+ ons->tx_errors = oes->tx_errors;
+ ns->tx_errors = es->tx_errors;
+ ons->multicast = oes->rx_multicast;
+ ns->multicast = es->rx_multicast;
+ ons->rx_dropped = oes->rx_discards;
+ ns->rx_dropped = es->rx_discards;
+ ons->tx_dropped = oes->tx_discards;
+ ns->tx_dropped = es->tx_discards;
+
+ /* pull in a couple of PF stats if this is the main VSI */
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ ns->rx_crc_errors = pf->stats.crc_errors;
+ ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
+ ns->rx_length_errors = pf->stats.rx_length_errors;
+ }
+}
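+
+/* Editor's sketch, not part of the upstream driver: the fetch/retry loops
+ * above are the standard u64_stats_sync read pattern for 64-bit counters
+ * that a softirq writer may update concurrently. A minimal illustration
+ * for a hypothetical ring (the example_* names are assumptions):
+ */
+#if 0 /* illustrative only */
+struct example_ring {
+ struct u64_stats_sync syncp; /* guards the counters below */
+ u64 packets;
+ u64 bytes;
+};
+
+static void example_read_stats(struct example_ring *ring,
+ u64 *packets, u64 *bytes)
+{
+ unsigned int start;
+
+ /* Retry the read if a writer updated the counters mid-read */
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ *packets = ring->packets;
+ *bytes = ring->bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+}
+#endif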
+
+/**
+ * i40e_update_pf_stats - Update the PF statistics counters.
+ * @pf: the PF to be updated
+ **/
+static void i40e_update_pf_stats(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
+
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
+
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
+
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
+
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
+
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_rx[i],
+ &nsd->priority_xoff_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
+ }
+
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+
+ /* FDIR stats */
+ i40e_stat_update_and_clear32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
+ &nsd->fd_atr_match);
+ i40e_stat_update_and_clear32(hw,
+ I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
+ &nsd->fd_sb_match);
+ i40e_stat_update_and_clear32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
+ &nsd->fd_atr_tunnel_match);
+
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ nsd->tx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ nsd->rx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
+
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+ !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
+ nsd->fd_sb_status = true;
+ else
+ nsd->fd_sb_status = false;
+
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+ !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
+ nsd->fd_atr_status = true;
+ else
+ nsd->fd_atr_status = false;
+
+ pf->stat_offsets_loaded = true;
+}
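+
+/* Editor's sketch, not part of the upstream driver: i40e_stat_update48()
+ * and i40e_stat_update32() (i40e_common.c) turn free-running hardware
+ * counters into deltas against a baseline captured on the first read.
+ * Assuming the usual offset-and-wrap arithmetic, the 48-bit case behaves
+ * like this hypothetical helper:
+ */
+#if 0 /* illustrative only */
+static u64 example_stat_delta48(u64 new_data, u64 offset, bool offset_loaded)
+{
+ /* The first read after reset only records the baseline */
+ if (!offset_loaded)
+ return 0;
+
+ if (new_data >= offset)
+ return new_data - offset;
+
+ /* The 48-bit counter wrapped; add back its modulus */
+ return (new_data + BIT_ULL(48)) - offset;
+}
+#endif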
+
+/**
+ * i40e_update_stats - Update the various statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * Update the various stats for this VSI and its related entities.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_update_pf_stats(pf);
+
+ i40e_update_vsi_stats(vsi);
+}
+
+/**
+ * i40e_count_filters - counts VSI mac filters
+ * @vsi: the VSI to be searched
+ *
+ * Returns the number of MAC filters
+ **/
+int i40e_count_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
+ int cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ ++cnt;
+
+ return cnt;
+}
+
+/**
+ * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr, s16 vlan)
+{
+ struct i40e_mac_filter *f;
+ u64 key;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (vlan == f->vlan))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_find_mac - Find a mac addr in the macvlan filters list
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address we are searching for
+ *
+ * Returns the first filter with the provided MAC address or NULL if
+ * the MAC address is not found
+ **/
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
+{
+ struct i40e_mac_filter *f;
+ u64 key;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
+ if ((ether_addr_equal(macaddr, f->macaddr)))
+ return f;
+ }
+ return NULL;
+}
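+
+/* Editor's sketch, not part of the upstream driver: both lookups above key
+ * the hash table on the MAC address alone via i40e_addr_to_hkey(), so
+ * filters for one MAC on different VLANs share a bucket and are told apart
+ * by the vlan compare. Assuming the key is simply the six address bytes
+ * packed into a u64, the helper behaves like this sketch:
+ */
+#if 0 /* illustrative only */
+static u64 example_mac_to_hkey(const u8 *macaddr)
+{
+ u64 key = 0;
+
+ /* Copy the 6 address bytes into the low bytes of the key */
+ ether_addr_copy((u8 *)&key, macaddr);
+ return key;
+}
+#endif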
+
+/**
+ * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
+ * @vsi: the VSI to be searched
+ *
+ * Returns true if VSI is in vlan mode or false otherwise
+ **/
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
+{
+ /* If we have a PVID, always operate in VLAN mode */
+ if (vsi->info.pvid)
+ return true;
+
+ /* We need to operate in VLAN mode whenever we have any filters with
+ * a VLAN other than I40E_VLAN_ANY. We could check the table each
+ * time, incurring search cost repeatedly. However, we can notice two
+ * things:
+ *
+ * 1) the only place where we can gain a VLAN filter is in
+ * i40e_add_filter.
+ *
+ * 2) the only place where filters are actually removed is in
+ * i40e_sync_filters_subtask.
+ *
+ * Thus, we can simply use a boolean value, has_vlan_filter, which we
+ * will set to true when we add a VLAN filter in i40e_add_filter. Then
+ * we have to perform the full search after deleting filters in
+ * i40e_sync_filters_subtask, but we already have to search
+ * filters here and can perform the check at the same time. This
+ * results in avoiding embedding a loop for VLAN mode inside another
+ * loop over all the filters, and should maintain correctness as noted
+ * above.
+ */
+ return vsi->has_vlan_filter;
+}
+
+/**
+ * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
+ * @vsi: the VSI to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ *
+ * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
+ * behave as expected. If we have any active VLAN filters remaining or about
+ * to be added then we need to update non-VLAN filters to be marked as VLAN=0
+ * so that they only match against untagged traffic. If we no longer have any
+ * active VLAN filters, we need to mark all non-VLAN filters as VLAN=-1
+ * so that they match against both tagged and untagged traffic. In this way,
+ * we ensure that we correctly receive the desired traffic. This ensures that
+ * when we have an active VLAN we will receive only untagged traffic and
+ * traffic matching active VLANs. If we have no active VLANs then we will
+ * operate in non-VLAN mode and receive all traffic, tagged or untagged.
+ *
+ * Finally, in a similar fashion, this function also corrects filters when
+ * there is an active PVID assigned to this VSI.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters)
+{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
+ struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new;
+ struct hlist_node *h;
+ int bkt, new_vlan;
+
+ /* To determine if a particular filter needs to be replaced we
+ * have the three following conditions:
+ *
+ * a) if we have a PVID assigned, then all filters which are
+ * not marked as VLAN=PVID must be replaced with filters that
+ * are.
+ * b) otherwise, if we have any active VLANS, all filters
+ * which are marked as VLAN=-1 must be replaced with
+ * filters marked as VLAN=0
+ * c) finally, if we do not have any active VLANS, all filters
+ * which are marked as VLAN=0 must be replaced with filters
+ * marked as VLAN=-1
+ */
+
+ /* Update the filters about to be added in place */
+ hlist_for_each_entry(new, tmp_add_list, hlist) {
+ if (pvid && new->f->vlan != pvid)
+ new->f->vlan = pvid;
+ else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
+ new->f->vlan = 0;
+ else if (!vlan_filters && new->f->vlan == 0)
+ new->f->vlan = I40E_VLAN_ANY;
+ }
+
+ /* Update the remaining active filters */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ /* Combine the checks for whether a filter needs to be changed
+ * and then determine the new VLAN inside the if block, in
+ * order to avoid duplicating code for adding the new filter
+ * then deleting the old filter.
+ */
+ if ((pvid && f->vlan != pvid) ||
+ (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!vlan_filters && f->vlan == 0)) {
+ /* Determine the new vlan we will be adding */
+ if (pvid)
+ new_vlan = pvid;
+ else if (vlan_filters)
+ new_vlan = 0;
+ else
+ new_vlan = I40E_VLAN_ANY;
+
+ /* Create the new filter */
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
+
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+
+ new->f = add_head;
+ new->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
+ }
+ }
+
+ vsi->has_vlan_filter = !!vlan_filters;
+
+ return 0;
+}
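+
+/* Editor's sketch, not part of the upstream driver: the three replacement
+ * rules listed in the comment above reduce to a pure function of the PVID,
+ * the active VLAN filter count and the filter's current VLAN. A
+ * hypothetical condensation:
+ */
+#if 0 /* illustrative only */
+static s16 example_corrected_vlan(s16 pvid, int vlan_filters, s16 vlan)
+{
+ if (pvid)
+ return pvid; /* rule a: force the port VLAN */
+ if (vlan_filters)
+ return vlan == I40E_VLAN_ANY ? 0 : vlan; /* rule b */
+ return vlan == 0 ? I40E_VLAN_ANY : vlan; /* rule c */
+}
+#endif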
+
+/**
+ * i40e_get_vf_new_vlan - Get new vlan id on a vf
+ * @vsi: the vsi to configure
+ * @new_mac: new mac filter to be added
+ * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Get new VLAN id based on current VLAN filters, trust, PVID
+ * and vf-vlan-prune-disable flag.
+ *
+ * Returns the value of the new vlan filter or
+ * the old value if no new filter is needed.
+ */
+static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
+ struct i40e_new_mac_filter *new_mac,
+ struct i40e_mac_filter *f,
+ int vlan_filters,
+ bool trusted)
+{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
+ struct i40e_pf *pf = vsi->back;
+ bool is_any;
+
+ if (new_mac)
+ f = new_mac->f;
+
+ if (pvid && f->vlan != pvid)
+ return pvid;
+
+ is_any = (trusted ||
+ !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+
+ if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (is_any && !vlan_filters && f->vlan == 0)) {
+ if (is_any)
+ return I40E_VLAN_ANY;
+ else
+ return 0;
+ }
+
+ return f->vlan;
+}
+
+/**
+ * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
+ * @vsi: the vsi to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Correct VF VLAN filters based on current VLAN filters, trust, PVID
+ * and vf-vlan-prune-disable flag.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters,
+ bool trusted)
+{
+ struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new_mac;
+ struct hlist_node *h;
+ int bkt, new_vlan;
+
+ hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
+ new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
+ vlan_filters, trusted);
+ }
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
+ trusted);
+ if (new_vlan != f->vlan) {
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
+ /* Create a temporary i40e_new_mac_filter */
+ new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
+ if (!new_mac)
+ return -ENOMEM;
+ new_mac->f = add_head;
+ new_mac->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new_mac->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
+ }
+ }
+
+ vsi->has_vlan_filter = !!vlan_filters;
+ return 0;
+}
+
+/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ *
+ * Remove whatever filter the firmware set up so the driver can manage
+ * its own filtering intelligently.
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ struct i40e_pf *pf = vsi->back;
+
+ /* Only appropriate for the PF main VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ /* Ignore error returns, some firmware does it this way... */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ /* ...and some firmware does it this way. */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+}
+
+/**
+ * i40e_add_filter - Add a mac/vlan filter to the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
+ * being held.
+ **/
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr, s16 vlan)
+{
+ struct i40e_mac_filter *f;
+ u64 key;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ f = i40e_find_filter(vsi, macaddr, vlan);
+ if (!f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+
+ /* Update the boolean indicating if we need to function in
+ * VLAN mode.
+ */
+ if (vlan >= 0)
+ vsi->has_vlan_filter = true;
+
+ ether_addr_copy(f->macaddr, macaddr);
+ f->vlan = vlan;
+ f->state = I40E_FILTER_NEW;
+ INIT_HLIST_NODE(&f->hlist);
+
+ key = i40e_addr_to_hkey(macaddr);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
+ }
+
+ /* If we're asked to add a filter that has been marked for removal, it
+ * is safe to simply restore it to active state. __i40e_del_filter
+ * will have simply deleted any filters which were previously marked
+ * NEW or FAILED, so if it is currently marked REMOVE it must have
+ * previously been ACTIVE. Since we haven't yet run the sync filters
+ * task, just restore this filter to the ACTIVE state so that the
+ * sync task leaves it in place.
+ */
+ if (f->state == I40E_FILTER_REMOVE)
+ f->state = I40E_FILTER_ACTIVE;
+
+ return f;
+}
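+
+/* Editor's sketch, not part of the upstream driver: a minimal caller
+ * honoring the locking contract in the kernel-doc above. The helper name
+ * and error handling are assumptions for illustration:
+ */
+#if 0 /* illustrative only */
+static int example_add_untagged(struct i40e_vsi *vsi, const u8 *mac)
+{
+ struct i40e_mac_filter *f;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return f ? 0 : -ENOMEM;
+}
+#endif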
+
+/**
+ * __i40e_del_filter - Remove a specific filter from the VSI
+ * @vsi: VSI to remove from
+ * @f: the filter to remove from the list
+ *
+ * This function should be called instead of i40e_del_filter only if you know
+ * the exact filter you will remove already, such as via i40e_find_filter or
+ * i40e_find_mac.
+ *
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
+ * being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
+ **/
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+{
+ if (!f)
+ return;
+
+ /* If the filter was never added to firmware then we can just delete it
+ * directly and we don't want to set the status to remove or else an
+ * admin queue command will unnecessarily fire.
+ */
+ if ((f->state == I40E_FILTER_FAILED) ||
+ (f->state == I40E_FILTER_NEW)) {
+ hash_del(&f->hlist);
+ kfree(f);
+ } else {
+ f->state = I40E_FILTER_REMOVE;
+ }
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
+}
+
+/**
+ * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the VLAN
+ *
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
+ * being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
+ **/
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return;
+
+ f = i40e_find_filter(vsi, macaddr, vlan);
+ __i40e_del_filter(vsi, f);
+}
+
+/**
+ * i40e_add_mac_filter - Add a MAC filter for all active VLANs
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ *
+ * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
+ * go through all the macvlan filters and add a macvlan filter for each
+ * unique vlan that already exists. If a PVID has been assigned, instead only
+ * add the macaddr to that VLAN.
+ *
+ * Returns last filter added on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr)
+{
+ struct i40e_mac_filter *f, *add = NULL;
+ struct hlist_node *h;
+ int bkt;
+
+ if (vsi->info.pvid)
+ return i40e_add_filter(vsi, macaddr,
+ le16_to_cpu(vsi->info.pvid));
+
+ if (!i40e_is_vsi_in_vlan(vsi))
+ return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
+ continue;
+ add = i40e_add_filter(vsi, macaddr, f->vlan);
+ if (!add)
+ return NULL;
+ }
+
+ return add;
+}
+
+/**
+ * i40e_del_mac_filter - Remove a MAC filter from all VLANs
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ *
+ * Removes a given MAC address from a VSI regardless of what VLAN it has been
+ * associated with.
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ bool found = false;
+ int bkt;
+
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (ether_addr_equal(macaddr, f->macaddr)) {
+ __i40e_del_filter(vsi, f);
+ found = true;
+ }
+ }
+
+ if (found)
+ return 0;
+ else
+ return -ENOENT;
+}
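+
+/* Editor's sketch, not part of the upstream driver: this helper asserts
+ * the hash lock via lockdep rather than merely documenting it, so a caller
+ * looks like this hypothetical sketch:
+ */
+#if 0 /* illustrative only */
+static int example_del_mac(struct i40e_vsi *vsi, const u8 *mac)
+{
+ int err;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ err = i40e_del_mac_filter(vsi, mac); /* -ENOENT if absent */
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return err;
+}
+#endif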
+
+/**
+ * i40e_set_mac - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
+ netdev_info(netdev, "already using mac address %pM\n",
+ addr->sa_data);
+ return 0;
+ }
+
+ if (test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(hw->mac.addr, addr->sa_data))
+ netdev_info(netdev, "returning to hw mac address %pM\n",
+ hw->mac.addr);
+ else
+ netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+
+ /* Copy the address first, so that we avoid a possible race with
+ * .set_rx_mode().
+ * - Remove old address from MAC filter
+ * - Copy new address
+ * - Add new address to MAC filter
+ */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_del_mac_filter(vsi, netdev->dev_addr);
+ eth_hw_addr_set(netdev, addr->sa_data);
+ i40e_add_mac_filter(vsi, netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ int ret;
+
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ addr->sa_data, NULL);
+ if (ret)
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(pf);
+ return 0;
+}
+
+/**
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ * @lut: pointer to lookup table of lut_size
+ * @lut_size: size of the lookup table
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int ret = 0;
+
+ if (seed) {
+ struct i40e_aqc_get_set_rss_key_data *seed_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)seed;
+ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ }
+ if (lut) {
+ bool pf_lut = vsi->type == I40E_VSI_MAIN;
+
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ u8 *lut;
+ int ret;
+
+ if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
+ return 0;
+ if (!vsi->rss_size)
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* Use the user configured hash keys and lookup table if there is one,
+ * otherwise use default
+ */
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
+ kfree(lut);
+ return ret;
+}
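+
+/* Editor's sketch, not part of the upstream driver: when no user-supplied
+ * LUT exists, i40e_fill_rss_lut() spreads table entries across the active
+ * queues. Assuming the usual round-robin fill, it behaves like this
+ * sketch:
+ */
+#if 0 /* illustrative only */
+static void example_fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
+{
+ u16 i;
+
+ /* Each table slot points at a queue, cycling through all queues */
+ for (i = 0; i < lut_size; i++)
+ lut[i] = i % rss_size;
+}
+#endif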
+
+/**
+ * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ * @enabled_tc: bitmap of enabled traffic classes
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ **/
+static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt,
+ u8 enabled_tc)
+{
+ u16 qcount = 0, max_qcount, qmap, sections = 0;
+ int i, override_q, pow, num_qps, ret;
+ u8 netdev_tc = 0, offset = 0;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+ vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+ num_qps = vsi->mqprio_qopt.qopt.count[0];
+
+ /* find the next higher power-of-2 of num queue pairs */
+ pow = ilog2(num_qps);
+ if (!is_power_of_2(num_qps))
+ pow++;
+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ /* Setup queue offset/count for all TCs for given VSI */
+ max_qcount = vsi->mqprio_qopt.qopt.count[0];
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* See if the given TC is enabled for the given VSI */
+ if (vsi->tc_config.enabled_tc & BIT(i)) {
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount = vsi->mqprio_qopt.qopt.count[i];
+ if (qcount > max_qcount)
+ max_qcount = qcount;
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
+ } else {
+ /* TC is not enabled so set the offset to
+ * default queue and allocate one queue
+ * for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+ }
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_queue_pairs = offset + qcount;
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+
+ /* Reconfigure RSS for main VSI with max queue count */
+ vsi->rss_size = max_qcount;
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to reconfig rss for num_queues (%u)\n",
+ max_qcount);
+ return ret;
+ }
+ vsi->reconfig_rss = true;
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reconfigured rss with num_queues (%u)\n", max_qcount);
+
+ /* Find the queue count and starting queue offset available
+ * for channel VSIs
+ */
+ override_q = vsi->mqprio_qopt.qopt.count[0];
+ if (override_q && override_q < vsi->num_queue_pairs) {
+ vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
+ vsi->next_base_queue = override_q;
+ }
+ return 0;
+}
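+
+/* Editor's sketch, not part of the upstream driver: the qmap word packs a
+ * TC's queue offset together with a power-of-two queue count exponent, so
+ * 6 configured queues are advertised as a block of 8. Assuming the field
+ * layout given by the SHIFT macros above:
+ */
+#if 0 /* illustrative only */
+static u16 example_build_qmap(u16 offset, u16 qcount)
+{
+ u16 pow = order_base_2(qcount); /* e.g. qcount 6 -> exponent 3 */
+
+ return (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+}
+#endif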
+
+/**
+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @enabled_tc: Enabled TCs bitmap
+ * @is_add: True if called before Add VSI
+ *
+ * Setup VSI queue mapping for enabled traffic classes.
+ **/
+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt,
+ u8 enabled_tc,
+ bool is_add)
+{
+ struct i40e_pf *pf = vsi->back;
+ u16 num_tc_qps = 0;
+ u16 sections = 0;
+ u8 netdev_tc = 0;
+ u16 numtc = 1;
+ u16 qcount;
+ u8 offset;
+ u16 qmap;
+ int i;
+
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ offset = 0;
+ /* zero out queue mapping; it will get updated at the end of the function */
+ memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ /* This code helps add more queues to the VSI if we have
+ * more cores than RSS can support; the higher cores will
+ * be served by ATR or other filters. Furthermore, a
+ * non-zero req_queue_pairs says that the user requested a
+ * new queue count via ethtool's set_channels, so use this
+ * value for queue distribution across traffic classes.
+ * We need at least one queue pair for the interface
+ * to be usable, as seen in the else branch below.
+ */
+ if (vsi->req_queue_pairs > 0)
+ vsi->num_queue_pairs = vsi->req_queue_pairs;
+ else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ vsi->num_queue_pairs = pf->num_lan_msix;
+ else
+ vsi->num_queue_pairs = 1;
+ }
+
+ /* Number of queues per enabled TC */
+ if (vsi->type == I40E_VSI_MAIN ||
+ (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
+ num_tc_qps = vsi->num_queue_pairs;
+ else
+ num_tc_qps = vsi->alloc_queue_pairs;
+
+ if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Find numtc from enabled TC bitmap */
+ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & BIT(i)) /* TC is enabled */
+ numtc++;
+ }
+ if (!numtc) {
+ dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
+ numtc = 1;
+ }
+ num_tc_qps = num_tc_qps / numtc;
+ num_tc_qps = min_t(int, num_tc_qps,
+ i40e_pf_get_max_q_per_tc(pf));
+ }
+
+ vsi->tc_config.numtc = numtc;
+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+
+ /* Do not allow more TC queue pairs than MSI-X vectors exist */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
+
+ /* Setup queue offset/count for all TCs for given VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* See if the given TC is enabled for the given VSI */
+ if (vsi->tc_config.enabled_tc & BIT(i)) {
+ /* TC is enabled */
+ int pow, num_qps;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED)) ||
+ vsi->tc_config.enabled_tc != 1) {
+ qcount = min_t(int, pf->alloc_rss_size,
+ num_tc_qps);
+ break;
+ }
+ fallthrough;
+ case I40E_VSI_FDIR:
+ case I40E_VSI_SRIOV:
+ case I40E_VSI_VMDQ2:
+ default:
+ qcount = num_tc_qps;
+ WARN_ON(i != 0);
+ break;
+ }
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+
+ /* find the next higher power-of-2 of num queue pairs */
+ num_qps = qcount;
+ pow = 0;
+ while (num_qps && (BIT_ULL(pow) < qcount)) {
+ pow++;
+ num_qps >>= 1;
+ }
+
+ vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
+ qmap =
+ (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ offset += qcount;
+ } else {
+ /* TC is not enabled so set the offset to
+ * default queue and allocate one queue
+ * for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+
+ qmap = 0;
+ }
+ ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+ }
+ /* Do not change previously set num_queue_pairs for PFs and VFs */
+ if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
+ (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
+ (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
+ vsi->num_queue_pairs = offset;
+
+ /* Scheduler section valid can only be set for ADD VSI */
+ if (is_add) {
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+ ctxt->info.up_enable_bits = enabled_tc;
+ }
+ if (vsi->type == I40E_VSI_SRIOV) {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ ctxt->info.queue_mapping[i] =
+ cpu_to_le16(vsi->base_queue + i);
+ } else {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ }
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
+
+/**
+ * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (i40e_add_mac_filter(vsi, addr))
+ return 0;
+ else
+ return -ENOMEM;
+}
+
+/**
+ * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the VSI's MAC/VLAN filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ i40e_del_mac_filter(vsi, addr);
+
+ return 0;
+}
+
+/**
+ * i40e_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40e_set_rx_mode(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
+ __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* check for other flag changes */
+ if (vsi->current_netdev_flags != vsi->netdev->flags) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
+ }
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to VSI struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated for deletion.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(f, h, from, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
+
+ /* Move the element back into MAC filter list*/
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ }
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated for addition.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
+{
+ struct i40e_new_mac_filter *new;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(new, h, from, hlist) {
+ /* We can simply free the wrapper structure */
+ hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
+ kfree(new);
+ }
+}
+
+/**
+ * i40e_next_filter - Get the next non-broadcast filter from a list
+ * @next: pointer to filter in list
+ *
+ * Returns the next non-broadcast filter in the list. Required so that we
+ * ignore broadcast filters within the list, since these are not handled via
+ * the normal firmware update path.
+ */
+static
+struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
+{
+ hlist_for_each_entry_continue(next, hlist) {
+ if (!is_broadcast_ether_addr(next->f->macaddr))
+ return next;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_update_filter_state - Update filter state based on return data
+ * from firmware
+ * @count: Number of filters added
+ * @add_list: return data from fw
+ * @add_head: pointer to first filter in current batch
+ *
+ * MAC filter entries from this list were slated to be added to the device.
+ * Returns the number of successful filters. Note that 0 does NOT mean
+ * success!
+ **/
+static int
+i40e_update_filter_state(int count,
+ struct i40e_aqc_add_macvlan_element_data *add_list,
+ struct i40e_new_mac_filter *add_head)
+{
+ int retval = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ /* Always check status of each filter. We don't need to check
+ * the firmware return status because we pre-set the filter
+ * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
+ * request to the adminq. Thus, if it no longer matches then
+ * we know the filter is active.
+ */
+ if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
+ add_head->state = I40E_FILTER_FAILED;
+ } else {
+ add_head->state = I40E_FILTER_ACTIVE;
+ retval++;
+ }
+
+ add_head = i40e_next_filter(add_head);
+ if (!add_head)
+ break;
+ }
+
+ return retval;
+}
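+
+/* Editor's sketch, not part of the upstream driver: the sentinel check
+ * above works because every request element is pre-filled before being
+ * sent, as done in i40e_sync_vsi_filters(). A hypothetical sketch of the
+ * send-side half of the trick:
+ */
+#if 0 /* illustrative only */
+static void
+example_prefill_sentinel(struct i40e_aqc_add_macvlan_element_data *elem)
+{
+ /* Firmware overwrites match_method for every filter it programs,
+ * so any element still reading I40E_AQC_MM_ERR_NO_RES afterwards
+ * is known to have failed.
+ */
+ elem->match_method = I40E_AQC_MM_ERR_NO_RES;
+}
+#endif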
+
+/**
+ * i40e_aqc_del_filters - Request firmware to delete a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @num_del: the number of filters to delete
+ * @retval: Set to -EIO on failure to delete
+ *
+ * Send a request to firmware via AdminQ to delete a set of filters. Uses
+ * *retval instead of a return value so that success does not force *retval
+ * to be set to 0. This ensures that a sequence of calls to this function
+ * preserves the previous value of *retval on successful delete.
+ */
+static
+void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_remove_macvlan_element_data *list,
+ int num_del, int *retval)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ enum i40e_admin_queue_err aq_status;
+ int aq_ret;
+
+ aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+ &aq_status);
+
+ /* Explicitly ignore and do not report when firmware returns ENOENT */
+ if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ *retval = -EIO;
+ dev_info(&vsi->back->pdev->dev,
+ "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
+ vsi_name, ERR_PTR(aq_ret),
+ i40e_aq_str(hw, aq_status));
+ }
+}
+
+/**
+ * i40e_aqc_add_filters - Request firmware to add a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @add_head: Position in the add hlist
+ * @num_add: the number of filters to add
+ *
+ * Send a request to firmware via AdminQ to add a chunk of filters. Will set
+ * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
+ * space for more filters.
+ */
+static
+void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_add_macvlan_element_data *list,
+ struct i40e_new_mac_filter *add_head,
+ int num_add)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ enum i40e_admin_queue_err aq_status;
+ int fcnt;
+
+ i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
+ fcnt = i40e_update_filter_state(num_add, list, add_head);
+
+ if (fcnt != num_add) {
+ if (vsi->type == I40E_VSI_MAIN) {
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_status), vsi_name);
+ } else if (vsi->type == I40E_VSI_SRIOV ||
+ vsi->type == I40E_VSI_VMDQ1 ||
+ vsi->type == I40E_VSI_VMDQ2) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi_name);
+ } else {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi->type);
+ }
+ }
+}
+
+/**
+ * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
+ * @vsi: pointer to the VSI
+ * @vsi_name: the VSI name
+ * @f: filter data
+ *
+ * This function sets or clears the promiscuous broadcast flags for VLAN
+ * filters in order to properly receive broadcast frames. Assumes that only
+ * broadcast filters are passed.
+ *
+ * Returns status indicating success or failure.
+ **/
+static int
+i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_mac_filter *f)
+{
+ bool enable = f->state == I40E_FILTER_NEW;
+ struct i40e_hw *hw = &vsi->back->hw;
+ int aq_ret;
+
+ if (f->vlan == I40E_VLAN_ANY) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw,
+ vsi->seid,
+ enable,
+ NULL);
+ } else {
+ aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
+ vsi->seid,
+ enable,
+ f->vlan,
+ NULL);
+ }
+
+ if (aq_ret) {
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s, forcing overflow promiscuous on %s\n",
+ i40e_aq_str(hw, hw->aq.asq_last_status),
+ vsi_name);
+ }
+
+ return aq_ret;
+}
+
+/**
+ * i40e_set_promiscuous - set promiscuous mode
+ * @pf: board private structure
+ * @promisc: promisc on or off
+ *
+ * There are different ways of setting promiscuous mode on a PF depending on
+ * what state/environment we're in. This identifies and sets it appropriately.
+ * Returns 0 on success.
+ **/
+static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+ int aq_ret;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ pf->lan_veb != I40E_NO_VEB &&
+ !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ /* Set defport ON for the main VSI instead of true promisc.
+ * This way we get all unicast/multicast and VLAN promisc
+ * behavior but do not get VF or VMDq traffic replicated
+ * on the main VSI.
+ */
+ if (promisc)
+ aq_ret = i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret = i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL,
+ true);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+
+ if (!aq_ret)
+ pf->cur_promisc = promisc;
+
+ return aq_ret;
+}
+
+/**
+ * i40e_sync_vsi_filters - Update the VSI filter list to the HW
+ * @vsi: ptr to the VSI
+ *
+ * Push any outstanding VSI filter changes through the AdminQ.
+ *
+ * Returns 0 or error value
+ **/
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+{
+ struct hlist_head tmp_add_list, tmp_del_list;
+ struct i40e_mac_filter *f;
+ struct i40e_new_mac_filter *new, *add_head = NULL;
+ struct i40e_hw *hw = &vsi->back->hw;
+ bool old_overflow, new_overflow;
+ unsigned int failed_filters = 0;
+ unsigned int vlan_filters = 0;
+ char vsi_name[16] = "PF";
+ int filter_list_len = 0;
+ u32 changed_flags = 0;
+ struct hlist_node *h;
+ struct i40e_pf *pf;
+ int num_add = 0;
+ int num_del = 0;
+ int aq_ret = 0;
+ int retval = 0;
+ u16 cmd_flags;
+ int list_size;
+ int bkt;
+
+ /* empty array typed pointers, allocated with kzalloc below */
+ struct i40e_aqc_add_macvlan_element_data *add_list;
+ struct i40e_aqc_remove_macvlan_element_data *del_list;
+
+ while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
+ usleep_range(1000, 2000);
+ pf = vsi->back;
+
+ old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
+ if (vsi->netdev) {
+ changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
+ vsi->current_netdev_flags = vsi->netdev->flags;
+ }
+
+ INIT_HLIST_HEAD(&tmp_add_list);
+ INIT_HLIST_HEAD(&tmp_del_list);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
+ else if (vsi->type != I40E_VSI_MAIN)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
+
+ if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
+ vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ /* Create a list of filters to delete. */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE) {
+ /* Move the element into temporary del_list */
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_del_list);
+
+ /* Avoid counting removed filters */
+ continue;
+ }
+ if (f->state == I40E_FILTER_NEW) {
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto err_no_memory_locked;
+
+ /* Store pointer to the real filter */
+ new->f = f;
+ new->state = f->state;
+
+ /* Add it to the hash list */
+ hlist_add_head(&new->hlist, &tmp_add_list);
+ }
+
+ /* Count the number of active (current and new) VLAN
+ * filters we have now. Does not count filters which
+ * are marked for deletion.
+ */
+ if (f->vlan > 0)
+ vlan_filters++;
+ }
+
+ if (vsi->type != I40E_VSI_SRIOV)
+ retval = i40e_correct_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters);
+ else if (pf->vf)
+ retval = i40e_correct_vf_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters, pf->vf[vsi->vf_id].trusted);
+
+ hlist_for_each_entry(new, &tmp_add_list, hlist)
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
+
+ if (retval)
+ goto err_no_memory_locked;
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
+
+ /* Now process 'del_list' outside the lock */
+ if (!hlist_empty(&tmp_del_list)) {
+ filter_list_len = hw->aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ list_size = filter_list_len *
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kzalloc(list_size, GFP_ATOMIC);
+ if (!del_list)
+ goto err_no_memory;
+
+ hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
+ cmd_flags = 0;
+
+ /* handle broadcast filters by updating the broadcast
+ * promiscuous flag and releasing the filter entry.
+ */
+ if (is_broadcast_ether_addr(f->macaddr)) {
+ i40e_aqc_broadcast_filter(vsi, vsi_name, f);
+
+ hlist_del(&f->hlist);
+ kfree(f);
+ continue;
+ }
+
+ /* add to delete list */
+ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
+ if (f->vlan == I40E_VLAN_ANY) {
+ del_list[num_del].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ } else {
+ del_list[num_del].vlan_tag =
+ cpu_to_le16((u16)(f->vlan));
+ }
+
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ del_list[num_del].flags = cmd_flags;
+ num_del++;
+
+ /* flush a full buffer */
+ if (num_del == filter_list_len) {
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
+ memset(del_list, 0, list_size);
+ num_del = 0;
+ }
+ /* Release memory for MAC filter entries which were
+ * synced up with HW.
+ */
+ hlist_del(&f->hlist);
+ kfree(f);
+ }
+
+ if (num_del) {
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
+ }
+
+ kfree(del_list);
+ del_list = NULL;
+ }
+
+ if (!hlist_empty(&tmp_add_list)) {
+ /* Do all the adds now. */
+ filter_list_len = hw->aq.asq_buf_size /
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ list_size = filter_list_len *
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kzalloc(list_size, GFP_ATOMIC);
+ if (!add_list)
+ goto err_no_memory;
+
+ num_add = 0;
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
+ /* handle broadcast filters by updating the broadcast
+ * promiscuous flag instead of adding a MAC filter.
+ */
+ if (is_broadcast_ether_addr(new->f->macaddr)) {
+ if (i40e_aqc_broadcast_filter(vsi, vsi_name,
+ new->f))
+ new->state = I40E_FILTER_FAILED;
+ else
+ new->state = I40E_FILTER_ACTIVE;
+ continue;
+ }
+
+ /* add to add array */
+ if (num_add == 0)
+ add_head = new;
+ cmd_flags = 0;
+ ether_addr_copy(add_list[num_add].mac_addr,
+ new->f->macaddr);
+ if (new->f->vlan == I40E_VLAN_ANY) {
+ add_list[num_add].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ } else {
+ add_list[num_add].vlan_tag =
+ cpu_to_le16((u16)(new->f->vlan));
+ }
+ add_list[num_add].queue_number = 0;
+ /* set invalid match method for later detection */
+ add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ add_list[num_add].flags = cpu_to_le16(cmd_flags);
+ num_add++;
+
+ /* flush a full buffer */
+ if (num_add == filter_list_len) {
+ i40e_aqc_add_filters(vsi, vsi_name, add_list,
+ add_head, num_add);
+ memset(add_list, 0, list_size);
+ num_add = 0;
+ }
+ }
+ if (num_add) {
+ i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
+ num_add);
+ }
+ /* Now move all of the filters from the temp add list back to
+ * the VSI's list.
+ */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
+ /* Only update the state if we're still NEW */
+ if (new->f->state == I40E_FILTER_NEW)
+ new->f->state = new->state;
+ hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
+ kfree(new);
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ kfree(add_list);
+ add_list = NULL;
+ }
+
+ /* Determine the number of active and failed filters. */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->state == I40E_FILTER_ACTIVE)
+ vsi->active_filters++;
+ else if (f->state == I40E_FILTER_FAILED)
+ failed_filters++;
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* Check if we are able to exit overflow promiscuous mode. We can
+ * safely exit if we didn't just enter, we no longer have any failed
+ * filters, and we have reduced filters below the threshold value.
+ */
+ if (old_overflow && !failed_filters &&
+ vsi->active_filters < vsi->promisc_threshold) {
+ dev_info(&pf->pdev->dev,
+ "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
+ vsi_name);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ vsi->promisc_threshold = 0;
+ }
+
+ /* if the VF is not trusted do not do promisc */
+ if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
+ !pf->vf[vsi->vf_id].trusted) {
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ goto out;
+ }
+
+ new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
+ /* If we are entering overflow promiscuous, we need to calculate a new
+ * threshold for when we are safe to exit
+ */
+ if (!old_overflow && new_overflow)
+ vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
+
+ /* check for changes in promiscuous modes */
+ if (changed_flags & IFF_ALLMULTI) {
+ bool cur_multipromisc;
+
+ cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set multi promisc failed on %s, err %pe aq_err %s\n",
+ vsi_name,
+ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
+ cur_multipromisc ? "entering" : "leaving");
+ }
+ }
+
+ if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
+ bool cur_promisc;
+
+ cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
+ new_overflow);
+ aq_ret = i40e_set_promiscuous(pf, cur_promisc);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
+ cur_promisc ? "on" : "off",
+ vsi_name,
+ ERR_PTR(aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+out:
+ /* if something went wrong then set the changed flag so we try again */
+ if (retval)
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
+ return retval;
+
+err_no_memory:
+ /* Restore elements on the temporary add and delete lists */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+err_no_memory_locked:
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi, &tmp_add_list);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
+ return -ENOMEM;
+}
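+/* Worked example of the overflow-promiscuous threshold above (illustrative,
+ * not driver logic): if 64 filters were active when the VSI first hit
+ * __I40E_VSI_OVERFLOW_PROMISC, the exit threshold becomes (64 * 3) / 4 = 48.
+ * The VSI then stays in promiscuous mode until a later sync sees no failed
+ * filters and fewer than 48 active ones, at which point the threshold is
+ * reset to 0 and the overflow bit is cleared.
+ */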
+
+/**
+ * i40e_sync_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+{
+ int v;
+
+ if (!pf)
+ return;
+ if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
+ return;
+ if (test_bit(__I40E_VF_DISABLE, pf->state)) {
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ return;
+ }
+
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v] &&
+ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+ !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
+ int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+
+ if (ret) {
+ /* come back and try again later */
+ set_bit(__I40E_MACVLAN_SYNC_PENDING,
+ pf->state);
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: the vsi
+ **/
+static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return I40E_RXBUFFER_2048;
+ else
+ return I40E_RXBUFFER_3072;
+}
+
+/**
+ * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
+
+ if (frame_size > i40e_max_xdp_frame_size(vsi))
+ return -EINVAL;
+ }
+
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ i40e_vsi_reinit_locked(vsi);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ return 0;
+}
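+/* Illustration of the XDP MTU bound checked above, assuming the usual
+ * definition of I40E_PACKET_HDR_PAD (Ethernet header + FCS + two VLAN
+ * tags = 14 + 4 + 2 * 4 = 26 bytes): on 4K pages without legacy-rx the
+ * Rx buffer is I40E_RXBUFFER_3072, so the largest MTU that still fits is
+ * 3072 - 26 = 3046; with 8K+ pages or legacy-rx the buffer is
+ * I40E_RXBUFFER_2048 and the bound drops to 2048 - 26 = 2022.
+ */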
+
+/**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return i40e_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return i40e_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
+ return; /* already enabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+
+ ctxt.seid = vsi->seid;
+ ctxt.info = vsi->info;
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ }
+}
+
+/**
+ * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+ I40E_AQ_VSI_PVLAN_EMOD_MASK))
+ return; /* already disabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ ctxt.seid = vsi->seid;
+ ctxt.info = vsi->info;
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ }
+}
+
+/**
+ * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added (0 = untagged only, -1 = any)
+ *
+ * This is a helper function for adding a new MAC/VLAN filter with the
+ * specified VLAN for each existing MAC address already in the hash table.
+ * This function does *not* perform any accounting to update filters based on
+ * VLAN mode.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
+ **/
+int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
+{
+ struct i40e_mac_filter *f, *add_f;
+ struct hlist_node *h;
+ int bkt;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ /* If we're asked to add a filter that has been marked for
+ * removal, it is safe to simply restore it to active state.
+ * __i40e_del_filter will have simply deleted any filters which
+ * were previously marked NEW or FAILED, so if it is currently
+ * marked REMOVE it must have previously been ACTIVE. Since we
+ * haven't yet run the sync filters task, just restore this
+ * filter to the ACTIVE state so that the sync task leaves it
+ * in place.
+ */
+ if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
+ f->state = I40E_FILTER_ACTIVE;
+ continue;
+ } else if (f->state == I40E_FILTER_REMOVE) {
+ continue;
+ }
+ add_f = i40e_add_filter(vsi, f->macaddr, vid);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add vlan filter %d for %pM\n",
+ vid, f->macaddr);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_add_vlan - Add VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN id to be added
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
+{
+ int err;
+
+ if (vsi->info.pvid)
+ return -EINVAL;
+
+ /* The network stack will attempt to add VID=0, with the intention to
+ * receive priority tagged packets with a VLAN of 0. Our HW receives
+ * these packets by default when configured to receive untagged
+ * packets, so we don't need to add a filter for this case.
+ * Additionally, HW interprets adding a VID=0 filter as meaning to
+ * receive *only* tagged traffic and stops receiving untagged traffic.
+ * Thus, we do not want to actually add a filter for VID=0
+ */
+ if (!vid)
+ return 0;
+
+	/* Lock once because all functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ err = i40e_add_vlan_all_mac(vsi, vid);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (err)
+ return err;
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
+}
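+/* Example of the VID=0 special case above: the 8021q core typically calls
+ * ndo_vlan_rx_add_vid() with vid == 0 when the interface comes up so that
+ * priority-tagged (PCP-only) frames are received. i40e_vsi_add_vlan()
+ * returns 0 without programming anything, since the HW already accepts
+ * untagged and priority-tagged frames, and a real VID=0 filter would
+ * instead turn off reception of untagged traffic.
+ */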
+
+/**
+ * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed (0 = untagged only, -1 = any)
+ *
+ * This function should be used to remove all VLAN filters which match the
+ * given VID. It does not schedule the service event and does not take the
+ * mac_filter_hash_lock so it may be combined with other operations under
+ * a single invocation of the mac_filter_hash_lock.
+ *
+ * NOTE: this function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->vlan == vid)
+ __i40e_del_filter(vsi, f);
+ }
+}
+
+/**
+ * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN id to be removed
+ **/
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
+{
+ if (!vid || vsi->info.pvid)
+ return;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_rm_vlan_all_mac(vsi, vid);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+}
+
+/**
+ * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
+ * @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
+ **/
+static int i40e_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ int ret = 0;
+
+ if (vid >= VLAN_N_VID)
+ return -EINVAL;
+
+ ret = i40e_vsi_add_vlan(vsi, vid);
+ if (!ret)
+ set_bit(vid, vsi->active_vlans);
+
+ return ret;
+}
+
+/**
+ * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
+ * @vid: vlan id to be added
+ **/
+static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vid >= VLAN_N_VID)
+ return;
+ set_bit(vid, vsi->active_vlans);
+}
+
+/**
+ * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
+ * @vid: vlan id to be removed
+ *
+ * net_device_ops implementation for removing vlan ids
+ **/
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+	/* the return code is ignored as there is nothing a user
+	 * can do about a failure to remove, and a log message was
+	 * already printed from the other function
+	 */
+ i40e_vsi_kill_vlan(vsi, vid);
+
+ clear_bit(vid, vsi->active_vlans);
+
+ return 0;
+}
+
+/**
+ * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
+ * @vsi: the vsi being brought back up
+ **/
+static void i40e_restore_vlan(struct i40e_vsi *vsi)
+{
+ u16 vid;
+
+ if (!vsi->netdev)
+ return;
+
+ if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
+ i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
+ vid);
+}
+
+/**
+ * i40e_vsi_add_pvid - Add pvid for the VSI
+ * @vsi: the vsi being adjusted
+ * @vid: the vlan id to set as a PVID
+ **/
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+{
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = cpu_to_le16(vid);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_EMOD_STR;
+
+ ctxt.seid = vsi->seid;
+ ctxt.info = vsi->info;
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add pvid failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_remove_pvid - Remove the pvid from the VSI
+ * @vsi: the vsi being adjusted
+ *
+ * Just clear the pvid and disable VLAN stripping to put the VSI back to normal
+ **/
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
+{
+ vsi->info.pvid = 0;
+
+ i40e_vlan_stripping_disable(vsi);
+}
+
+/**
+ * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free VSI's transmit software resources
+ **/
+static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
+
+ if (vsi->xdp_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ i40e_free_tx_resources(vsi->xdp_rings[i]);
+ }
+}
+
+/**
+ * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
+ return err;
+}
+
+/**
+ * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free all receive software resources
+ **/
+static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->rx_rings)
+ return;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+}
+
+/**
+ * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
+ * @ring: The Tx ring to configure
+ *
+ * This enables/disables XPS for a given Tx descriptor ring
+ * based on the TCs enabled for the VSI that ring belongs to.
+ **/
+static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
+{
+ int cpu;
+
+ if (!ring->q_vector || !ring->netdev || ring->ch)
+ return;
+
+ /* We only initialize XPS once, so as not to overwrite user settings */
+ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+ return;
+
+ cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+ netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
+ ring->queue_index);
+}
+
+/**
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC are enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the AF_XDP buffer pool or NULL.
+ **/
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+}
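+/* Queue-id mapping example for i40e_xsk_pool() (illustrative): XDP Tx
+ * rings are indexed after the regular queue pairs, so with
+ * alloc_queue_pairs == 4 an XDP ring with queue_index == 6 maps back to
+ * qid 2, the same AF_XDP socket queue as Rx/Tx ring 2.
+ */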
+
+/**
+ * i40e_configure_tx_ring - Configure a transmit ring context
+ * @ring: The Tx ring to configure
+ *
+ * Configure the Tx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_tx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ u32 qtx_ctl = 0;
+ int err = 0;
+
+ if (ring_is_xdp(ring))
+ ring->xsk_pool = i40e_xsk_pool(ring);
+
+ /* some ATR related tx ring init */
+ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ ring->atr_count = 0;
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ /* configure XPS */
+ i40e_config_xps_tx_ring(ring);
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+
+ tx_ctx.new_context = 1;
+ tx_ctx.base = (ring->dma / 128);
+ tx_ctx.qlen = ring->count;
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED));
+ tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+ /* FDIR VSI tx ring can still use RS bit and writebacks */
+ if (vsi->type != I40E_VSI_FDIR)
+ tx_ctx.head_wb_ena = 1;
+ tx_ctx.head_wb_addr = ring->dma +
+ (ring->count * sizeof(struct i40e_tx_desc));
+
+ /* As part of VSI creation/update, FW allocates certain
+ * Tx arbitration queue sets for each TC enabled for
+ * the VSI. The FW returns the handles to these queue
+ * sets as part of the response buffer to Add VSI,
+ * Update VSI, etc. AQ commands. It is expected that
+ * these queue set handles be associated with the Tx
+ * queues by the driver as part of the TX queue context
+ * initialization. This has to be done regardless of
+ * DCB as by default everything is mapped to TC0.
+ */
+
+	if (ring->ch)
+		tx_ctx.rdylist =
+			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
+	else
+		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+
+ tx_ctx.rdylist_act = 0;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* Now associate this queue with this PCI function */
+ if (ring->ch) {
+ if (ring->ch->type == I40E_VSI_VMDQ2)
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ else
+ return -EINVAL;
+
+ qtx_ctl |= (ring->ch->vsi_number <<
+ I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK;
+ } else {
+ if (vsi->type == I40E_VSI_VMDQ2) {
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK;
+ } else {
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ }
+ }
+
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+ i40e_flush(hw);
+
+ /* cache tail off for easier writes later */
+ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+ return 0;
+}
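+/* Worked example for the Tx context programmed above (illustrative): the
+ * HMC stores the ring base address in 128-byte units, hence
+ * tx_ctx.base = ring->dma / 128. With struct i40e_tx_desc at 16 bytes
+ * and a 512-entry ring, the head writeback slot lands at
+ * ring->dma + 512 * 16 = ring->dma + 8192, i.e. immediately after the
+ * descriptor array, assuming the ring allocation reserves that extra
+ * space for head writeback.
+ */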
+
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer for this ring.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
+ * i40e_configure_rx_ring - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_rx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int err = 0;
+ bool ok;
+ int ret;
+
+ bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ if (ring->vsi->type == I40E_VSI_MAIN)
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
+		/* For AF_XDP ZC, we disallow packets to span
+ * multiple buffers, thus letting us skip that
+ * handling in the fast-path.
+ */
+ chain_len = 1;
+ ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (ret)
+ return ret;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
+ ring->rx_buf_len = vsi->rx_buf_len;
+ if (ring->vsi->type == I40E_VSI_MAIN) {
+ ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+
+ rx_ctx.base = (ring->dma / 128);
+ rx_ctx.qlen = ring->count;
+
+ /* use 16 byte descriptors */
+ rx_ctx.dsize = 0;
+
+ /* descriptor type is always zero
+ * rx_ctx.dtype = 0;
+ */
+ rx_ctx.hsplit_0 = 0;
+
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
+ if (hw->revision_id == 0)
+ rx_ctx.lrxqthresh = 0;
+ else
+ rx_ctx.lrxqthresh = 1;
+ rx_ctx.crcstrip = 1;
+ rx_ctx.l2tsel = 1;
+ /* this controls whether VLAN is stripped from inner headers */
+ rx_ctx.showiv = 0;
+ /* set the prefena field to 1 because the manual says to */
+ rx_ctx.prefena = 1;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ clear_ring_build_skb_enabled(ring);
+ else
+ set_ring_build_skb_enabled(ring);
+
+ ring->rx_offset = i40e_rx_offset(ring);
+
+ /* cache tail for quicker writes, and clear the reg before use */
+ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ if (ring->xsk_pool) {
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
+ } else {
+ ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ }
+ if (!ok) {
+		/* Log this in case the user has forgotten to give the kernel
+		 * any buffers; they can still be supplied later by the
+		 * application.
+		 */
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ ring->xsk_pool ? "AF_XDP ZC enabled " : "",
+ ring->queue_index, pf_q);
+ }
+
+ return 0;
+}
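+/* Rx context arithmetic example (illustrative): I40E_RXQ_CTX_DBUFF_SHIFT
+ * is 7, so rx_ctx.dbuff is expressed in 128-byte units; a 2048-byte
+ * buffer programs dbuff = 16 and a 3072-byte buffer programs dbuff = 24.
+ * rxmax then caps the largest frame at min(vsi->max_frame,
+ * chain_len * rx_buf_len), or a single buffer when AF_XDP ZC forces
+ * chain_len = 1.
+ */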
+
+/**
+ * i40e_vsi_configure_tx - Configure the VSI for Tx
+ * @vsi: VSI structure describing this set of rings and resources
+ *
+ * Configure the Tx VSI for operation.
+ **/
+static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->tx_rings[i]);
+
+ if (err || !i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
+ * @vsi: VSI to calculate rx_buf_len from
+ */
+static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
+{
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return I40E_RXBUFFER_2048;
+
+#if (PAGE_SIZE < 8192)
+ if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
+ return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+
+ return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
+}
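+/* Summary of i40e_calculate_vsi_rx_buf_len() outcomes (illustrative):
+ * - no netdev or legacy-rx:                        I40E_RXBUFFER_2048
+ * - 4K pages, padded 2K buffers large enough,
+ *   MTU <= ETH_DATA_LEN:                           I40E_RXBUFFER_1536 - NET_IP_ALIGN
+ * - 4K pages otherwise:                            I40E_RXBUFFER_3072
+ * - 8K+ pages:                                     I40E_RXBUFFER_2048
+ */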
+
+/**
+ * i40e_vsi_configure_rx - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ **/
+static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+
+#if (PAGE_SIZE < 8192)
+ if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
+ vsi->netdev->mtu <= ETH_DATA_LEN)
+ vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_configure_rx_ring(vsi->rx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 qoffset, qcount;
+ int i, n;
+
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = 0;
+ tx_ring->dcb_tc = 0;
+ }
+ return;
+ }
+
+ for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
+ if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
+ continue;
+
+ qoffset = vsi->tc_config.tc_info[n].qoffset;
+ qcount = vsi->tc_config.tc_info[n].qcount;
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = n;
+ tx_ring->dcb_tc = n;
+ }
+ }
+}
+
+/**
+ * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
+{
+ if (vsi->netdev)
+ i40e_set_rx_mode(vsi->netdev);
+}
+
+/**
+ * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
+ * @pf: Pointer to the targeted PF
+ *
+ * Set all flow director counters to 0.
+ */
+static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
+{
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+ pf->fd_tcp6_filter_cnt = 0;
+ pf->fd_udp6_filter_cnt = 0;
+ pf->fd_sctp6_filter_cnt = 0;
+ pf->fd_ip6_filter_cnt = 0;
+}
+
+/**
+ * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
+ * @vsi: Pointer to the targeted VSI
+ *
+ * This function replays onto the hw the hlist in which all the SB Flow
+ * Director filters were saved.
+ **/
+static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
+{
+ struct i40e_fdir_filter *filter;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+
+ /* Reset FDir counters as we're replaying all existing filters */
+ i40e_reset_fdir_filter_cnt(pf);
+
+ hlist_for_each_entry_safe(filter, node,
+ &pf->fdir_filter_list, fdir_node) {
+ i40e_add_del_fdir(vsi, filter, true);
+ }
+}
+
+/**
+ * i40e_vsi_configure - Set up the VSI for action
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_configure(struct i40e_vsi *vsi)
+{
+ int err;
+
+ i40e_set_vsi_rx_mode(vsi);
+ i40e_restore_vlan(vsi);
+ i40e_vsi_config_dcb_rings(vsi);
+ err = i40e_vsi_configure_tx(vsi);
+ if (!err)
+ err = i40e_vsi_configure_rx(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+{
+ bool has_xdp = i40e_enabled_xdp_vsi(vsi);
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vector;
+ int i, q;
+ u32 qp;
+
+ /* The interrupt indexing is offset by 1 in the PFINT_ITRn
+ * and PFINT_LNKLSTn registers, e.g.:
+ * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
+ */
+ qp = vsi->base_queue;
+ vector = vsi->base_vector;
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+
+ q_vector->rx.next_update = jiffies + 1;
+ q_vector->rx.target_itr =
+ ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
+ q_vector->rx.target_itr >> 1);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+
+ q_vector->tx.next_update = jiffies + 1;
+ q_vector->tx.target_itr =
+ ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
+ q_vector->tx.target_itr >> 1);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+
+ wr32(hw, I40E_PFINT_RATEN(vector - 1),
+ i40e_intrl_usec_to_reg(vsi->int_rate_limit));
+
+		/* beginning of the linked list for the RX queue assigned to this vector */
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
+ for (q = 0; q < q_vector->num_ringpairs; q++) {
+ u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
+ u32 val;
+
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ if (has_xdp) {
+ /* TX queue with next queue set to TX */
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+ /* TX queue with next RX or end of linked list */
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ /* Terminate the linked list */
+ if (q == (q_vector->num_ringpairs - 1))
+ val |= (I40E_QUEUE_END_OF_LIST <<
+ I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp++;
+ }
+ }
+
+ i40e_flush(hw);
+}
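+/* Interrupt linked-list shape programmed above, for one vector owning a
+ * single queue pair (illustrative):
+ *
+ *   without XDP:  PFINT_LNKLSTN -> Rx(qp) -> Tx(qp) -> EOL
+ *   with XDP:     PFINT_LNKLSTN -> Rx(qp) -> Tx(qp + alloc_queue_pairs)
+ *                                         -> Tx(qp) -> EOL
+ *
+ * With several ring pairs per vector the Tx(qp) entry instead points at
+ * Rx(qp + 1), and only the last Tx entry carries I40E_QUEUE_END_OF_LIST.
+ */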
+
+/**
+ * i40e_enable_misc_int_causes - enable the non-queue interrupts
+ * @pf: pointer to private device data structure
+ **/
+static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ /* clear things first */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
+
+ val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
+ I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+
+ if (pf->flags & I40E_FLAG_PTP)
+ val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
+
+ /* OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/**
+ * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
+{
+ u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* set the ITR configuration */
+ q_vector->rx.next_update = jiffies + 1;
+ q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->tx.next_update = jiffies + 1;
+ q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+
+ i40e_enable_misc_int_causes(pf);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the queue
+ * interrupt RX queue in linked list with next queue set to TX
+ */
+ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* TX queue in linked list with next queue set to TX */
+ wr32(hw, I40E_QINT_TQCTL(nextqp),
+ I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
+ }
+
+ /* last TX queue so the next RX queue doesn't matter */
+ wr32(hw, I40E_QINT_TQCTL(0),
+ I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+
+ wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(notify, struct i40e_q_vector, affinity_notify);
+
+ cpumask_copy(&q_vector->affinity_mask, mask);
+}
+
+/**
+ * i40e_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void i40e_irq_affinity_release(struct kref *ref) {}
+
+/**
+ * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ *
+ * Allocates MSI-X vectors and requests interrupts from the kernel.
+ **/
+static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+{
+ int q_vectors = vsi->num_q_vectors;
+ struct i40e_pf *pf = vsi->back;
+ int base = vsi->base_vector;
+ int rx_int_idx = 0;
+ int tx_int_idx = 0;
+ int vector, err;
+ int irq_num;
+ int cpu;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
+
+ irq_num = pf->msix_entries[base + vector].vector;
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(irq_num,
+ vsi->irq_handler,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSIX request_irq failed, error: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ /* register for affinity change notifications */
+ q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
+ q_vector->affinity_notify.release = i40e_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+ /* Spread affinity hints out across online CPUs.
+ *
+ * get_cpu_mask returns a static constant mask with
+ * a permanent lifetime so it's ok to pass to
+ * irq_update_affinity_hint without making a copy.
+ */
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
+ }
+
+ vsi->irqs_ready = true;
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &vsi->q_vectors[vector]);
+ }
+ return err;
+}
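+/* Naming example (illustrative): basename is typically built by the
+ * caller as "<driver>-<netdev>", e.g. "i40e-eth0", so the vectors show
+ * up in /proc/interrupts as i40e-eth0-TxRx-0, i40e-eth0-TxRx-1, ... or
+ * as separate -rx-N/-tx-N entries when a vector carries only one ring
+ * type.
+ */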
+
+/**
+ * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ **/
+static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ int i;
+
+ /* disable interrupt causation from each queue */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ u32 val;
+
+ val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+ wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
+ }
+
+ /* disable each interrupt */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
+
+ i40e_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ } else {
+ /* Legacy and MSI mode - this stops all interrupt handling */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->pdev->irq);
+ }
+}
+
+/**
+ * i40e_vsi_enable_irq - Enable IRQ for the given VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ i40e_irq_dynamic_enable(vsi, i);
+ } else {
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ i40e_flush(&pf->hw);
+ return 0;
+}
+
+/**
+ * i40e_free_misc_vector - Free the vector that handles non-queue events
+ * @pf: board private structure
+ **/
+static void i40e_free_misc_vector(struct i40e_pf *pf)
+{
+ /* Disable ICR 0 */
+ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
+ i40e_flush(&pf->hw);
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
+ free_irq(pf->msix_entries[0].vector, pf);
+ clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
+ }
+}
+
+/**
+ * i40e_intr - MSI/Legacy and non-queue interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ *
+ * This is the handler used for all MSI/Legacy interrupts, and deals
+ * with both queue and non-queue interrupts. This is also used in
+ * MSIX mode to handle the non-queue interrupts.
+ **/
+static irqreturn_t i40e_intr(int irq, void *data)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)data;
+ struct i40e_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
+ u32 icr0, icr0_remaining;
+ u32 val, ena_mask;
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+ goto enable_intr;
+
+ /* if interrupt but no bits showing, must be SWINT */
+ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+ (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+ pf->sw_int_count++;
+
+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+ dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
+ }
+
+ /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
+ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
+
+		/* We do not have a way to disarm queue causes while leaving
+		 * the interrupt enabled for all other causes. Ideally the
+		 * interrupt should be disabled while we are in NAPI, but
+		 * this is not a performance path and napi_schedule()
+		 * can deal with rescheduling.
+		 */
+ if (!test_bit(__I40E_DOWN, pf->state))
+ napi_schedule_irqoff(&q_vector->napi);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
+ i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ /* disable any further VFLR event notifications */
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ } else {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
+ val = rd32(hw, I40E_GLGEN_RSTAT);
+ val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+ >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ if (val == I40E_RESET_CORER) {
+ pf->corer_count++;
+ } else if (val == I40E_RESET_GLOBR) {
+ pf->globr_count++;
+ } else if (val == I40E_RESET_EMPR) {
+ pf->empr_count++;
+ set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
+ }
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+ rd32(hw, I40E_PFHMC_ERRORINFO),
+ rd32(hw, I40E_PFHMC_ERRORDATA));
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+ u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
+ schedule_work(&pf->ptp_extts0_work);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
+ i40e_ptp_tx_hwtstamp(pf);
+
+ icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ }
+
+ /* If a critical error is pending we have no choice but to reset the
+ * device.
+ * Report and mask out any remaining unexpected interrupts.
+ */
+ icr0_remaining = icr0 & ena_mask;
+ if (icr0_remaining) {
+ dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
+ icr0_remaining);
+ if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ }
+ ena_mask &= ~icr0_remaining;
+ }
+ ret = IRQ_HANDLED;
+
+enable_intr:
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
+ if (!test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ i40e_service_event_schedule(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ struct i40e_vsi *vsi = tx_ring->vsi;
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ tx_desc->buffer_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+ /* move past filter desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buf->raw_buf);
+
+ tx_buf->raw_buf = NULL;
+ tx_buf->tx_flags = 0;
+ tx_buf->next_to_watch = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_desc->buffer_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move us past the eop_desc for start of next FD desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
+
+ return budget > 0;
+}
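+/* Index arithmetic example for the loop above (illustrative): biasing
+ * i by -tx_ring->count lets the wrap test be "if (!i)" instead of a
+ * compare against count on every step. With count == 512 and
+ * next_to_clean == 510, i starts at -2; after two descriptors it hits 0,
+ * is rewound to -512 with tx_buf/tx_desc pointed back at entry 0, and
+ * the final "i += tx_ring->count" converts it back to a ring index.
+ */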
+
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+ struct i40e_vsi *vsi;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+ vsi = q_vector->tx.ring->vsi;
+ i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @qp_idx: queue pair index
+ **/
+static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+ struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+
+ /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ q_vector->tx.count++;
+ }
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+}
+
+/**
+ * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per queue pair, but on a constrained vector budget, we
+ * group the queue pairs as "efficiently" as possible.
+ **/
+static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
+{
+ int qp_remaining = vsi->num_queue_pairs;
+ int q_vectors = vsi->num_q_vectors;
+ int num_ringpairs;
+ int v_start = 0;
+ int qp_idx = 0;
+
+ /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+	 * It is also important to go through all the available vectors so that
+	 * any vectors we don't use are cleared. This is especially important
+	 * when decreasing the number of queues in use.
+ */
+ for (; v_start < q_vectors; v_start++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+ num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+ q_vector->num_ringpairs = num_ringpairs;
+ q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
+
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+
+ while (num_ringpairs--) {
+ i40e_map_vector_to_qp(vsi, v_start, qp_idx);
+ qp_idx++;
+ qp_remaining--;
+ }
+ }
+}
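+/* Distribution example for the mapping above (illustrative): with 10
+ * queue pairs and 4 vectors, DIV_ROUND_UP assigns ceil(10/4) = 3, then
+ * ceil(7/3) = 3, ceil(4/2) = 2 and ceil(2/1) = 2 ring pairs, i.e. a
+ * 3/3/2/2 split that never leaves a trailing vector overloaded.
+ */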
+
+/**
+ * i40e_vsi_request_irq - Request IRQ from the OS
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ **/
+static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ err = i40e_vsi_request_irq_msix(vsi, basename);
+ else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+ err = request_irq(pf->pdev->irq, i40e_intr, 0,
+ pf->int_name, pf);
+ else
+ err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
+ pf->int_name, pf);
+
+ if (err)
+ dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40e_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts. It's not called while the normal interrupt routine is executing.
+ **/
+static void i40e_netpoll(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ i40e_msix_clean_rings(0, vsi->q_vectors[i]);
+ } else {
+ i40e_intr(pf->pdev->irq, netdev);
+ }
+}
+#endif
+
+#define I40E_QTX_ENA_WAIT_COUNT 50
+
+/**
+ * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Tx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries, or 0 on success.
+ **/
+static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ int i;
+ u32 tx_reg;
+
+ for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+ tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+
+ usleep_range(10, 20);
+ }
+ if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * i40e_control_tx_q - Start or stop a particular Tx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 tx_reg;
+ int i;
+
+ /* warn the TX unit of coming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ usleep_range(10, 20);
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
+ tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
+
+ wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+}
+
+/**
+ * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
+ * @seid: VSI SEID
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @is_xdp: true if the queue is used for XDP
+ * @enable: start or stop the queue
+ **/
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+ bool is_xdp, bool enable)
+{
+ int ret;
+
+ i40e_control_tx_q(pf, pf_q, enable);
+
+ /* wait for the change to finish */
+ ret = i40e_pf_txq_wait(pf, pf_q, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d %sTx ring %d %sable timeout\n",
+ seid, (is_xdp ? "XDP " : ""), pf_q,
+ (enable ? "en" : "dis"));
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_enable_tx - Start a VSI's rings
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q, ret = 0;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q,
+ false /*is xdp*/, true);
+ if (ret)
+ break;
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q + vsi->alloc_queue_pairs,
+ true /*is xdp*/, true);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries, or 0 on success.
+ **/
+static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ int i;
+ u32 rx_reg;
+
+ for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+ rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+
+ usleep_range(10, 20);
+ }
+ if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * i40e_control_rx_q - Start or stop a particular Rx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that
+ * any delay required after the operation is expected to be
+ * handled by the caller of this function.
+ **/
+static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 rx_reg;
+ int i;
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable)
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ else
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+
+ wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+}
+
+/**
+ * i40e_control_wait_rx_q - Start/stop an Rx queue and wait for completion
+ * @pf: the PF structure
+ * @pf_q: queue being configured
+ * @enable: start or stop the rings
+ *
+ * This function enables or disables a single queue along with waiting
+ * for the change to finish. The caller of this function should handle
+ * the delays needed in the case of disabling queues.
+ **/
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+	i40e_control_rx_q(pf, pf_q, enable);
+
+	/* wait for the change to finish */
+	return i40e_pf_rxq_wait(pf, pf_q, enable);
+}
+
+/**
+ * i40e_vsi_enable_rx - Start a VSI's rings
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q, ret = 0;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ ret = i40e_control_wait_rx_q(pf, pf_q, true);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d enable timeout\n",
+ vsi->seid, pf_q);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_start_rings - Start a VSI's rings
+ * @vsi: the VSI being configured
+ **/
+int i40e_vsi_start_rings(struct i40e_vsi *vsi)
+{
+ int ret = 0;
+
+ /* do rx first for enable and last for disable */
+ ret = i40e_vsi_enable_rx(vsi);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_enable_tx(vsi);
+
+ return ret;
+}
+
+#define I40E_DISABLE_TX_GAP_MSEC 50
+
+/**
+ * i40e_vsi_stop_rings - Stop a VSI's rings
+ * @vsi: the VSI being configured
+ **/
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int pf_q, err, q_end;
+
+ /* When port TX is suspended, don't wait */
+ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
+ return i40e_vsi_stop_rings_no_wait(vsi);
+
+ q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+ err = i40e_control_wait_rx_q(pf, pf_q, false);
+ if (err)
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ }
+
+ msleep(I40E_DISABLE_TX_GAP_MSEC);
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+ i40e_vsi_wait_queues_disabled(vsi);
+}
+
+/**
+ * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
+ * @vsi: the VSI being shutdown
+ *
+ * This function stops all the rings for a VSI but does not delay to verify
+ * that rings have been disabled. It is expected that the caller is shutting
+ * down multiple VSIs at once and will delay together for all the VSIs after
+ * initiating the shutdown. This is particularly useful for shutting down lots
+ * of VFs together. Otherwise, a large delay can be incurred while configuring
+ * each VSI serially.
+ **/
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ i40e_control_tx_q(pf, pf_q, false);
+ i40e_control_rx_q(pf, pf_q, false);
+ }
+}
+
+/**
+ * i40e_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u32 val, qp;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (!vsi->q_vectors)
+ return;
+
+ if (!vsi->irqs_ready)
+ return;
+
+ vsi->irqs_ready = false;
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ int irq_num;
+ u16 vector;
+
+ vector = i + base;
+ irq_num = pf->msix_entries[vector].vector;
+
+ /* free only the irqs that were actually requested */
+ if (!vsi->q_vectors[i] ||
+ !vsi->q_vectors[i]->num_ringpairs)
+ continue;
+
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+ /* remove our suggested affinity mask for this IRQ */
+ irq_update_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, vsi->q_vectors[i]);
+
+ /* Tear down the interrupt queue link list
+ *
+ * We know that they come in pairs and always
+ * the Rx first, then the Tx. To clear the
+ * link list, stick the EOL value into the
+ * next_q field of the registers.
+ */
+ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
+
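+ /* Walk the queue chain for this vector, disabling the
+ * Rx and Tx causes and writing EOL into each next_q
+ * field along the way.
+ */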
+ while (qp != I40E_QUEUE_END_OF_LIST) {
+ u32 next;
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
+ >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp = next;
+ }
+ }
+ } else {
+ free_irq(pf->pdev->irq, pf);
+
+ val = rd32(hw, I40E_PFINT_LNKLST0);
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ }
+}
+
+/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *ring;
+
+ if (!q_vector)
+ return;
+
+ /* disassociate q_vector from rings */
+ i40e_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ vsi->q_vectors[v_idx] = NULL;
+
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI being un-configured
+ *
+ * This frees the memory allocated to the q_vectors and
+ * deletes references to the NAPI struct.
+ **/
+static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ i40e_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * i40e_reset_interrupt_capability - Disable interrupt setup in OS
+ * @pf: board private structure
+ **/
+static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
+{
+ /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ pci_disable_msix(pf->pdev);
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ kfree(pf->irq_pile);
+ pf->irq_pile = NULL;
+ } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+ pci_disable_msi(pf->pdev);
+ }
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+}
+
+/**
+ * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @pf: board private structure
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
+{
+ int i;
+
+ if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
+ i40e_free_misc_vector(pf);
+
+ i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
+ I40E_IWARP_IRQ_PILE_ID);
+
+ i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i])
+ i40e_vsi_free_q_vectors(pf->vsi[i]);
+ i40e_reset_interrupt_capability(pf);
+}
+
+/**
+ * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_enable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+ vsi->current_netdev_flags = 0;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ set_bit(__I40E_CLIENT_RESET, pf->state);
+}
+
+/**
+ * i40e_quiesce_vsi - Pause a given VSI
+ * @vsi: the VSI being paused
+ **/
+static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
+ if (vsi->netdev && netif_running(vsi->netdev))
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ else
+ i40e_vsi_close(vsi);
+}
+
+/**
+ * i40e_unquiesce_vsi - Resume a given VSI
+ * @vsi: the VSI being resumed
+ **/
+static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
+ return;
+
+ if (vsi->netdev && netif_running(vsi->netdev))
+ vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ else
+ i40e_vsi_open(vsi); /* this clears the DOWN bit */
+}
+
+/**
+ * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v])
+ i40e_quiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v])
+ i40e_unquiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
+ * @vsi: the VSI being configured
+ *
+ * Wait until all queues on a given VSI have been disabled.
+ **/
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q, ret;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ /* Check and wait for the Tx queue */
+ ret = i40e_pf_txq_wait(pf, pf_q, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto wait_rx;
+
+ /* Check and wait for the XDP Tx queue */
+ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
+ false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d XDP Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+wait_rx:
+ /* Check and wait for the Rx queue */
+ ret = i40e_pf_rxq_wait(pf, pf_q, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
+ * @pf: the PF
+ *
+ * This function waits for the queues to be in disabled state for all the
+ * VSIs that are managed by this PF.
+ **/
+static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
+{
+ int v, ret = 0;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v]) {
+ ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+#endif
+
+/**
+ * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
+ * @pf: pointer to PF
+ *
+ * Get the TC map for an iSCSI-enabled PF; the map will include the
+ * iSCSI TC as well as the LAN TC.
+ **/
+static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
+{
+ struct i40e_dcb_app_priority_table app;
+ struct i40e_hw *hw = &pf->hw;
+ u8 enabled_tc = 1; /* TC0 is always enabled */
+ u8 tc, i;
+ /* Get the iSCSI APP TLV */
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ app = dcbcfg->app[i];
+ if (app.selector == I40E_APP_SEL_TCPIP &&
+ app.protocolid == I40E_APP_PROTOID_ISCSI) {
+ tc = dcbcfg->etscfg.prioritytable[app.priority];
+ enabled_tc |= BIT(tc);
+ break;
+ }
+ }
+
+ return enabled_tc;
+}
+
+/**
+ * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Return the number of TCs from given DCBx configuration
+ **/
+static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ int i, tc_unused = 0;
+ u8 num_tc = 0;
+ u8 ret = 0;
+
+ /* Scan the ETS Config Priority Table to find
+ * traffic class enabled for a given priority
+ * and create a bitmask of enabled TCs
+ */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
+
+ /* Now scan the bitmask to check for
+ * contiguous TCs starting with TC0
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TC - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = 1;
+ }
+ }
+
+ /* There is always at least TC0 */
+ if (!ret)
+ ret = 1;
+
+ return ret;
+}
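+
+/* e.g. a priority table mapping the eight UPs onto TCs {0,0,1,2,0,0,0,0}
+ * yields the bitmask 0x7 and hence 3 contiguous TCs, while a table using
+ * only TCs {0,2} is non-contiguous and falls back to a single TC.
+ */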
+
+/**
+ * i40e_dcb_get_enabled_tc - Get enabled traffic classes
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Query the current DCB configuration and return the number of
+ * traffic classes enabled from the given DCBX config
+ **/
+static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
+ u8 enabled_tc = 1;
+ u8 i;
+
+ for (i = 0; i < num_tc; i++)
+ enabled_tc |= BIT(i);
+
+ return enabled_tc;
+}
+
+/**
+ * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Query the current MQPRIO configuration and return the number of
+ * traffic classes enabled.
+ **/
+static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ u8 enabled_tc = 1, i;
+
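+ /* TC0 is always enabled; OR in a bit for each additional TC */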
+ for (i = 1; i < num_tc; i++)
+ enabled_tc |= BIT(i);
+ return enabled_tc;
+}
+
+/**
+ * i40e_pf_get_num_tc - Get enabled traffic classes for PF
+ * @pf: PF being queried
+ *
+ * Return number of traffic classes enabled for the given PF
+ **/
+static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 i, enabled_tc = 1;
+ u8 num_tc = 0;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ if (i40e_is_tc_mqprio_enabled(pf))
+ return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+
+ /* If neither MQPRIO nor DCB is enabled, then always use single TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return 1;
+
+ /* SFP mode will be enabled for all TCs on port */
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ return i40e_dcb_get_num_tc(dcbcfg);
+
+ /* In MFP mode, return the count of enabled TCs for this PF */
+ if (pf->hw.func_caps.iscsi)
+ enabled_tc = i40e_get_iscsi_tc_map(pf);
+ else
+ return 1; /* Only TC0 */
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & BIT(i))
+ num_tc++;
+ }
+ return num_tc;
+}
+
+/**
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Return a bitmap for enabled traffic classes for this PF.
+ **/
+static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
+{
+ if (i40e_is_tc_mqprio_enabled(pf))
+ return i40e_mqprio_get_enabled_tc(pf);
+
+ /* If neither MQPRIO nor DCB is enabled for this PF then just return
+ * default TC
+ */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return I40E_DEFAULT_TRAFFIC_CLASS;
+
+ /* In SFP mode, we want the PF to be enabled for all TCs */
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+
+ /* MFP enabled and iSCSI PF type */
+ if (pf->hw.func_caps.iscsi)
+ return i40e_get_iscsi_tc_map(pf);
+ else
+ return I40E_DEFAULT_TRAFFIC_CLASS;
+}
+
+/**
+ * i40e_vsi_get_bw_info - Query VSI BW Information
+ * @vsi: the VSI being queried
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+{
+ struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 tc_bw_max;
+ int ret;
+ int i;
+
+ /* Get the VSI level BW configuration */
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ /* Get the VSI level BW configuration per TC */
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+ NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
+ dev_info(&pf->pdev->dev,
+ "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
+ bw_config.tc_valid_bits,
+ bw_ets_config.tc_valid_bits);
+ /* Still continuing */
+ }
+
+ vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
+ vsi->bw_max_quanta = bw_config.max_bw;
+ tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
+ (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
+ vsi->bw_ets_limit_credits[i] =
+ le16_to_cpu(bw_ets_config.credits[i]);
+ /* 3 bits out of 4 for each TC */
+ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC bitmap
+ * @bw_share: BW shared credits per TC
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+ u8 *bw_share)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ struct i40e_pf *pf = vsi->back;
+ int ret;
+ int i;
+
+ /* There is no need to reset BW when mqprio mode is on. */
+ if (i40e_is_tc_mqprio_enabled(pf))
+ return 0;
+ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Failed to reset tx rate for vsi->seid %u\n",
+ vsi->seid);
+ return ret;
+ }
+ memset(&bw_data, 0, sizeof(bw_data));
+ bw_data.tc_valid_bits = enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ bw_data.tc_bw_credits[i] = bw_share[i];
+
+ ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "AQ command Config VSI BW allocation per TC failed = %d\n",
+ pf->hw.aq.asq_last_status);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC map to be enabled
+ *
+ **/
+static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 netdev_tc = 0;
+ int i;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ if (!netdev)
+ return;
+
+ if (!enabled_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ /* Set up actual enabled TCs on the VSI */
+ if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
+ return;
+
+ /* set per TC queues for the VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* Only set TC queues for enabled tcs
+ *
+ * e.g. For a VSI that has TC0 and TC3 enabled, the
+ * enabled_tc bitmap would be 0b1001 (0x9); the driver
+ * will set numtc for the netdev to 2, which the netdev
+ * layer references as TC 0 and TC 1.
+ */
+ if (vsi->tc_config.enabled_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_config.tc_info[i].netdev_tc,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].qoffset);
+ }
+
+ if (i40e_is_tc_mqprio_enabled(pf))
+ return;
+
+ /* Assign UP2TC map for the VSI */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ /* Get the actual TC# for the UP */
+ u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
+ * @vsi: the VSI being configured
+ * @ctxt: the ctxt buffer returned from AQ VSI update param command
+ **/
+static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt)
+{
+ /* copy just the sections touched not the entire info
+ * since not all sections are valid as returned by
+ * update vsi params
+ */
+ vsi->info.mapping_flags = ctxt->info.mapping_flags;
+ memcpy(&vsi->info.queue_mapping,
+ &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
+ memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
+ * @vsi: the VSI being reconfigured
+ * @vsi_offset: offset from main VF VSI
+ */
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+{
+ struct i40e_vsi_context ctxt = {};
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi)
+ return I40E_ERR_PARAM;
+ pf = vsi->back;
+ hw = &pf->hw;
+
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+ ctxt.info = vsi->info;
+
+ i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
+ false);
+ if (vsi->reconfig_rss) {
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
+ return ret;
+ }
+ vsi->reconfig_rss = false;
+ }
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
+ * @vsi: VSI to be configured
+ * @enabled_tc: TC bitmap
+ *
+ * This configures a particular VSI for TCs that are mapped to the
+ * given TC bitmap. It uses default bandwidth share for TCs across
+ * VSIs to configure TC for a particular VSI.
+ *
+ * NOTE:
+ * It is expected that the VSI queues have been quiesced before calling
+ * this function.
+ **/
+static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ int ret = 0;
+ int i;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (vsi->tc_config.enabled_tc == enabled_tc &&
+ vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
+ return ret;
+
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & BIT(i))
+ bw_share[i] = 1;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
+ &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed querying vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+ if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
+ u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
+
+ if (!valid_tc)
+ valid_tc = bw_config.tc_valid_bits;
+ /* Always enable TC0, no matter what */
+ valid_tc |= 1;
+ dev_info(&pf->pdev->dev,
+ "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
+ enabled_tc, bw_config.tc_valid_bits, valid_tc);
+ enabled_tc = valid_tc;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Unable to configure TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ goto out;
+ }
+ }
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = vsi->back->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
+ if (ret)
+ goto out;
+ } else {
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ }
+
+ /* On destroying the qdisc, reset vsi->rss_size, as the number of
+ * enabled queues changed.
+ */
+ if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
+ vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
+ vsi->num_queue_pairs);
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to reconfig rss for num_queues\n");
+ return ret;
+ }
+ vsi->reconfig_rss = false;
+ }
+ if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ }
+
+ /* Update the VSI after updating the VSI queue-mapping
+ * information
+ */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ /* Update current VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed updating vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+
+ /* Update the netdev TC setup */
+ i40e_vsi_config_netdev_tc(vsi, enabled_tc);
+out:
+ return ret;
+}
+
+/**
+ * i40e_get_link_speed - Returns link speed for the interface
+ * @vsi: VSI to be configured
+ *
+ **/
+static int i40e_get_link_speed(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ return 40000;
+ case I40E_LINK_SPEED_25GB:
+ return 25000;
+ case I40E_LINK_SPEED_20GB:
+ return 20000;
+ case I40E_LINK_SPEED_10GB:
+ return 10000;
+ case I40E_LINK_SPEED_1GB:
+ return 1000;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
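+ /* max_tx_rate arrives in bytes/s; dividing by I40E_BW_MBPS_DIVISOR
+ * (presumably the bytes/s-per-Mbps factor) converts it to Mbit/s,
+ * and anything below one divisor unit is clamped up to the 50 Mbps
+ * minimum the scheduler can program.
+ */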
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
+ * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @seid: seid of the channel/VSI
+ * @max_tx_rate: max TX rate to be configured as BW limit
+ *
+ * Helper function to set BW limit for a given VSI
+ **/
+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+{
+ struct i40e_pf *pf = vsi->back;
+ u64 credits = 0;
+ int speed = 0;
+ int ret = 0;
+
+ speed = i40e_get_link_speed(vsi);
+ if (max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev,
+ "Invalid max tx rate %llu specified for VSI seid %d.",
+ max_tx_rate, seid);
+ return -EINVAL;
+ }
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
+ dev_warn(&pf->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ }
+
+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
+ credits = max_tx_rate;
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
+ max_tx_rate, seid, ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return ret;
+}
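+
+/* Worked example: a hypothetical request of max_tx_rate = 1000 (Mbps)
+ * passes the link-speed check on a 10G link and is programmed as
+ * 1000 / I40E_BW_CREDIT_DIVISOR = 20 credits of 50 Mbps each.
+ */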
+
+/**
+ * i40e_remove_queue_channels - Remove queue channels for the TCs
+ * @vsi: VSI to be configured
+ *
+ * Remove queue channels for the TCs
+ **/
+static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
+{
+ enum i40e_admin_queue_err last_aq_status;
+ struct i40e_cloud_filter *cfilter;
+ struct i40e_channel *ch, *ch_tmp;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+ int ret, i;
+
+ /* Reset rss size that was stored when reconfiguring rss for
+ * channel VSIs with non-power-of-2 queue count.
+ */
+ vsi->current_rss_size = 0;
+
+ /* perform cleanup for channels if they exist */
+ if (list_empty(&vsi->ch_list))
+ return;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ struct i40e_vsi *p_vsi;
+
+ list_del(&ch->list);
+ p_vsi = ch->parent_vsi;
+ if (!p_vsi || !ch->initialized) {
+ kfree(ch);
+ continue;
+ }
+ /* Reset queue contexts */
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+
+ pf_q = ch->base_queue + i;
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = NULL;
+
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = NULL;
+ }
+
+ /* Reset BW configured for this VSI via mqprio */
+ ret = i40e_set_bw_limit(vsi, ch->seid, 0);
+ if (ret)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to reset tx rate for ch->seid %u\n",
+ ch->seid);
+
+ /* delete cloud filters associated with this channel */
+ hlist_for_each_entry_safe(cfilter, node,
+ &pf->cloud_filter_list, cloud_node) {
+ if (cfilter->seid != ch->seid)
+ continue;
+
+ hash_del(&cfilter->cloud_node);
+ if (cfilter->dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi,
+ cfilter,
+ false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter,
+ false);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Failed to delete cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ kfree(cfilter);
+ }
+
+ /* delete VSI from FW */
+ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
+ NULL);
+ if (ret)
+ dev_err(&vsi->back->pdev->dev,
+ "unable to remove channel (%d) for parent VSI(%d)\n",
+ ch->seid, p_vsi->seid);
+ kfree(ch);
+ }
+ INIT_LIST_HEAD(&vsi->ch_list);
+}
+
+/**
+ * i40e_get_max_queues_for_channel
+ * @vsi: ptr to VSI with which channels are associated
+ *
+ * Helper function which returns max value among the queue counts set on the
+ * channels/TCs created.
+ **/
+static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ int max = 0;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ if (!ch->initialized)
+ continue;
+ if (ch->num_queue_pairs > max)
+ max = ch->num_queue_pairs;
+ }
+
+ return max;
+}
+
+/**
+ * i40e_validate_num_queues - validate num_queues w.r.t channel
+ * @pf: ptr to PF device
+ * @num_queues: number of queues
+ * @vsi: the parent VSI
+ * @reconfig_rss: indicates should the RSS be reconfigured or not
+ *
+ * This function validates number of queues in the context of new channel
+ * which is being established and determines if RSS should be reconfigured
+ * or not for parent VSI.
+ **/
+static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
+ struct i40e_vsi *vsi, bool *reconfig_rss)
+{
+ int max_ch_queues;
+
+ if (!reconfig_rss)
+ return -EINVAL;
+
+ *reconfig_rss = false;
+ if (vsi->current_rss_size) {
+ if (num_queues > vsi->current_rss_size) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: num_queues (%d) > vsi's current_size(%d)\n",
+ num_queues, vsi->current_rss_size);
+ return -EINVAL;
+ } else if ((num_queues < vsi->current_rss_size) &&
+ (!is_power_of_2(num_queues))) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
+ num_queues, vsi->current_rss_size);
+ return -EINVAL;
+ }
+ }
+
+ if (!is_power_of_2(num_queues)) {
+ /* Find the max num_queues configured across the existing
+ * channels, if any, and enforce that 'num_queues' is at least
+ * that large.
+ */
+ max_ch_queues = i40e_get_max_queues_for_channel(vsi);
+ if (num_queues < max_ch_queues) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: num_queues (%d) < max queues configured for channel(%d)\n",
+ num_queues, max_ch_queues);
+ return -EINVAL;
+ }
+ *reconfig_rss = true;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
+ * @vsi: the VSI being setup
+ * @rss_size: size of RSS, accordingly LUT gets reprogrammed
+ *
+ * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
+ **/
+static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_hw *hw = &pf->hw;
+ int local_rss_size;
+ u8 *lut;
+ int ret;
+
+ if (!vsi->rss_size)
+ return -EINVAL;
+
+ if (rss_size > vsi->rss_size)
+ return -EINVAL;
+
+ local_rss_size = min_t(int, vsi->rss_size, rss_size);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* Ignore the user-configured LUT if there is one */
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
+
+ /* Use user configured hash key if there is one, otherwise
+ * use default.
+ */
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+
+ ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ kfree(lut);
+ return ret;
+ }
+ kfree(lut);
+
+ /* Remember the original and the currently programmed RSS sizes */
+ if (!vsi->orig_rss_size)
+ vsi->orig_rss_size = vsi->rss_size;
+ vsi->current_rss_size = local_rss_size;
+
+ return ret;
+}
+
+/**
+ * i40e_channel_setup_queue_map - Setup a channel queue map
+ * @pf: ptr to PF device
+ * @ctxt: VSI context structure
+ * @ch: ptr to channel structure
+ *
+ * Setup queue map for a specific channel
+ **/
+static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
+ struct i40e_vsi_context *ctxt,
+ struct i40e_channel *ch)
+{
+ u16 qcount, qmap, sections = 0;
+ u8 offset = 0;
+ int pow;
+
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+ qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
+ ch->num_queue_pairs = qcount;
+
+ /* find the next higher power-of-2 of num queue pairs */
+ pow = ilog2(qcount);
+ if (!is_power_of_2(qcount))
+ pow++;
+
+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+
+ ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
+ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
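+
+/* e.g. a channel requesting 6 queue pairs (with enough MSI-X vectors)
+ * keeps qcount = 6 and gets pow = ilog2(6) + 1 = 3, so the TC0 qmap
+ * advertises a contiguous 2^3 = 8 queue region of which 6 are used.
+ */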
+
+/**
+ * i40e_add_channel - add a channel by adding VSI
+ * @pf: ptr to PF device
+ * @uplink_seid: underlying HW switching element (VEB) ID
+ * @ch: ptr to channel structure
+ *
+ * Add a channel (VSI) using add_vsi and queue_map
+ **/
+static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ struct i40e_channel *ch)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ u8 enabled_tc = 0x1; /* TC0 enabled */
+ int ret;
+
+ if (ch->type != I40E_VSI_VMDQ2) {
+ dev_info(&pf->pdev->dev,
+ "add new vsi failed, ch->type %d\n", ch->type);
+ return -EINVAL;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ if (ch->type == I40E_VSI_VMDQ2)
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Set queue map for a given VSI context */
+ i40e_channel_setup_queue_map(pf, &ctxt, ch);
+
+ /* Now time to create VSI */
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "add new vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+ }
+
+ /* Success, update channel, set enabled_tc only if the channel
+ * is not a macvlan
+ */
+ ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
+ ch->seid = ctxt.seid;
+ ch->vsi_number = ctxt.vsi_number;
+ ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
+
+ /* copy just the sections touched not the entire info
+ * since not all sections are valid as returned by
+ * update vsi params
+ */
+ ch->info.mapping_flags = ctxt.info.mapping_flags;
+ memcpy(&ch->info.queue_mapping,
+ &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
+ memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
+ sizeof(ctxt.info.tc_mapping));
+
+ return 0;
+}
+
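+/**
+ * i40e_channel_config_bw - configure BW for the channel VSI
+ * @vsi: the parent VSI
+ * @ch: ptr to channel structure
+ * @bw_share: BW shared credits per TC
+ *
+ * Configure the per-TC BW share credits for the given channel VSI.
+ **/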
+static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
+ u8 *bw_share)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ int ret;
+ int i;
+
+ memset(&bw_data, 0, sizeof(bw_data));
+ bw_data.tc_valid_bits = ch->enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ bw_data.tc_bw_credits[i] = bw_share[i];
+
+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
+ vsi->back->hw.aq.asq_last_status, ch->seid);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ ch->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ return 0;
+}
+
+/**
+ * i40e_channel_config_tx_ring - config TX ring associated with new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Configure TX rings associated with the channel (VSI), since its queues
+ * are carved out of the parent VSI's queues.
+ **/
+static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
+ struct i40e_vsi *vsi,
+ struct i40e_channel *ch)
+{
+ u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ int ret;
+ int i;
+
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (ch->enabled_tc & BIT(i))
+ bw_share[i] = 1;
+ }
+
+ /* configure BW for new VSI */
+ ret = i40e_channel_config_bw(vsi, ch, bw_share);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed configuring TC map %d for channel (seid %u)\n",
+ ch->enabled_tc, ch->seid);
+ return ret;
+ }
+
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+
+ pf_q = ch->base_queue + i;
+
+ /* Get to TX ring ptr of main VSI, for re-setup TX queue
+ * context
+ */
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = ch;
+
+ /* Get the RX ring ptr */
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = ch;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_setup_hw_channel - setup new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ * @uplink_seid: underlying HW switching element (VEB) ID
+ * @type: type of channel to be created (VMDq2/VF)
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and configures TX rings accordingly
+ **/
+static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
+ struct i40e_vsi *vsi,
+ struct i40e_channel *ch,
+ u16 uplink_seid, u8 type)
+{
+ int ret;
+
+ ch->initialized = false;
+ ch->base_queue = vsi->next_base_queue;
+ ch->type = type;
+
+ /* Proceed with creation of channel (VMDq2) VSI */
+ ret = i40e_add_channel(pf, uplink_seid, ch);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to add_channel using uplink_seid %u\n",
+ uplink_seid);
+ return ret;
+ }
+
+ /* Mark the successful creation of channel */
+ ch->initialized = true;
+
+ /* Reconfigure TX queues using QTX_CTL register */
+ ret = i40e_channel_config_tx_ring(pf, vsi, ch);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to configure TX rings for channel %u\n",
+ ch->seid);
+ return ret;
+ }
+
+ /* update 'next_base_queue' */
+ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
+ dev_dbg(&pf->pdev->dev,
+ "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
+ ch->seid, ch->vsi_number, ch->stat_counter_idx,
+ ch->num_queue_pairs,
+ vsi->next_base_queue);
+ return ret;
+}
+
+/**
+ * i40e_setup_channel - setup new channel using uplink element
+ * @pf: ptr to PF device
+ * @vsi: pointer to the VSI to set up the channel within
+ * @ch: ptr to channel structure
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and uplink switching element (uplink_seid)
+ **/
+static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
+ struct i40e_channel *ch)
+{
+ u8 vsi_type;
+ u16 seid;
+ int ret;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ vsi_type = I40E_VSI_VMDQ2;
+ } else {
+ dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
+ vsi->type);
+ return false;
+ }
+
+ /* underlying switching element */
+ seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+
+ /* create channel (VSI), configure TX rings */
+ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
+ return false;
+ }
+
+ return ch->initialized;
+}
+
+/**
+ * i40e_validate_and_set_switch_mode - sets up switch mode correctly
+ * @vsi: ptr to VSI which has PF backing
+ *
+ * Sets up the switch mode correctly if it needs to be changed, validating
+ * it against the modes the device allows.
+ **/
+static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
+{
+ u8 mode;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
+ if (ret)
+ return -EINVAL;
+
+ if (hw->dev_caps.switch_mode) {
+ /* if switch mode is set, support mode2 (non-tunneled for
+ * cloud filter) for now
+ */
+ u32 switch_mode = hw->dev_caps.switch_mode &
+ I40E_SWITCH_MODE_MASK;
+ if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
+ if (switch_mode == I40E_CLOUD_FILTER_MODE2)
+ return 0;
+ dev_err(&pf->pdev->dev,
+ "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
+ hw->dev_caps.switch_mode);
+ return -EINVAL;
+ }
+ }
+
+ /* Set Bit 7 to be valid */
+ mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
+
+ /* Set L4type for TCP support */
+ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
+
+ /* Set cloud filter mode */
+ mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
+
+ /* Prep mode field for set_switch_config */
+ ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
+ pf->last_sw_conf_valid_flags,
+ mode, NULL);
+ if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ dev_err(&pf->pdev->dev,
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+
+ return ret;
+}
+
+/**
+ * i40e_create_queue_channel - function to create channel
+ * @vsi: VSI to be configured
+ * @ch: ptr to channel (it contains channel specific params)
+ *
+ * This function creates channel (VSI) using num_queues specified by user,
+ * reconfigs RSS if needed.
+ **/
+int i40e_create_queue_channel(struct i40e_vsi *vsi,
+ struct i40e_channel *ch)
+{
+ struct i40e_pf *pf = vsi->back;
+ bool reconfig_rss;
+ int err;
+
+ if (!ch)
+ return -EINVAL;
+
+ if (!ch->num_queue_pairs) {
+ dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
+ ch->num_queue_pairs);
+ return -EINVAL;
+ }
+
+ /* validate user requested num_queues for channel */
+ err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
+ &reconfig_rss);
+ if (err) {
+ dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
+ ch->num_queue_pairs);
+ return -EINVAL;
+ }
+
+ /* By default we are in VEPA mode. If this is the first VF/VMDq
+ * VSI to be added, switch to VEB mode.
+ */
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (i40e_is_tc_mqprio_enabled(pf))
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+ else
+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ }
+ /* From now on, the main VSI's queue count will be the value
+ * of TC0's queue count
+ */
+ }
+
+ /* By this time, vsi->cnt_q_avail should be non-zero and at
+ * least num_queues
+ */
+ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: cnt_q_avail (%u) less than num_queues %d\n",
+ vsi->cnt_q_avail, ch->num_queue_pairs);
+ return -EINVAL;
+ }
+
+ /* reconfig_rss only if vsi type is MAIN_VSI */
+ if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
+ err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "Error: unable to reconfig rss for num_queues (%u)\n",
+ ch->num_queue_pairs);
+ return -EINVAL;
+ }
+ }
+
+ if (!i40e_setup_channel(pf, vsi, ch)) {
+ dev_info(&pf->pdev->dev, "Failed to setup channel\n");
+ return -EINVAL;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "Setup channel (id:%u) utilizing num_queues %d\n",
+ ch->seid, ch->num_queue_pairs);
+
+ /* configure VSI for BW limit */
+ if (ch->max_tx_rate) {
+ u64 credits = ch->max_tx_rate;
+
+ if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
+ return -EINVAL;
+
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&pf->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ ch->max_tx_rate,
+ credits,
+ ch->seid);
+ }
+
+ /* in case of VF, this will be main SRIOV VSI */
+ ch->parent_vsi = vsi;
+
+ /* and update main_vsi's count for queue_available to use */
+ vsi->cnt_q_avail -= ch->num_queue_pairs;
+
+ return 0;
+}
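+
+/* In the mqprio flow, i40e_configure_queue_channels() below calls
+ * i40e_create_queue_channel() once for each additional enabled TC.
+ */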
+
+/**
+ * i40e_configure_queue_channels - Add queue channel for the given TCs
+ * @vsi: VSI to be configured
+ *
+ * Configures queue channel mapping to the given TCs
+ **/
+static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch;
+ u64 max_rate = 0;
+ int ret = 0, i;
+
+ /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
+ vsi->tc_seid_map[0] = vsi->seid;
+ for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->tc_config.enabled_tc & BIT(i)) {
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_queue_pairs =
+ vsi->tc_config.tc_info[i].qcount;
+ ch->base_queue =
+ vsi->tc_config.tc_info[i].qoffset;
+
+ /* Bandwidth limit through tc interface is in bytes/s,
+ * change to Mbit/s
+ */
+ max_rate = vsi->mqprio_qopt.max_rate[i];
+ do_div(max_rate, I40E_BW_MBPS_DIVISOR);
+ ch->max_tx_rate = max_rate;
+
+ list_add_tail(&ch->list, &vsi->ch_list);
+
+ ret = i40e_create_queue_channel(vsi, ch);
+ if (ret) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed creating queue channel with TC%d: queues %d\n",
+ i, ch->num_queue_pairs);
+ goto err_free;
+ }
+ vsi->tc_seid_map[i] = ch->seid;
+ }
+ }
+
+ /* reset to reconfigure TX queue contexts */
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
+ return ret;
+
+err_free:
+ i40e_remove_queue_channels(vsi);
+ return ret;
+}
+
+/**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+ struct i40e_pf *pf = veb->pf;
+ int ret = 0;
+ int i;
+
+ /* No TCs or already enabled TCs just return */
+ if (!enabled_tc || veb->enabled_tc == enabled_tc)
+ return ret;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ /* bw_data.absolute_credits is not set (relative) */
+
+ /* Enable ETS TCs with equal BW Share for now */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & BIT(i))
+ bw_data.tc_bw_share_credits[i] = 1;
+ }
+
+ ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VEB bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Update the BW information */
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed getting veb bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller has quiesced all the VSIs before calling
+ * this function
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+ u8 tc_map = 0;
+ int ret;
+ u8 v;
+
+ /* Enable the TCs available on PF to all VEBs */
+ tc_map = i40e_pf_get_tc_map(pf);
+ if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
+ return;
+
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+ ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VEB seid=%d\n",
+ pf->veb[v]->seid);
+ /* Will try to configure as many components as possible */
+ }
+ }
+
+ /* Update each VSI */
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (!pf->vsi[v])
+ continue;
+
+ /* - Enable all TCs for the LAN VSI
+ * - For all others keep them at TC0 for now
+ */
+ if (v == pf->lan_vsi)
+ tc_map = i40e_pf_get_tc_map(pf);
+ else
+ tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
+
+ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VSI seid=%d\n",
+ pf->vsi[v]->seid);
+ /* Will try to configure as many components as possible */
+ } else {
+ /* Re-configure VSI vectors based on updated TC map */
+ i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
+ if (pf->vsi[v]->netdev)
+ i40e_dcbnl_set_all(pf->vsi[v]);
+ }
+ }
+}
+
+/**
+ * i40e_resume_port_tx - Resume port Tx
+ * @pf: PF struct
+ *
+ * Resume a port's Tx and issue a PF reset in case of failure to
+ * resume.
+ **/
+static int i40e_resume_port_tx(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_aq_resume_port_tx(hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Resume Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_suspend_port_tx - Suspend port Tx
+ * @pf: PF struct
+ *
+ * Suspend a port's Tx and issue a PF reset in case of failure.
+ **/
+static int i40e_suspend_port_tx(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Suspend Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_hw_set_dcb_config - Program new DCBX settings into HW
+ * @pf: PF being configured
+ * @new_cfg: New DCBX configuration
+ *
+ * Program DCB settings into HW and reconfigure VEB/VSIs on
+ * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
+ **/
+static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
+ int ret;
+
+ /* Check if need reconfiguration */
+ if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
+ return 0;
+ }
+
+ /* Config change disable all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Copy the new config to the current config */
+ *old_cfg = *new_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Set DCB Config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+out:
+ /* In case of reset do not try to resume anything */
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
+ /* Re-start the VSIs if disabled */
+ ret = i40e_resume_port_tx(pf);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto err;
+ i40e_pf_unquiesce_all_vsi(pf);
+ }
+err:
+ return ret;
+}
+
+/**
+ * i40e_hw_dcb_config - Program new DCBX settings into HW
+ * @pf: PF being configured
+ * @new_cfg: New DCBX configuration
+ *
+ * Program DCB settings into HW and reconfigure VEB/VSIs on
+ * given PF
+ **/
+int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
+ u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
+ struct i40e_dcbx_config *old_cfg;
+ u8 mode[I40E_MAX_TRAFFIC_CLASS];
+ struct i40e_rx_pb_config pb_cfg;
+ struct i40e_hw *hw = &pf->hw;
+ u8 num_ports = hw->num_ports;
+ bool need_reconfig;
+ int ret = -EINVAL;
+ u8 lltc_map = 0;
+ u8 tc_map = 0;
+ u8 new_numtc;
+ u8 i;
+
+ dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
+ /* Un-pack information to Program ETS HW via shared API
+ * numtc, tcmap
+ * LLTC map
+ * ETS/NON-ETS arbiter mode
+ * max exponent (credit refills)
+ * Total number of ports
+ * PFC priority bit-map
+ * Priority Table
+ * BW % per TC
+ * Arbiter mode between UPs sharing same TC
+ * TSA table (ETS or non-ETS)
+ * EEE enabled or not
+ * MFS TC table
+ */
+
+ new_numtc = i40e_dcb_get_num_tc(new_cfg);
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ for (i = 0; i < new_numtc; i++) {
+ tc_map |= BIT(i);
+ switch (new_cfg->etscfg.tsatable[i]) {
+ case I40E_IEEE_TSA_ETS:
+ prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
+ ets_data.tc_bw_share_credits[i] =
+ new_cfg->etscfg.tcbwtable[i];
+ break;
+ case I40E_IEEE_TSA_STRICT:
+ prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
+ lltc_map |= BIT(i);
+ ets_data.tc_bw_share_credits[i] =
+ I40E_DCB_STRICT_PRIO_CREDITS;
+ break;
+ default:
+ /* Invalid TSA type */
+ need_reconfig = false;
+ goto out;
+ }
+ }
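+
+ /* At this point tc_map has one bit set per enabled TC, and
+ * lltc_map marks the strict-priority (low-latency) TCs.
+ */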
+
+ old_cfg = &hw->local_dcbx_config;
+ /* Check if need reconfiguration */
+ need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
+
+ /* If needed, enable/disable frame tagging, disable all VSIs
+ * and suspend port tx
+ */
+ if (need_reconfig) {
+ /* Enable DCB tagging only when more than one TC */
+ if (new_numtc > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
+ /* Reconfiguration needed; quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+ ret = i40e_suspend_port_tx(pf);
+ if (ret)
+ goto err;
+ }
+
+ /* Configure Port ETS Tx Scheduler */
+ ets_data.tc_valid_bits = tc_map;
+ ets_data.tc_strict_priority_flags = lltc_map;
+ ret = i40e_aq_config_switch_comp_ets
+ (hw, pf->mac_seid, &ets_data,
+ i40e_aqc_opc_modify_switching_comp_ets, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Modify Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Configure Rx ETS HW */
+ memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
+ i40e_dcb_hw_set_num_tc(hw, new_numtc);
+ i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
+ I40E_DCB_ARB_MODE_STRICT_PRIORITY,
+ I40E_DCB_DEFAULT_MAX_EXPONENT,
+ lltc_map);
+ i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
+ i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
+ prio_type);
+ i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
+ new_cfg->etscfg.prioritytable);
+ i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
+
+ /* Configure Rx Packet Buffers in HW */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
+ mfs_tc[i] += I40E_PACKET_HDR_PAD;
+ }
+
+ i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
+ false, new_cfg->pfc.pfcenable,
+ mfs_tc, &pb_cfg);
+ i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
+
+ /* Update the local Rx Packet buffer config */
+ pf->pb_cfg = pb_cfg;
+
+ /* Inform the FW about changes to DCB configuration */
+ ret = i40e_aq_dcb_updated(&pf->hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "DCB Updated failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Update the port DCBx configuration */
+ *old_cfg = *new_cfg;
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+out:
+ /* Re-start the VSIs if disabled */
+ if (need_reconfig) {
+ ret = i40e_resume_port_tx(pf);
+
+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto err;
+
+ /* Wait for the PF's queues to be disabled */
+ ret = i40e_pf_wait_queues_disabled(pf);
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ goto err;
+ } else {
+ i40e_pf_unquiesce_all_vsi(pf);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ }
+ /* registers are set, let's apply */
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ ret = i40e_hw_set_dcb_config(pf, new_cfg);
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
+ * @pf: PF being queried
+ *
+ * Set default DCB configuration in case DCB is to be done in SW.
+ **/
+int i40e_dcb_sw_default_config(struct i40e_pf *pf)
+{
+ struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ struct i40e_hw *hw = &pf->hw;
+ int err;
+
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
+ /* Update the local cached instance with TC0 ETS */
+ memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
+ pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
+ pf->tmp_cfg.etscfg.maxtcs = 0;
+ pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
+ pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+ pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ /* FW needs one App to configure HW */
+ pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
+ pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
+ pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
+ pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
+ }
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
+ ets_data.tc_strict_priority_flags = 0; /* ETS */
+ ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
+
+ /* Enable ETS on the Physical port */
+ err = i40e_aq_config_switch_comp_ets
+ (hw, pf->mac_seid, &ets_data,
+ i40e_aqc_opc_enable_switching_comp_ets, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "Enable Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ err = -ENOENT;
+ goto out;
+ }
+
+ /* Update the local cached instance with TC0 ETS */
+ dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
+ dcb_cfg->etscfg.cbs = 0;
+ dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
+
+out:
+ return err;
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err;
+
+ /* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
+ * Also do not enable DCBx if the FW LLDP agent is disabled.
+ */
+ if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
+ dev_info(&pf->pdev->dev, "DCB is not supported.\n");
+ err = I40E_NOT_SUPPORTED;
+ goto out;
+ }
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
+ err = i40e_dcb_sw_default_config(pf);
+ if (err) {
+ dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
+ goto out;
+ }
+ dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ /* at init capable but disabled */
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ goto out;
+ }
+ err = i40e_init_dcb(hw, true);
+ if (!err) {
+ /* Device/Function is not DCBX capable */
+ if ((!hw->func_caps.dcb) ||
+ (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+ dev_info(&pf->pdev->dev,
+ "DCBX offload is not supported or is disabled for this PF.\n");
+ } else {
+ /* When status is not DISABLED then DCBX in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ /* Enable DCB tagging only when more than one TC
+ * or explicitly disable if only one TC
+ */
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ dev_dbg(&pf->pdev->dev,
+ "DCBX offload is supported for this PF.\n");
+ }
+ } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
+ pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Query for DCB configuration failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+
+out:
+ return err;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ * @isup: true if link is up, false otherwise
+ */
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+ enum i40e_aq_link_speed new_speed;
+ struct i40e_pf *pf = vsi->back;
+ char *speed = "Unknown";
+ char *fc = "Unknown";
+ char *fec = "";
+ char *req_fec = "";
+ char *an = "";
+
+ if (isup)
+ new_speed = pf->hw.phy.link_info.link_speed;
+ else
+ new_speed = I40E_LINK_SPEED_UNKNOWN;
+
+ if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
+ return;
+ vsi->current_isup = isup;
+ vsi->current_speed = new_speed;
+ if (!isup) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ /* Warn user if link speed on NPAR enabled partition is not at
+ * least 10Gbps
+ */
+ if (pf->hw.func_caps.npar_enable &&
+ (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+ pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+ netdev_warn(vsi->netdev,
+ "The partition detected link speed that is less than 10Gbps\n");
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = "40 G";
+ break;
+ case I40E_LINK_SPEED_20GB:
+ speed = "20 G";
+ break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = "10 G";
+ break;
+ case I40E_LINK_SPEED_5GB:
+ speed = "5 G";
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ speed = "2.5 G";
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = "1000 M";
+ break;
+ case I40E_LINK_SPEED_100MB:
+ speed = "100 M";
+ break;
+ default:
+ break;
+ }
+
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ fc = "RX/TX";
+ break;
+ case I40E_FC_TX_PAUSE:
+ fc = "TX";
+ break;
+ case I40E_FC_RX_PAUSE:
+ fc = "RX";
+ break;
+ default:
+ fc = "None";
+ break;
+ }
+
+ if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+ req_fec = "None";
+ fec = "None";
+ an = "False";
+
+ if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = "True";
+
+ if (pf->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = "CL74 FC-FEC/BASE-R";
+ else if (pf->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_RS_ENA)
+ fec = "CL108 RS-FEC";
+
+ /* 'CL108 RS-FEC' should be displayed when RS is requested, or
+ * both RS and FC are requested
+ */
+ if (vsi->back->hw.phy.link_info.req_fec_info &
+ (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
+ if (vsi->back->hw.phy.link_info.req_fec_info &
+ I40E_AQ_REQUEST_FEC_RS)
+ req_fec = "CL108 RS-FEC";
+ else
+ req_fec = "CL74 FC-FEC/BASE-R";
+ }
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ speed, req_fec, fec, an, fc);
+ } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
+ req_fec = "None";
+ fec = "None";
+ an = "False";
+
+ if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = "True";
+
+ if (pf->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = "CL74 FC-FEC/BASE-R";
+
+ if (pf->hw.phy.link_info.req_fec_info &
+ I40E_AQ_REQUEST_FEC_KR)
+ req_fec = "CL74 FC-FEC/BASE-R";
+
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ speed, req_fec, fec, an, fc);
+ } else {
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
+ speed, fc);
+ }
+}
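+
+/* Example of the resulting log line for a 25G link with RS-FEC and
+ * completed autoneg (illustrative output only):
+ *   NIC Link is Up, 25 Gbps Full Duplex, Requested FEC: CL108 RS-FEC,
+ *   Negotiated FEC: CL108 RS-FEC, Autoneg: True, Flow Control: RX/TX
+ */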
+
+/**
+ * i40e_up_complete - Finish the last steps of bringing up a connection
+ * @vsi: the VSI being configured
+ **/
+static int i40e_up_complete(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_vsi_configure_msix(vsi);
+ else
+ i40e_configure_msi_and_legacy(vsi);
+
+ /* start rings */
+ err = i40e_vsi_start_rings(vsi);
+ if (err)
+ return err;
+
+ clear_bit(__I40E_VSI_DOWN, vsi->state);
+ i40e_napi_enable_all(vsi);
+ i40e_vsi_enable_irq(vsi);
+
+ if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
+ (vsi->netdev)) {
+ i40e_print_link_message(vsi, true);
+ netif_tx_start_all_queues(vsi->netdev);
+ netif_carrier_on(vsi->netdev);
+ }
+
+ /* replay FDIR SB filters */
+ if (vsi->type == I40E_VSI_FDIR) {
+ /* reset fd counters */
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
+ i40e_fdir_filter_restore(vsi);
+ }
+
+ /* On the next run of the service_task, notify any clients of the new
+ * opened netdev
+ */
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_reinit_locked - Reset the VSI
+ * @vsi: the VSI being configured
+ *
+ * Rebuild the ring structs after some configuration
+ * has changed, e.g. MTU size.
+ **/
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
+ usleep_range(1000, 2000);
+ i40e_down(vsi);
+
+ i40e_up(vsi);
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
+}
+
+/**
+ * i40e_force_link_state - Force the link status
+ * @pf: board private structure
+ * @is_up: whether the link state should be forced up or down
+ **/
+static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config = {0};
+ bool non_zero_phy_type = is_up;
+ struct i40e_hw *hw = &pf->hw;
+ u64 mask;
+ u8 speed;
+ int err;
+
+ /* Card might've been put in an unstable state by other drivers
+ * and applications, which can cause incorrect speed values to be
+ * set on startup. In order to clear the speed registers, we call
+ * get_phy_capabilities twice, once to get initial state of
+ * available speeds, and once to get current PHY config.
+ */
+ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
+ NULL);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return err;
+ }
+ speed = abilities.link_speed;
+
+ /* Get the current phy config */
+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return err;
+ }
+
+ /* If link needs to go up, but was not forced to go down,
+ * and its speed values are OK, there is no need for a flap.
+ * If non_zero_phy_type was set, we still need to force the link up.
+ */
+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+ non_zero_phy_type = true;
+ else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
+ return I40E_SUCCESS;
+
+ /* To force link we need to set bits for all supported PHY types,
+ * but there are now more than 32, so we need to split the bitmap
+ * across two fields.
+ */
+ mask = I40E_PHY_TYPES_BITMASK;
+ config.phy_type =
+ non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
+ config.phy_type_ext =
+ non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
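+
+ /* Illustrative split, assuming a hypothetical 40-bit mask of
+ * 0x00ffaa5500ff1122: phy_type receives the low word as
+ * cpu_to_le32(0x00ff1122) and phy_type_ext receives bits 32..39,
+ * i.e. (u8)0x55.
+ */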
+ /* Copy the old settings, except of phy_type */
+ config.abilities = abilities.abilities;
+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+ if (is_up)
+ config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
+ else
+ config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
+ }
+ if (abilities.link_speed != 0)
+ config.link_speed = abilities.link_speed;
+ else
+ config.link_speed = speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+ err = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "set phy config ret = %pe last_status = %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return err;
+ }
+
+ /* Update the link info */
+ err = i40e_update_link_info(hw);
+ if (err) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ msleep(1000);
+ i40e_update_link_info(hw);
+ }
+
+ i40e_aq_set_link_restart_an(hw, is_up, NULL);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+ int err;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+ vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ i40e_force_link_state(vsi->back, true);
+
+ err = i40e_vsi_configure(vsi);
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_down - Shutdown the connection processing
+ * @vsi: the VSI being stopped
+ **/
+void i40e_down(struct i40e_vsi *vsi)
+{
+ int i;
+
+ /* It is assumed that the caller of this function
+ * sets the vsi->state __I40E_VSI_DOWN bit.
+ */
+ if (vsi->netdev) {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_disable(vsi->netdev);
+ }
+ i40e_vsi_disable_irq(vsi);
+ i40e_vsi_stop_rings(vsi);
+ if (vsi->type == I40E_VSI_MAIN &&
+ (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+ vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ i40e_force_link_state(vsi->back, false);
+ i40e_napi_disable_all(vsi);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_clean_tx_ring(vsi->tx_rings[i]);
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit and
+ * ndo_xsk_wakeup calls are completed.
+ */
+ synchronize_rcu();
+ i40e_clean_tx_ring(vsi->xdp_rings[i]);
+ }
+ i40e_clean_rx_ring(vsi->rx_rings[i]);
+ }
+}
+
+/**
+ * i40e_validate_mqprio_qopt - validate queue mapping info
+ * @vsi: the VSI being configured
+ * @mqprio_qopt: queue parameters
+ **/
+static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 sum_max_rate = 0;
+ u64 max_rate = 0;
+ int i;
+
+ if (mqprio_qopt->qopt.offset[0] != 0 ||
+ mqprio_qopt->qopt.num_tc < 1 ||
+ mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
+ return -EINVAL;
+ for (i = 0; ; i++) {
+ if (!mqprio_qopt->qopt.count[i])
+ return -EINVAL;
+ if (mqprio_qopt->min_rate[i]) {
+ dev_err(&vsi->back->pdev->dev,
+ "Invalid min tx rate (greater than 0) specified\n");
+ return -EINVAL;
+ }
+ max_rate = mqprio_qopt->max_rate[i];
+ do_div(max_rate, I40E_BW_MBPS_DIVISOR);
+ sum_max_rate += max_rate;
+
+ if (i >= mqprio_qopt->qopt.num_tc - 1)
+ break;
+ if (mqprio_qopt->qopt.offset[i + 1] !=
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+ }
+ if (vsi->num_queue_pairs <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to create traffic channel, insufficient number of queues.\n");
+ return -EINVAL;
+ }
+ if (sum_max_rate > i40e_get_link_speed(vsi)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Invalid max tx rate specified\n");
+ return -EINVAL;
+ }
+ return 0;
+}
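+
+/* Example of a channel-mode mqprio request that passes the checks above
+ * (illustrative tc(8) invocation; the device name and queue counts are
+ * placeholders):
+ *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
+ *      queues 4@0 4@4 hw 1 mode channel
+ * Queue offsets must be contiguous (4@0 then 4@4) and min_rate must be
+ * left unset, otherwise the function returns -EINVAL.
+ */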
+
+/**
+ * i40e_vsi_set_default_tc_config - set default values for tc configuration
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
+{
+ u16 qcount;
+ int i;
+
+ /* Only TC0 is enabled */
+ vsi->tc_config.numtc = 1;
+ vsi->tc_config.enabled_tc = 1;
+ qcount = min_t(int, vsi->alloc_queue_pairs,
+ i40e_pf_get_max_q_per_tc(vsi->back));
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* For the TC that is not enabled set the offset to default
+ * queue and allocate one queue for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ if (i == 0)
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ else
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+ }
+}
+
+/**
+ * i40e_del_macvlan_filter - delete a MAC filter from the channel VSI
+ * @hw: pointer to the HW structure
+ * @seid: seid of the channel VSI
+ * @macaddr: the mac address to apply as a filter
+ * @aq_err: store the admin Q error
+ *
+ * This function deletes a mac filter on the channel VSI which serves as the
+ * macvlan. Returns 0 on success.
+ **/
+static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ int status;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
+ *aq_err = hw->aq.asq_last_status;
+
+ return status;
+}
+
+/**
+ * i40e_add_macvlan_filter - add a MAC filter on the channel VSI
+ * @hw: pointer to the HW structure
+ * @seid: seid of the channel VSI
+ * @macaddr: the mac address to apply as a filter
+ * @aq_err: store the admin Q error
+ *
+ * This function adds a mac filter on the channel VSI which serves as the
+ * macvlan. Returns 0 on success.
+ **/
+static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
+{
+ struct i40e_aqc_add_macvlan_element_data element;
+ u16 cmd_flags = 0;
+ int status;
+
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.queue_number = 0;
+ element.match_method = I40E_AQC_MM_ERR_NO_RES;
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ element.flags = cpu_to_le16(cmd_flags);
+ status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
+ *aq_err = hw->aq.asq_last_status;
+
+ return status;
+}
+
+/**
+ * i40e_reset_ch_rings - Reset the queue contexts in a channel
+ * @vsi: the VSI we want to access
+ * @ch: the channel we want to access
+ */
+static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
+{
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+ int i;
+
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ pf_q = ch->base_queue + i;
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = NULL;
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = NULL;
+ }
+}
+
+/**
+ * i40e_free_macvlan_channels - free the macvlan channel VSIs
+ * @vsi: the VSI we want to access
+ *
+ * This function frees the Qs of the channel VSI from
+ * the stack and also deletes the channel VSIs which
+ * serve as macvlans.
+ */
+static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ int ret;
+
+ if (list_empty(&vsi->macvlan_list))
+ return;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+ struct i40e_vsi *parent_vsi;
+
+ if (i40e_is_channel_macvlan(ch)) {
+ i40e_reset_ch_rings(vsi, ch);
+ clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+ netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
+ netdev_set_sb_channel(ch->fwd->netdev, 0);
+ kfree(ch->fwd);
+ ch->fwd = NULL;
+ }
+
+ list_del(&ch->list);
+ parent_vsi = ch->parent_vsi;
+ if (!parent_vsi || !ch->initialized) {
+ kfree(ch);
+ continue;
+ }
+
+ /* remove the VSI */
+ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
+ NULL);
+ if (ret)
+ dev_err(&vsi->back->pdev->dev,
+ "unable to remove channel (%d) for parent VSI(%d)\n",
+ ch->seid, parent_vsi->seid);
+ kfree(ch);
+ }
+ vsi->macvlan_cnt = 0;
+}
+
+/**
+ * i40e_fwd_ring_up - bring the macvlan device up
+ * @vsi: the VSI we want to access
+ * @vdev: macvlan netdevice
+ * @fwd: the private fwd structure
+ */
+static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+ struct i40e_fwd_adapter *fwd)
+{
+ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
+ int ret = 0, num_tc = 1, i, aq_err;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* Go through the list and find an available channel */
+ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(iter)) {
+ iter->fwd = fwd;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(vsi->netdev, vdev,
+ i,
+ iter->num_queue_pairs,
+ iter->base_queue);
+ for (i = 0; i < iter->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+
+ pf_q = iter->base_queue + i;
+
+ /* Get to TX ring ptr */
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = iter;
+
+ /* Get the RX ring ptr */
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = iter;
+ }
+ ch = iter;
+ break;
+ }
+ }
+
+ if (!ch)
+ return -EINVAL;
+
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+ wmb();
+
+ /* Add a mac filter */
+ ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
+ if (ret) {
+ /* if we cannot add the MAC rule then disable the offload */
+ macvlan_release_l2fw_offload(vdev);
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring;
+ u16 pf_q;
+
+ pf_q = ch->base_queue + i;
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->netdev = NULL;
+ }
+ dev_info(&pf->pdev->dev,
+ "Error adding mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, aq_err));
+ netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_setup_macvlans - create the channels which will be macvlans
+ * @vsi: the VSI we want to access
+ * @macvlan_cnt: no. of macvlans to be set up
+ * @qcnt: no. of Qs per macvlan
+ * @vdev: macvlan netdevice
+ */
+static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
+ struct net_device *vdev)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ u16 sections, qmap, num_qps;
+ struct i40e_channel *ch;
+ int i, pow, ret = 0;
+ u8 offset = 0;
+
+ if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
+ return -EINVAL;
+
+ num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
+
+ /* find the next higher power-of-2 of num queue pairs */
+ pow = fls(roundup_pow_of_two(num_qps) - 1);
+
+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
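+
+ /* Worked example (illustrative): with num_qps = 10,
+ * roundup_pow_of_two(10) = 16 and fls(15) = 4, so the qmap
+ * advertises 2^4 = 16 contiguous queues for the main VSI.
+ */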
+
+ /* Setup context bits for the main VSI */
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = vsi->back->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ ctxt.info.valid_sections |= cpu_to_le16(sections);
+
+ /* Reconfigure RSS for main VSI with new max queue count */
+ vsi->rss_size = max_t(u16, num_qps, qcnt);
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed to reconfig RSS for num_queues (%u)\n",
+ vsi->rss_size);
+ return ret;
+ }
+ vsi->reconfig_rss = true;
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
+ vsi->next_base_queue = num_qps;
+ vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
+
+ /* Update the VSI after updating the VSI queue-mapping
+ * information
+ */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ /* Create channels for macvlans */
+ INIT_LIST_HEAD(&vsi->macvlan_list);
+ for (i = 0; i < macvlan_cnt; i++) {
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_queue_pairs = qcnt;
+ if (!i40e_setup_channel(pf, vsi, ch)) {
+ ret = -EINVAL;
+ kfree(ch);
+ goto err_free;
+ }
+ ch->parent_vsi = vsi;
+ vsi->cnt_q_avail -= ch->num_queue_pairs;
+ vsi->macvlan_cnt++;
+ list_add_tail(&ch->list, &vsi->macvlan_list);
+ }
+
+ return ret;
+
+err_free:
+ dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
+ i40e_free_macvlan_channels(vsi);
+
+ return ret;
+}
+
+/**
+ * i40e_fwd_add - configure macvlans
+ * @netdev: net device to configure
+ * @vdev: macvlan netdevice
+ **/
+static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_fwd_adapter *fwd;
+ int avail_macvlan, ret;
+
+ if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
+ netdev_info(netdev, "Not enough vectors available to support macvlans\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The macvlan device has to be a single Q device so that the
+ * tc_to_txq field can be reused to pick the tx queue.
+ */
+ if (netif_is_multiqueue(vdev))
+ return ERR_PTR(-ERANGE);
+
+ if (!vsi->macvlan_cnt) {
+ /* reserve bit 0 for the pf device */
+ set_bit(0, vsi->fwd_bitmask);
+
+ /* Try to reserve as many queues as possible for macvlans. First
+ * reserve 3/4th of max vectors, then half, then quarter and
+ * calculate Qs per macvlan as you go
+ */
+ vectors = pf->num_lan_msix;
+ if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
+ /* allocate 4 Qs per macvlan and 32 Qs to the PF */
+ q_per_macvlan = 4;
+ macvlan_cnt = (vectors - 32) / 4;
+ } else if (vectors <= 64 && vectors > 32) {
+ /* allocate 2 Qs per macvlan and 16 Qs to the PF */
+ q_per_macvlan = 2;
+ macvlan_cnt = (vectors - 16) / 2;
+ } else if (vectors <= 32 && vectors > 16) {
+ /* allocate 1 Q per macvlan and 16 Qs to the PF */
+ q_per_macvlan = 1;
+ macvlan_cnt = vectors - 16;
+ } else if (vectors <= 16 && vectors > 8) {
+ /* allocate 1 Q per macvlan and 8 Qs to the PF */
+ q_per_macvlan = 1;
+ macvlan_cnt = vectors - 8;
+ } else {
+ /* allocate 1 Q per macvlan and 1 Q to the PF */
+ q_per_macvlan = 1;
+ macvlan_cnt = vectors - 1;
+ }
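+
+ /* Worked example (illustrative): with 48 MSI-X vectors the
+ * middle branch above applies, giving q_per_macvlan = 2 and
+ * macvlan_cnt = (48 - 16) / 2 = 16 offloadable macvlans.
+ */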
+
+ if (macvlan_cnt == 0)
+ return ERR_PTR(-EBUSY);
+
+ /* Quiesce VSI queues */
+ i40e_quiesce_vsi(vsi);
+
+ /* sets up the macvlans but does not "enable" them */
+ ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
+ vdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Unquiesce VSI */
+ i40e_unquiesce_vsi(vsi);
+ }
+ avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
+ vsi->macvlan_cnt);
+ if (avail_macvlan >= I40E_MAX_MACVLANS)
+ return ERR_PTR(-EBUSY);
+
+ /* create the fwd struct */
+ fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
+ if (!fwd)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(avail_macvlan, vsi->fwd_bitmask);
+ fwd->bit_no = avail_macvlan;
+ netdev_set_sb_channel(vdev, avail_macvlan);
+ fwd->netdev = vdev;
+
+ if (!netif_running(netdev))
+ return fwd;
+
+ /* Set fwd ring up */
+ ret = i40e_fwd_ring_up(vsi, vdev, fwd);
+ if (ret) {
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
+ kfree(fwd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return fwd;
+}
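+
+/* Typical user-space sequence that reaches i40e_fwd_add() through the
+ * ndo_dfwd_add_station hook (illustrative; device names are
+ * placeholders):
+ *   ethtool -K eth0 l2-fwd-offload on
+ *   ip link add link eth0 name macvlan0 type macvlan
+ */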
+
+/**
+ * i40e_del_all_macvlans - Delete all the mac filters on the channels
+ * @vsi: the VSI we want to access
+ */
+static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int aq_err, ret = 0;
+
+ if (list_empty(&vsi->macvlan_list))
+ return;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+ if (i40e_is_channel_macvlan(ch)) {
+ ret = i40e_del_macvlan_filter(hw, ch->seid,
+ i40e_channel_mac(ch),
+ &aq_err);
+ if (!ret) {
+ /* Reset queue contexts */
+ i40e_reset_ch_rings(vsi, ch);
+ clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+ netdev_unbind_sb_channel(vsi->netdev,
+ ch->fwd->netdev);
+ netdev_set_sb_channel(ch->fwd->netdev, 0);
+ kfree(ch->fwd);
+ ch->fwd = NULL;
+ }
+ }
+ }
+}
+
+/**
+ * i40e_fwd_del - delete macvlan interfaces
+ * @netdev: net device to configure
+ * @vdev: macvlan netdevice
+ */
+static void i40e_fwd_del(struct net_device *netdev, void *vdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_fwd_adapter *fwd = vdev;
+ struct i40e_channel *ch, *ch_tmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int aq_err, ret = 0;
+
+ /* Find the channel associated with the macvlan and del mac filter */
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+ if (i40e_is_channel_macvlan(ch) &&
+ ether_addr_equal(i40e_channel_mac(ch),
+ fwd->netdev->dev_addr)) {
+ ret = i40e_del_macvlan_filter(hw, ch->seid,
+ i40e_channel_mac(ch),
+ &aq_err);
+ if (!ret) {
+ /* Reset queue contexts */
+ i40e_reset_ch_rings(vsi, ch);
+ clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+ netdev_unbind_sb_channel(netdev, fwd->netdev);
+ netdev_set_sb_channel(fwd->netdev, 0);
+ kfree(ch->fwd);
+ ch->fwd = NULL;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, aq_err));
+ }
+ break;
+ }
+ }
+}
+
+/**
+ * i40e_setup_tc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @type_data: tc offload data
+ **/
+static int i40e_setup_tc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc = 0, num_tc, hw;
+ bool need_reset = false;
+ int old_queue_pairs;
+ int ret = -EINVAL;
+ u16 mode;
+ int i;
+
+ old_queue_pairs = vsi->num_queue_pairs;
+ num_tc = mqprio_qopt->qopt.num_tc;
+ hw = mqprio_qopt->qopt.hw;
+ mode = mqprio_qopt->mode;
+ if (!hw) {
+ pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ goto config_tc;
+ }
+
+ /* Check if MFP enabled */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ netdev_info(netdev,
+ "Configuring TC not supported in MFP mode\n");
+ return ret;
+ }
+ switch (mode) {
+ case TC_MQPRIO_MODE_DCB:
+ pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+
+ /* Check if DCB enabled to continue */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ netdev_info(netdev,
+ "DCB is not enabled for adapter\n");
+ return ret;
+ }
+
+ /* Check whether tc count is within enabled limit */
+ if (num_tc > i40e_pf_get_num_tc(pf)) {
+ netdev_info(netdev,
+ "TC count greater than enabled on link for adapter\n");
+ return ret;
+ }
+ break;
+ case TC_MQPRIO_MODE_CHANNEL:
+ if (pf->flags & I40E_FLAG_DCB_ENABLED) {
+ netdev_info(netdev,
+ "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
+ return ret;
+ }
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return ret;
+ ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
+ if (ret)
+ return ret;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt,
+ sizeof(*mqprio_qopt));
+ pf->flags |= I40E_FLAG_TC_MQPRIO;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+config_tc:
+ /* Generate TC map for number of tc requested */
+ for (i = 0; i < num_tc; i++)
+ enabled_tc |= BIT(i);
+
+ /* Requesting same TC configuration as already enabled */
+ if (enabled_tc == vsi->tc_config.enabled_tc &&
+ mode != TC_MQPRIO_MODE_CHANNEL)
+ return 0;
+
+ /* Quiesce VSI queues */
+ i40e_quiesce_vsi(vsi);
+
+ if (!hw && !i40e_is_tc_mqprio_enabled(pf))
+ i40e_remove_queue_channels(vsi);
+
+ /* Configure VSI for enabled TCs */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+ netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
+ vsi->seid);
+ need_reset = true;
+ goto exit;
+ } else if (enabled_tc &&
+ (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
+ netdev_info(netdev,
+ "Failed to create channel. Override queues (%u) not power of 2\n",
+ vsi->tc_config.tc_info[0].qcount);
+ ret = -EINVAL;
+ need_reset = true;
+ goto exit;
+ }
+
+ dev_info(&vsi->back->pdev->dev,
+ "Setup channel (id:%u) utilizing num_queues %d\n",
+ vsi->seid, vsi->tc_config.tc_info[0].qcount);
+
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ if (vsi->mqprio_qopt.max_rate[0]) {
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
+
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (!ret) {
+ u64 credits = max_tx_rate;
+
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ max_tx_rate,
+ credits,
+ vsi->seid);
+ } else {
+ need_reset = true;
+ goto exit;
+ }
+ }
+ ret = i40e_configure_queue_channels(vsi);
+ if (ret) {
+ vsi->num_queue_pairs = old_queue_pairs;
+ netdev_info(netdev,
+ "Failed configuring queue channels\n");
+ need_reset = true;
+ goto exit;
+ }
+ }
+
+exit:
+ /* Reset the configuration data to defaults, only TC0 is enabled */
+ if (need_reset) {
+ i40e_vsi_set_default_tc_config(vsi);
+ need_reset = false;
+ }
+
+ /* Unquiesce VSI */
+ i40e_unquiesce_vsi(vsi);
+ return ret;
+}
+
+/**
+ * i40e_set_cld_element - sets cloud filter element data
+ * @filter: cloud filter rule
+ * @cld: ptr to cloud filter element data
+ *
+ * This is helper function to copy data into cloud filter element
+ **/
+static inline void
+i40e_set_cld_element(struct i40e_cloud_filter *filter,
+ struct i40e_aqc_cloud_filters_element_data *cld)
+{
+ u32 ipa;
+ int i;
+
+ memset(cld, 0, sizeof(*cld));
+ ether_addr_copy(cld->outer_mac, filter->dst_mac);
+ ether_addr_copy(cld->inner_mac, filter->src_mac);
+
+ if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
+ return;
+
+ if (filter->n_proto == ETH_P_IPV6) {
+#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
+ for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
+ ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
+
+ *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
+ }
+ } else {
+ ipa = be32_to_cpu(filter->dst_ipv4);
+
+ memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
+ }
+
+ cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
+
+ /* tenant_id is not supported by the FW yet; once support is enabled,
+ * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
+ */
+ if (filter->tenant_id)
+ return;
+}
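+
+/* Note on the IPv6 copy above (illustrative): the four 32-bit words of
+ * dst_ipv6 are stored in reverse order and converted to little endian,
+ * e.g. for 2001:db8::1 the last word, be32 0x00000001 (dst_ipv6[3]),
+ * lands at the start of raw_v6.data as le32 0x00000001.
+ */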
+
+/**
+ * i40e_add_del_cloud_filter - Add/del cloud filter
+ * @vsi: pointer to VSI
+ * @filter: cloud filter rule
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec.
+ * Returns 0 if the filter was successfully added.
+ **/
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter, bool add)
+{
+ struct i40e_aqc_cloud_filters_element_data cld_filter;
+ struct i40e_pf *pf = vsi->back;
+ int ret;
+ static const u16 flag_table[128] = {
+ [I40E_CLOUD_FILTER_FLAGS_OMAC] =
+ I40E_AQC_ADD_CLOUD_FILTER_OMAC,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
+ [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
+ I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
+ [I40E_CLOUD_FILTER_FLAGS_IIP] =
+ I40E_AQC_ADD_CLOUD_FILTER_IIP,
+ };
+
+ if (filter->flags >= ARRAY_SIZE(flag_table))
+ return I40E_ERR_CONFIG;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter);
+
+ if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
+ cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+
+ if (filter->n_proto == ETH_P_IPV6)
+ cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
+ else
+ cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+
+ if (add)
+ ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ else
+ ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ if (ret)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
+ add ? "add" : "delete", filter->dst_port, ret,
+ pf->hw.aq.asq_last_status);
+ else
+ dev_info(&pf->pdev->dev,
+ "%s cloud filter for VSI: %d\n",
+ add ? "Added" : "Deleted", filter->seid);
+ return ret;
+}
+
+/**
+ * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
+ * @vsi: pointer to VSI
+ * @filter: cloud filter rule
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec using big buffer.
+ * Returns 0 if the filter was successfully added.
+ **/
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add)
+{
+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
+ struct i40e_pf *pf = vsi->back;
+ int ret;
+
+ /* Both (src/dst) valid mac_addr are not supported */
+ if ((is_valid_ether_addr(filter->dst_mac) &&
+ is_valid_ether_addr(filter->src_mac)) ||
+ (is_multicast_ether_addr(filter->dst_mac) &&
+ is_multicast_ether_addr(filter->src_mac)))
+ return -EOPNOTSUPP;
+
+ /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+ * ports are not yet supported via big buffer.
+ */
+ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+ return -EOPNOTSUPP;
+
+ /* adding filter using src_port/src_ip is not supported at this stage */
+ if (filter->src_port ||
+ (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ !ipv6_addr_any(&filter->ip.v6.src_ip6))
+ return -EOPNOTSUPP;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter.element);
+
+ if (is_valid_ether_addr(filter->dst_mac) ||
+ is_valid_ether_addr(filter->src_mac) ||
+ is_multicast_ether_addr(filter->dst_mac) ||
+ is_multicast_ether_addr(filter->src_mac)) {
+ /* MAC + IP : unsupported mode */
+ if (filter->dst_ipv4)
+ return -EOPNOTSUPP;
+
+ /* since we validated that L4 port must be valid before
+ * we get here, start with respective "flags" value
+ * and update if vlan is present or not
+ */
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
+
+ if (filter->vlan_id) {
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
+ }
+
+ } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
+ if (filter->n_proto == ETH_P_IPV6)
+ cld_filter.element.flags |=
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
+ else
+ cld_filter.element.flags |=
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "either mac or ip has to be valid for cloud filter\n");
+ return -EINVAL;
+ }
+
+ /* Now copy L4 port in Bytes 6..7 of the general fields */
+ cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
+ be16_to_cpu(filter->dst_port);
+
+ if (add) {
+ /* Validate current device switch mode, change if necessary */
+ ret = i40e_validate_and_set_switch_mode(vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "failed to set switch mode, ret %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ } else {
+ ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ }
+
+ if (ret)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
+ add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
+ else
+ dev_info(&pf->pdev->dev,
+ "%s cloud filter for VSI: %d, L4 port: %d\n",
+ add ? "add" : "delete", filter->seid,
+ ntohs(filter->dst_port));
+ return ret;
+}
+
+/**
+ * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
+ * @vsi: Pointer to VSI
+ * @f: Pointer to struct flow_cls_offload
+ * @filter: Pointer to cloud filter structure
+ *
+ **/
+static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct i40e_cloud_filter *filter)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+ u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
+ struct i40e_pf *pf = vsi->back;
+ u8 field_flags = 0;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
+ dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+
+ flow_rule_match_enc_keyid(rule, &match);
+ if (match.mask->keyid != 0)
+ field_flags |= I40E_CLOUD_FIELD_TEN_ID;
+
+ filter->tenant_id = be32_to_cpu(match.key->keyid);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+ filter->n_proto = n_proto_key & n_proto_mask;
+ filter->ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ /* use is_broadcast and is_zero to check for all 0xf or 0 */
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (is_broadcast_ether_addr(match.mask->dst)) {
+ field_flags |= I40E_CLOUD_FIELD_OMAC;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
+ match.mask->dst);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (is_broadcast_ether_addr(match.mask->src)) {
+ field_flags |= I40E_CLOUD_FIELD_IMAC;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
+ match.mask->src);
+ return I40E_ERR_CONFIG;
+ }
+ }
+ ether_addr_copy(filter->dst_mac, match.key->dst);
+ ether_addr_copy(filter->src_mac, match.key->src);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ field_flags |= I40E_CLOUD_FIELD_IVLAN;
+
+ } else {
+ dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
+ match.mask->vlan_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ filter->vlan_id = cpu_to_be16(match.key->vlan_id);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
+ &match.mask->dst);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
+ &match.mask->src);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
+ dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
+ return I40E_ERR_CONFIG;
+ }
+ filter->dst_ipv4 = match.key->dst;
+ filter->src_ipv4 = match.key->src;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+
+ /* src and dest IPV6 address should not be LOOPBACK
+ * (0:0:0:0:0:0:0:1), which can be represented as ::1
+ */
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
+ dev_err(&pf->pdev->dev,
+ "Bad ipv6, addr is LOOPBACK\n");
+ return I40E_ERR_CONFIG;
+ }
+ if (!ipv6_addr_any(&match.mask->dst) ||
+ !ipv6_addr_any(&match.mask->src))
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+
+ memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
+ sizeof(filter->src_ipv6));
+ memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
+ sizeof(filter->dst_ipv6));
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be16(0xffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
+ be16_to_cpu(match.mask->src));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be16(0xffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
+ be16_to_cpu(match.mask->dst));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ filter->dst_port = match.key->dst;
+ filter->src_port = match.key->src;
+
+ switch (filter->ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ dev_err(&pf->pdev->dev,
+ "Only UDP and TCP transport are supported\n");
+ return -EINVAL;
+ }
+ }
+ filter->flags = field_flags;
+ return 0;
+}
+
+/**
+ * i40e_handle_tclass - Forward to a traffic class on the device
+ * @vsi: Pointer to VSI
+ * @tc: traffic class index on the device
+ * @filter: Pointer to cloud filter structure
+ *
+ **/
+static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
+ struct i40e_cloud_filter *filter)
+{
+ struct i40e_channel *ch, *ch_tmp;
+
+ /* direct to a traffic class on the same device */
+ if (tc == 0) {
+ filter->seid = vsi->seid;
+ return 0;
+ } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
+ if (!filter->dst_port) {
+ dev_err(&vsi->back->pdev->dev,
+ "Specify destination port to direct to traffic class that is not default\n");
+ return -EINVAL;
+ }
+ if (list_empty(&vsi->ch_list))
+ return -EINVAL;
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
+ list) {
+ if (ch->seid == vsi->tc_seid_map[tc])
+ filter->seid = ch->seid;
+ }
+ return 0;
+ }
+ dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
+ return -EINVAL;
+}
+
+/**
+ * i40e_configure_clsflower - Configure tc flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ *
+ **/
+static int i40e_configure_clsflower(struct i40e_vsi *vsi,
+ struct flow_cls_offload *cls_flower)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+ struct i40e_cloud_filter *filter = NULL;
+ struct i40e_pf *pf = vsi->back;
+ int err = 0;
+
+ if (tc < 0) {
+ dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!tc) {
+ dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+ return -EBUSY;
+
+ if (pf->fdir_pf_active_filters ||
+ (!hlist_empty(&pf->fdir_filter_list))) {
+ dev_err(&vsi->back->pdev->dev,
+ "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
+ return -EINVAL;
+ }
+
+ if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
+ dev_err(&vsi->back->pdev->dev,
+ "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
+ vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ }
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->cookie = cls_flower->cookie;
+
+ err = i40e_parse_cls_flower(vsi, cls_flower, filter);
+ if (err < 0)
+ goto err;
+
+ err = i40e_handle_tclass(vsi, tc, filter);
+ if (err < 0)
+ goto err;
+
+ /* Add cloud filter */
+ if (filter->dst_port)
+ err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
+ else
+ err = i40e_add_del_cloud_filter(vsi, filter, true);
+
+ if (err) {
+ dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
+ err);
+ goto err;
+ }
+
+ /* add filter to the ordered list */
+ INIT_HLIST_NODE(&filter->cloud_node);
+
+ hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
+
+ pf->num_cloud_filters++;
+
+ return err;
+err:
+ kfree(filter);
+ return err;
+}
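+
+/* Example of a tc-flower rule this path programs (illustrative; the
+ * device name and match values are placeholders). The rule must target
+ * a non-default traffic class via hw_tc:
+ *   tc qdisc add dev eth0 clsact
+ *   tc filter add dev eth0 ingress protocol ip flower \
+ *      dst_ip 192.0.2.1 ip_proto tcp dst_port 80 skip_sw hw_tc 1
+ */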
+
+/**
+ * i40e_find_cloud_filter - Find the cloud filter in the list
+ * @vsi: Pointer to VSI
+ * @cookie: filter specific cookie
+ *
+ **/
+static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
+ unsigned long *cookie)
+{
+ struct i40e_cloud_filter *filter = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(filter, node2,
+ &vsi->back->cloud_filter_list, cloud_node)
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ return NULL;
+}
+
+/**
+ * i40e_delete_clsflower - Remove tc flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ *
+ **/
+static int i40e_delete_clsflower(struct i40e_vsi *vsi,
+ struct flow_cls_offload *cls_flower)
+{
+ struct i40e_cloud_filter *filter = NULL;
+ struct i40e_pf *pf = vsi->back;
+ int err = 0;
+
+ filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
+
+ if (!filter)
+ return -EINVAL;
+
+ hash_del(&filter->cloud_node);
+
+ if (filter->dst_port)
+ err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
+ else
+ err = i40e_add_del_cloud_filter(vsi, filter, false);
+
+ kfree(filter);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to delete cloud filter, err %pe\n",
+ ERR_PTR(err));
+ return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
+ }
+
+ pf->num_cloud_filters--;
+ if (!pf->num_cloud_filters)
+ if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
+ !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ }
+ return 0;
+}
+
+/**
+ * i40e_setup_tc_cls_flower - flower classifier offloads
+ * @np: net device to configure
+ * @cls_flower: offload data
+ **/
+static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
+ struct flow_cls_offload *cls_flower)
+{
+ struct i40e_vsi *vsi = np->vsi;
+
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return i40e_configure_clsflower(vsi, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return i40e_delete_clsflower(vsi, cls_flower);
+ case FLOW_CLS_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct i40e_netdev_priv *np = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return i40e_setup_tc_cls_flower(np, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(i40e_block_cb_list);
+
+static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return i40e_setup_tc(netdev, type_data);
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &i40e_block_cb_list,
+ i40e_setup_tc_block_cb,
+ np, np, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * i40e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog subtask is
+ * enabled, and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+int i40e_open(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ /* disallow open during test or if eeprom is broken */
+ if (test_bit(__I40E_TESTING, pf->state) ||
+ test_bit(__I40E_BAD_EEPROM, pf->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ if (i40e_force_link_state(pf, true))
+ return -EAGAIN;
+
+ err = i40e_vsi_open(vsi);
+ if (err)
+ return err;
+
+ /* configure global TSO hardware offload settings */
+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+ TCP_FLAG_FIN) >> 16);
+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+ TCP_FLAG_FIN |
+ TCP_FLAG_CWR) >> 16);
+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
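+
+ /* Worked example (illustrative): TCP_FLAG_PSH | TCP_FLAG_FIN is
+ * the be32 constant 0x00090000, so be32_to_cpu(...) >> 16 yields
+ * the 16-bit TCP flags value 0x0009 written to the first mask
+ * register above.
+ */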
+ udp_tunnel_get_rx_info(netdev);
+
+ return 0;
+}
+
+/**
+ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+ * @vsi: vsi structure
+ *
+ * This updates netdev's number of tx/rx queues
+ *
+ * Returns status of setting tx/rx queues
+ **/
+static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+{
+ int ret;
+
+ ret = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (ret)
+ return ret;
+
+ return netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+}
+
+/**
+ * i40e_vsi_open - finish VSI initialization and bring it up
+ * @vsi: the VSI to open
+ *
+ * Finish initialization of the VSI.
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
+ **/
+int i40e_vsi_open(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ char int_name[I40E_INT_NAME_STR_LEN];
+ int err;
+
+ /* allocate descriptors */
+ err = i40e_vsi_setup_tx_resources(vsi);
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ if (vsi->netdev) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ /* Notify the stack of the actual queue counts. */
+ err = i40e_netif_set_realnum_tx_rx_queues(vsi);
+ if (err)
+ goto err_set_queues;
+
+ } else if (vsi->type == I40E_VSI_FDIR) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ } else {
+ err = -EINVAL;
+ goto err_setup_rx;
+ }
+
+ err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+
+ return 0;
+
+err_up_complete:
+ i40e_down(vsi);
+err_set_queues:
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+
+ return err;
+}
+
+/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: Pointer to PF
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *filter;
+ struct i40e_flex_pit *pit_entry, *tmp;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(filter, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
+ pf->fdir_pf_active_filters = 0;
+ i40e_reset_fdir_filter_cnt(pf);
+
+ /* Reprogram the default input set for TCP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for TCP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for UDP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for UDP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for SCTP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for SCTP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for Other/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ /* Reprogram the default input set for Other/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+}
+
+/**
+ * i40e_cloud_filter_exit - Cleans up the cloud filters
+ * @pf: Pointer to PF
+ *
+ * This function destroys the hlist where all the cloud filters
+ * were saved.
+ **/
+static void i40e_cloud_filter_exit(struct i40e_pf *pf)
+{
+ struct i40e_cloud_filter *cfilter;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(cfilter, node,
+ &pf->cloud_filter_list, cloud_node) {
+ hlist_del(&cfilter->cloud_node);
+ kfree(cfilter);
+ }
+ pf->num_cloud_filters = 0;
+
+ if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
+ !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ }
+}
+
+/**
+ * i40e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * this netdev interface is disabled.
+ *
+ * Returns 0, this is not allowed to fail
+ **/
+int i40e_close(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ i40e_vsi_close(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_do_reset - Start a PF or Core Reset sequence
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ *
+ * The essential difference in resets is that the PF Reset
+ * doesn't clear the packet buffers, doesn't reset the PE
+ * firmware, and doesn't bother the other PFs on the chip.
+ **/
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+{
+ u32 val;
+
+ /* do the biggest reset indicated */
+ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
+
+ /* Request a Global Reset
+ *
+ * This will start the chip's countdown to the actual full
+ * chip reset event, and a warning interrupt to be sent
+ * to all PFs, including the requestor. Our handler
+ * for the warning interrupt will deal with the shutdown
+ * and recovery of the switch setup.
+ */
+ dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+
+ } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
+
+ /* Request a Core Reset
+ *
+ * Same as Global Reset, except does *not* include the MAC/PHY
+ */
+ dev_dbg(&pf->pdev->dev, "CoreR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_CORER_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
+ } else if (reset_flags & I40E_PF_RESET_FLAG) {
+
+ /* Request a PF Reset
+ *
+ * Resets only the PF-specific registers
+ *
+ * This goes directly to the tear-down and rebuild of
+ * the switch, since we need to do all the recovery as
+ * for the Core Reset.
+ */
+ dev_dbg(&pf->pdev->dev, "PFR requested\n");
+ i40e_handle_reset_warning(pf, lock_acquired);
+
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes PFs VSI.
+ */
+ i40e_prep_for_reset(pf);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
+ dev_info(&pf->pdev->dev,
+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
+
+ } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
+ int v;
+
+ /* Find the VSI(s) that requested a re-init */
+ dev_info(&pf->pdev->dev,
+ "VSI reinit requested\n");
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (vsi != NULL &&
+ test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ vsi->state))
+ i40e_vsi_reinit_locked(pf->vsi[v]);
+ }
+ } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
+ int v;
+
+ /* Find the VSI(s) that needs to be brought down */
+ dev_info(&pf->pdev->dev, "VSI down requested\n");
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (vsi != NULL &&
+ test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ vsi->state)) {
+ set_bit(__I40E_VSI_DOWN, vsi->state);
+ i40e_down(vsi);
+ }
+ }
+ } else {
+ dev_info(&pf->pdev->dev,
+ "bad reset request 0x%08x\n", reset_flags);
+ }
+}
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg,
+ &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prioritytable,
+ &old_cfg->etscfg.prioritytable,
+ sizeof(new_cfg->etscfg.prioritytable))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc,
+ &old_cfg->pfc,
+ sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app,
+ &old_cfg->app,
+ sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lldp_get_mib *mib =
+ (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_dcbx_config tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ int ret = 0;
+ u8 type;
+
+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
+ !(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ /* let firmware decide if the DCB should be disabled */
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+
+ /* Not DCB capable or capability disabled */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return ret;
+
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+ & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
+ if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+ return ret;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ dev_dbg(&pf->pdev->dev,
+ "LLDP event mib type %s\n", type ? "remote" : "local");
+ if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ goto exit;
+ }
+
+ /* Store the old configuration */
+ tmp_dcbx_cfg = hw->local_dcbx_config;
+
+ /* Reset the old DCBx configuration data */
+ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
+ /* Get updated DCBX data from firmware */
+ ret = i40e_get_dcb_config(&pf->hw);
+ if (ret) {
+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
+ dev_warn(&pf->pdev->dev,
+ "DCB is not supported for X710-T*L 2.5/5G speeds\n");
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ goto exit;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+ sizeof(tmp_dcbx_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ goto exit;
+ }
+
+ need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+ &hw->local_dcbx_config);
+
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
+
+ if (!need_reconfig)
+ goto exit;
+
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
+ /* Reconfiguration needed quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+
+ ret = i40e_resume_port_tx(pf);
+
+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto exit;
+
+ /* Wait for the PF's queues to be disabled */
+ ret = i40e_pf_wait_queues_disabled(pf);
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ } else {
+ i40e_pf_unquiesce_all_vsi(pf);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ }
+
+exit:
+ return ret;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags, true);
+ rtnl_unlock();
+}
+
+/**
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lan_overflow *data =
+ (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ u32 queue = le32_to_cpu(data->prtdcb_rupto);
+ u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ u16 vf_id;
+
+ dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+ queue, qtx_ctl);
+
+ /* Queue belongs to VF, find the VF and issue VF reset */
+ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+ >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
+ vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+ >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
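+		/* the VFVM index is absolute; convert it to this PF's VF index */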
+ vf_id -= hw->func_caps.vf_base_id;
+ vf = &pf->vf[vf_id];
+ i40e_vc_notify_vf_reset(vf);
+ /* Allow VF to process pending reset notification */
+ msleep(20);
+ i40e_reset_vf(vf, false);
+ }
+}
+
+/**
+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
+ * @pf: board private structure
+ **/
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+{
+ u32 val, fcnt_prog;
+
+ val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+ fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
+ return fcnt_prog;
+}
+
+/**
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
+ * @pf: board private structure
+ **/
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+ u32 val, fcnt_prog;
+
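+	/* the guaranteed-filter count sits in the low bits of FDSTAT,
+	 * the best-effort count above it
+	 */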
+ val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+ fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+ ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ return fcnt_prog;
+}
+
+/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+ u32 val, fcnt_prog;
+
+ val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+ fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+ ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+ I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ return fcnt_prog;
+}
+
+/**
+ * i40e_reenable_fdir_sb - Restore FDir SB capability
+ * @pf: board private structure
+ **/
+static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
+{
+ if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+}
+
+/**
+ * i40e_reenable_fdir_atr - Restore FDir ATR capability
+ * @pf: board private structure
+ **/
+static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
+{
+ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
+ /* ATR uses the same filtering logic as SB rules. It only
+ * functions properly if the input set mask is at the default
+ * settings. It is safe to restore the default input set
+ * because there are no active TCPv4 filter rules.
+ */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
+ }
+}
+
+/**
+ * i40e_delete_invalid_filter - Delete an invalid FDIR filter
+ * @pf: board private structure
+ * @filter: FDir filter to remove
+ **/
+static void i40e_delete_invalid_filter(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter)
+{
+ /* Update counters */
+ pf->fdir_pf_active_filters--;
+ pf->fd_inv = 0;
+
+ switch (filter->flow_type) {
+ case TCP_V4_FLOW:
+ pf->fd_tcp4_filter_cnt--;
+ break;
+ case UDP_V4_FLOW:
+ pf->fd_udp4_filter_cnt--;
+ break;
+ case SCTP_V4_FLOW:
+ pf->fd_sctp4_filter_cnt--;
+ break;
+ case TCP_V6_FLOW:
+ pf->fd_tcp6_filter_cnt--;
+ break;
+ case UDP_V6_FLOW:
+ pf->fd_udp6_filter_cnt--;
+ break;
+ case SCTP_V6_FLOW:
+		pf->fd_sctp6_filter_cnt--;
+ break;
+ case IP_USER_FLOW:
+ switch (filter->ipl4_proto) {
+ case IPPROTO_TCP:
+ pf->fd_tcp4_filter_cnt--;
+ break;
+ case IPPROTO_UDP:
+ pf->fd_udp4_filter_cnt--;
+ break;
+ case IPPROTO_SCTP:
+ pf->fd_sctp4_filter_cnt--;
+ break;
+ case IPPROTO_IP:
+ pf->fd_ip4_filter_cnt--;
+ break;
+ }
+ break;
+ case IPV6_USER_FLOW:
+ switch (filter->ipl4_proto) {
+ case IPPROTO_TCP:
+ pf->fd_tcp6_filter_cnt--;
+ break;
+ case IPPROTO_UDP:
+ pf->fd_udp6_filter_cnt--;
+ break;
+ case IPPROTO_SCTP:
+ pf->fd_sctp6_filter_cnt--;
+ break;
+ case IPPROTO_IP:
+ pf->fd_ip6_filter_cnt--;
+ break;
+ }
+ break;
+ }
+
+ /* Remove the filter from the list and free memory */
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+}
+
+/**
+ * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *filter;
+ u32 fcnt_prog, fcnt_avail;
+ struct hlist_node *node;
+
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
+ return;
+
+ /* Check if we have enough room to re-enable FDir SB capability. */
+ fcnt_prog = i40e_get_global_fd_count(pf);
+ fcnt_avail = pf->fdir_pf_filter_count;
+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
+ (pf->fd_add_err == 0) ||
+ (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
+ i40e_reenable_fdir_sb(pf);
+
+ /* We should wait for even more space before re-enabling ATR.
+ * Additionally, we cannot enable ATR as long as we still have TCP SB
+ * rules active.
+ */
+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
+ pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
+ i40e_reenable_fdir_atr(pf);
+
+ /* if hw had a problem adding a filter, delete it */
+ if (pf->fd_inv > 0) {
+ hlist_for_each_entry_safe(filter, node,
+ &pf->fdir_filter_list, fdir_node)
+ if (filter->fd_id == pf->fd_inv)
+ i40e_delete_invalid_filter(pf, filter);
+ }
+}
+
+#define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
+/**
+ * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
+ * @pf: board private structure
+ **/
+static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
+{
+ unsigned long min_flush_time;
+ int flush_wait_retry = 50;
+ bool disable_atr = false;
+ int fd_room;
+ int reg;
+
+ if (!time_after(jiffies, pf->fd_flush_timestamp +
+ (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+ return;
+
+	/* If the flush is happening too quickly and we have mostly SB rules we
+ * should not re-enable ATR for some time.
+ */
+ min_flush_time = pf->fd_flush_timestamp +
+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+ fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+
+ if (!(time_after(jiffies, min_flush_time)) &&
+ (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ disable_atr = true;
+ }
+
+ pf->fd_flush_timestamp = jiffies;
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
+ /* flush all filters */
+ wr32(&pf->hw, I40E_PFQF_CTL_1,
+ I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ i40e_flush(&pf->hw);
+ pf->fd_flush_cnt++;
+ pf->fd_add_err = 0;
+ do {
+ /* Check FD flush status every 5-6msec */
+ usleep_range(5000, 6000);
+ reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+ break;
+ } while (flush_wait_retry--);
+ if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+ dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+ } else {
+ /* replay sideband filters */
+ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+ if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+ clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
+ clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+ }
+}
+
+/**
+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
+ * @pf: board private structure
+ **/
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
+{
+ return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
+}
+
+/**
+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
+ * @pf: board private structure
+ **/
+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
+{
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, pf->state))
+ return;
+
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
+ i40e_fdir_flush_and_replay(pf);
+
+ i40e_fdir_check_and_reenable(pf);
+}
+
+/**
+ * i40e_vsi_link_event - notify VSI of a link event
+ * @vsi: vsi to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
+{
+ if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (!vsi->netdev || !vsi->netdev_registered)
+ break;
+
+ if (link_up) {
+ netif_carrier_on(vsi->netdev);
+ netif_tx_wake_all_queues(vsi->netdev);
+ } else {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_all_queues(vsi->netdev);
+ }
+ break;
+
+ case I40E_VSI_SRIOV:
+ case I40E_VSI_VMDQ2:
+ case I40E_VSI_CTRL:
+ case I40E_VSI_IWARP:
+ case I40E_VSI_MIRROR:
+ default:
+ /* there is no notification for other VSIs */
+ break;
+ }
+}
+
+/**
+ * i40e_veb_link_event - notify elements on the veb of a link event
+ * @veb: veb to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
+{
+ struct i40e_pf *pf;
+ int i;
+
+ if (!veb || !veb->pf)
+ return;
+ pf = veb->pf;
+
+ /* depth first... */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
+ i40e_veb_link_event(pf->veb[i], link_up);
+
+ /* ... now the local VSIs */
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
+ i40e_vsi_link_event(pf->vsi[i], link_up);
+}
+
+/**
+ * i40e_link_event - Update netif_carrier status
+ * @pf: board private structure
+ **/
+static void i40e_link_event(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 new_link_speed, old_link_speed;
+ bool new_link, old_link;
+ int status;
+#ifdef CONFIG_I40E_DCB
+ int err;
+#endif /* CONFIG_I40E_DCB */
+
+ /* set this to force the get_link_status call to refresh state */
+ pf->hw.phy.get_link_info = true;
+ old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+ status = i40e_get_link_status(&pf->hw, &new_link);
+
+ /* On success, disable temp link polling */
+ if (status == I40E_SUCCESS) {
+ clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
+ } else {
+ /* Enable link polling temporarily until i40e_get_link_status
+ * returns I40E_SUCCESS
+ */
+ set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
+ dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+ status);
+ return;
+ }
+
+ old_link_speed = pf->hw.phy.link_info_old.link_speed;
+ new_link_speed = pf->hw.phy.link_info.link_speed;
+
+ if (new_link == old_link &&
+ new_link_speed == old_link_speed &&
+ (test_bit(__I40E_VSI_DOWN, vsi->state) ||
+ new_link == netif_carrier_ok(vsi->netdev)))
+ return;
+
+ i40e_print_link_message(vsi, new_link);
+
+ /* Notify the base of the switch tree connected to
+ * the link. Floating VEBs are not notified.
+ */
+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
+ i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ else
+ i40e_vsi_link_event(vsi, new_link);
+
+ if (pf->vf)
+ i40e_vc_notify_link_state(pf);
+
+ if (pf->flags & I40E_FLAG_PTP)
+ i40e_ptp_set_increment(pf);
+#ifdef CONFIG_I40E_DCB
+ if (new_link == old_link)
+ return;
+ /* Not SW DCB so firmware will take care of default settings */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return;
+
+ /* We cover here only link down, as after link up in case of SW DCB
+ * SW LLDP agent will take care of setting it up
+ */
+ if (!new_link) {
+ dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
+ memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
+ err = i40e_dcb_sw_default_config(pf);
+ if (err) {
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
+ } else {
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ }
+ }
+#endif /* CONFIG_I40E_DCB */
+}
+
+/**
+ * i40e_watchdog_subtask - periodic checks not using event driven response
+ * @pf: board private structure
+ **/
+static void i40e_watchdog_subtask(struct i40e_pf *pf)
+{
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return;
+
+ /* make sure we don't do these things too often */
+ if (time_before(jiffies, (pf->service_timer_previous +
+ pf->service_timer_period)))
+ return;
+ pf->service_timer_previous = jiffies;
+
+ if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
+ i40e_link_event(pf);
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ i40e_update_stats(pf->vsi[i]);
+
+ if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+ /* Update the stats for the active switching components */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i])
+ i40e_update_veb_stats(pf->veb[i]);
+ }
+
+ i40e_ptp_rx_hang(pf);
+ i40e_ptp_tx_hang(pf);
+}
+
+/**
+ * i40e_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ **/
+static void i40e_reset_subtask(struct i40e_pf *pf)
+{
+ u32 reset_flags = 0;
+
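+	/* gather all pending reset requests and clear their state bits */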
+ if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_REINIT_REQUESTED);
+ clear_bit(__I40E_REINIT_REQUESTED, pf->state);
+ }
+ if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
+ clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ }
+ if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
+ clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
+ }
+ if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
+ clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
+ }
+ if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_DOWN_REQUESTED);
+ clear_bit(__I40E_DOWN_REQUESTED, pf->state);
+ }
+
+ /* If there's a recovery already waiting, it takes
+	 * precedence over starting a new reset sequence.
+ */
+ if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ i40e_prep_for_reset(pf);
+ i40e_reset(pf);
+ i40e_rebuild(pf, false, false);
+ }
+
+ /* If we're already down or resetting, just bail */
+ if (reset_flags &&
+ !test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ i40e_do_reset(pf, reset_flags, false);
+ }
+}
+
+/**
+ * i40e_handle_link_event - Handle link event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static void i40e_handle_link_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+
+ /* Do a new status request to re-enable LSE reporting
+ * and load new status information into the hw struct
+ * This completely ignores any state information
+ * in the ARQ event info, instead choosing to always
+ * issue the AQ update link status command.
+ */
+ i40e_link_event(pf);
+
+ /* Check if module meets thermal requirements */
+ if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
+ dev_err(&pf->pdev->dev,
+ "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
+ dev_err(&pf->pdev->dev,
+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ } else {
+ /* check for unqualified module, if link is down, suppress
+ * the message if link was forced to be down.
+ */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)) &&
+ (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+ dev_err(&pf->pdev->dev,
+ "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
+ dev_err(&pf->pdev->dev,
+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ }
+ }
+}
+
+/**
+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
+ * @pf: board private structure
+ **/
+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+{
+ struct i40e_arq_event_info event;
+ struct i40e_hw *hw = &pf->hw;
+ u16 pending, i = 0;
+ u16 opcode;
+ u32 oldval;
+ int ret;
+ u32 val;
+
+ /* Do not run clean AQ when PF reset fails */
+ if (test_bit(__I40E_RESET_FAILED, pf->state))
+ return;
+
+ /* check for error indications */
+ val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ oldval = val;
+ if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+ if (hw->debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+ if (hw->debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+ pf->arq_overflows++;
+ }
+ if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+ if (hw->debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+ val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ oldval = val;
+ if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+ if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+ dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
+ event.buf_len = I40E_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
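+	/* drain ARQ events until the queue is empty or the work limit hits */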
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &pending);
+ if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ break;
+ else if (ret) {
+ dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
+ break;
+ }
+
+ opcode = le16_to_cpu(event.desc.opcode);
+ switch (opcode) {
+ case i40e_aqc_opc_get_link_status:
+ rtnl_lock();
+ i40e_handle_link_event(pf, &event);
+ rtnl_unlock();
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+ ret = i40e_vc_process_vf_msg(pf,
+ le16_to_cpu(event.desc.retval),
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low),
+ event.msg_buf,
+ event.msg_len);
+ break;
+ case i40e_aqc_opc_lldp_update_mib:
+ dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_I40E_DCB
+ rtnl_lock();
+ i40e_handle_lldp_event(pf, &event);
+ rtnl_unlock();
+#endif /* CONFIG_I40E_DCB */
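+	/* if sideband rules had been converted to cloud filters, hand the
+	 * flow director sideband resources back now that they are gone
+	 */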
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+ i40e_handle_lan_overflow_event(pf, &event);
+ break;
+ case i40e_aqc_opc_send_msg_to_peer:
+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+ break;
+ case i40e_aqc_opc_nvm_erase:
+ case i40e_aqc_opc_nvm_update:
+ case i40e_aqc_opc_oem_post_update:
+ i40e_debug(&pf->hw, I40E_DEBUG_NVM,
+ "ARQ NVM operation 0x%04x completed\n",
+ opcode);
+ break;
+ default:
+ dev_info(&pf->pdev->dev,
+ "ARQ: Unknown event 0x%04x ignored\n",
+ opcode);
+ break;
+ }
+ } while (i++ < pf->adminq_work_limit);
+
+ if (i < pf->adminq_work_limit)
+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
+
+ /* re-enable Admin queue interrupt cause */
+ val = rd32(hw, I40E_PFINT_ICR0_ENA);
+ val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+ i40e_flush(hw);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40e_verify_eeprom - make sure eeprom is good to use
+ * @pf: board private structure
+ **/
+static void i40e_verify_eeprom(struct i40e_pf *pf)
+{
+ int err;
+
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ /* retry in case of garbage read */
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
+ err);
+ set_bit(__I40E_BAD_EEPROM, pf->state);
+ }
+ }
+
+ if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
+ dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
+ clear_bit(__I40E_BAD_EEPROM, pf->state);
+ }
+}
+
+/**
+ * i40e_enable_pf_switch_lb - enable PF switch loopback
+ * @pf: pointer to the PF structure
+ *
+ * Enable switch loopback on the main VSI. Failures are only logged;
+ * there is no point in a return value.
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+}
+
+/**
+ * i40e_disable_pf_switch_lb - disable PF switch loopback
+ * @pf: pointer to the PF structure
+ *
+ * Disable switch loopback on the main VSI. Failures are only logged;
+ * there is no point in a return value.
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ }
+}
+
+/**
+ * i40e_config_bridge_mode - Configure the HW bridge mode
+ * @veb: pointer to the bridge instance
+ *
+ * Configure the loop back mode for the LAN VSI that is downlink to the
+ * specified HW bridge instance. It is expected this function is called
+ * when a new HW bridge is instantiated.
+ **/
+static void i40e_config_bridge_mode(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+
+ if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+ dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+ i40e_disable_pf_switch_lb(pf);
+ else
+ i40e_enable_pf_switch_lb(pf);
+}
+
+/**
+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * @veb: pointer to the VEB instance
+ *
+ * This is a recursive function that first builds the attached VSIs then
+ * recurses in to build the next layer of VEB. We track the connections
+ * through our own index numbers because the SEIDs from the HW could
+ * change across the reset.
+ **/
+static int i40e_reconstitute_veb(struct i40e_veb *veb)
+{
+ struct i40e_vsi *ctl_vsi = NULL;
+ struct i40e_pf *pf = veb->pf;
+ int v, veb_idx;
+ int ret;
+
+ /* build VSI that owns this VEB, temporarily attached to base VEB */
+ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
+ if (pf->vsi[v] &&
+ pf->vsi[v]->veb_idx == veb->idx &&
+ pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ ctl_vsi = pf->vsi[v];
+ break;
+ }
+ }
+ if (!ctl_vsi) {
+ dev_info(&pf->pdev->dev,
+ "missing owner VSI for veb_idx %d\n", veb->idx);
+ ret = -ENOENT;
+ goto end_reconstitute;
+ }
+ if (ctl_vsi != pf->vsi[pf->lan_vsi])
+ ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of veb_idx %d owner VSI failed: %d\n",
+ veb->idx, ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(ctl_vsi);
+
+ /* create the VEB in the switch and move the VSI onto the VEB */
+ ret = i40e_add_veb(veb, ctl_vsi);
+ if (ret)
+ goto end_reconstitute;
+
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ i40e_config_bridge_mode(veb);
+
+ /* create the remaining VSIs attached to this VEB */
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ continue;
+
+ if (pf->vsi[v]->veb_idx == veb->idx) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ vsi->uplink_seid = veb->seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of vsi_idx %d failed: %d\n",
+ v, ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(vsi);
+ }
+ }
+
+ /* create any VEBs attached to this VEB - RECURSION */
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
+ pf->veb[veb_idx]->uplink_seid = veb->seid;
+ ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
+ if (ret)
+ break;
+ }
+ }
+
+end_reconstitute:
+ return ret;
+}
+
+/**
+ * i40e_get_capabilities - get info about the HW
+ * @pf: the PF struct
+ * @list_type: AQ capability to be queried
+ **/
+static int i40e_get_capabilities(struct i40e_pf *pf,
+ enum i40e_admin_queue_opc list_type)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ u16 data_size;
+ int buf_len;
+ int err;
+
+ buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ do {
+ cap_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!cap_buf)
+ return -ENOMEM;
+
+ /* this loads the data into the hw struct for us */
+ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
+ &data_size, list_type,
+ NULL);
+ /* data loaded, buffer no longer needed */
+ kfree(cap_buf);
+
+ if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ /* retry with a larger buffer */
+ buf_len = data_size;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ dev_info(&pf->pdev->dev,
+ "capability discovery failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENODEV;
+ }
+ } while (err);
+
+ if (pf->hw.debug_mask & I40E_DEBUG_USER) {
+ if (list_type == i40e_aqc_opc_list_func_capabilities) {
+ dev_info(&pf->pdev->dev,
+ "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+ pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+ pf->hw.func_caps.num_msix_vectors,
+ pf->hw.func_caps.num_msix_vectors_vf,
+ pf->hw.func_caps.fd_filters_guaranteed,
+ pf->hw.func_caps.fd_filters_best_effort,
+ pf->hw.func_caps.num_tx_qp,
+ pf->hw.func_caps.num_vsis);
+ } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
+ dev_info(&pf->pdev->dev,
+ "switch_mode=0x%04x, function_valid=0x%08x\n",
+ pf->hw.dev_caps.switch_mode,
+ pf->hw.dev_caps.valid_functions);
+ dev_info(&pf->pdev->dev,
+ "SR-IOV=%d, num_vfs for all function=%u\n",
+ pf->hw.dev_caps.sr_iov_1_1,
+ pf->hw.dev_caps.num_vfs);
+ dev_info(&pf->pdev->dev,
+ "num_vsis=%u, num_rx:%u, num_tx=%u\n",
+ pf->hw.dev_caps.num_vsis,
+ pf->hw.dev_caps.num_rx_qp,
+ pf->hw.dev_caps.num_tx_qp);
+ }
+ }
+ if (list_type == i40e_aqc_opc_list_func_capabilities) {
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ + pf->hw.func_caps.num_vfs)
+ if (pf->hw.revision_id == 0 &&
+ pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
+ }
+ return 0;
+}
+
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
+/**
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
+ * @pf: board private structure
+ **/
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi;
+
+ /* quick workaround for an NVM issue that leaves a critical register
+ * uninitialized
+ */
+ if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
+ static const u32 hkey[] = {
+ 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
+ 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
+ 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
+ 0x95b3a76d};
+ int i;
+
+ for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
+ wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
+ }
+
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+
+ /* find existing VSI and see if it needs configuring */
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+
+ /* create a new VSI if none exists */
+ if (!vsi) {
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+ pf->vsi[pf->lan_vsi]->seid, 0);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ return;
+ }
+ }
+
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
+}
+
+/**
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_teardown(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi;
+
+ i40e_fdir_filter_exit(pf);
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_release(vsi);
+}
+
+/**
+ * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
+ * @vsi: PF main vsi
+ * @seid: seid of main or channel VSIs
+ *
+ * Rebuilds cloud filters associated with main VSI and channel VSIs if they
+ * existed before reset
+ **/
+static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+{
+ struct i40e_cloud_filter *cfilter;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+ int ret;
+
+ /* Add cloud filters back if they exist */
+ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
+ cloud_node) {
+ if (cfilter->seid != seid)
+ continue;
+
+ if (cfilter->dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
+ true);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+
+ if (ret) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to rebuild cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
+ * @vsi: PF main vsi
+ *
+ * Rebuilds channel VSIs if they existed before reset
+ **/
+static int i40e_rebuild_channels(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ int ret;
+
+ if (list_empty(&vsi->ch_list))
+ return 0;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ if (!ch->initialized)
+ break;
+ /* Proceed with creation of channel (VMDq2) VSI */
+ ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "failed to rebuild channels using uplink_seid %u\n",
+ vsi->uplink_seid);
+ return ret;
+ }
+ /* Reconfigure TX queues using QTX_CTL register */
+ ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "failed to configure TX rings for channel %u\n",
+ ch->seid);
+ return ret;
+ }
+ /* update 'next_base_queue' */
+ vsi->next_base_queue = vsi->next_base_queue +
+ ch->num_queue_pairs;
+ if (ch->max_tx_rate) {
+ u64 credits = ch->max_tx_rate;
+
+ if (i40e_set_bw_limit(vsi, ch->seid,
+ ch->max_tx_rate))
+ return -EINVAL;
+
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ ch->max_tx_rate,
+ credits,
+ ch->seid);
+ }
+ ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
+ if (ret) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Failed to rebuild cloud filters for channel VSI %u\n",
+ ch->seid);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * i40e_clean_xps_state - clean xps state for every tx_ring
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i])
+ clear_bit(__I40E_TX_XPS_INIT_DONE,
+ vsi->tx_rings[i]->state);
+}
+
+/**
+ * i40e_prep_for_reset - prep for the core to reset
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for PF Reset.
+ **/
+static void i40e_prep_for_reset(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret = 0;
+ u32 v;
+
+ clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ return;
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
+
+ dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+ /* quiesce the VSIs and their queues that are not already DOWN */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v]) {
+ i40e_clean_xps_state(pf->vsi[v]);
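+			/* drop the cached seid; the rebuild assigns a new one */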
+ pf->vsi[v]->seid = 0;
+ }
+ }
+
+ i40e_shutdown_adminq(&pf->hw);
+
+ /* call shutdown HMC */
+ if (hw->hmc.hmc_obj) {
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+ "shutdown_lan_hmc failed: %d\n", ret);
+ }
+
+ /* Save the current PTP time so that we can restore the time after the
+ * reset completes.
+ */
+ i40e_ptp_save_hw_time(pf);
+}
+
+/**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+
+ dv.major_version = 0xff;
+ dv.minor_version = 0xff;
+ dv.build_version = 0xff;
+ dv.subbuild_version = 0;
+ strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
+ * i40e_get_oem_version - get OEM specific version information
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_get_oem_version(struct i40e_hw *hw)
+{
+ u16 block_offset = 0xffff;
+ u16 block_length = 0;
+ u16 capabilities = 0;
+ u16 gen_snap = 0;
+ u16 release = 0;
+
+#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
+#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
+#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
+#define I40E_NVM_OEM_GEN_OFFSET 0x02
+#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
+#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
+#define I40E_NVM_OEM_LENGTH 3
+
+ /* Check if pointer to OEM version block is valid. */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
+ if (block_offset == 0xffff)
+ return;
+
+ /* Check if OEM version block has correct length. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
+ &block_length);
+ if (block_length < I40E_NVM_OEM_LENGTH)
+ return;
+
+ /* Check if OEM version format is as expected. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
+ &capabilities);
+ if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
+ return;
+
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
+ &gen_snap);
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
+ &release);
+ hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
+ hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
+}
+
+/**
+ * i40e_reset - wait for a core reset to finish; reset the PF if CoreR not seen
+ * @pf: board private structure
+ **/
+static int i40e_reset(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_pf_reset(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ set_bit(__I40E_RESET_FAILED, pf->state);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ } else {
+ pf->pfr_count++;
+ }
+ return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+ u32 val;
+ int v;
+
+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+ is_recovery_mode_reported)
+ i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+
+ if (test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RECOVERY_MODE, pf->state))
+ goto clear_recovery;
+ dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
+
+ /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ ret = i40e_init_adminq(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto clear_recovery;
+ }
+ i40e_get_oem_version(&pf->hw);
+
+	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+		/* The following delay is necessary for firmware update. */
+		mdelay(1000);
+		/* re-verify the eeprom if we just had an EMP reset */
+		i40e_verify_eeprom(pf);
+	}
+
+ /* if we are going out of or into recovery mode we have to act
+ * accordingly with regard to resources initialization
+ * and deinitialization
+ */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ if (i40e_get_capabilities(pf,
+ i40e_aqc_opc_list_func_capabilities))
+ goto end_unlock;
+
+ if (is_recovery_mode_reported) {
+ /* we're staying in recovery mode so we'll reinitialize
+ * misc vector here
+ */
+ if (i40e_setup_misc_vector_for_recovery_mode(pf))
+ goto end_unlock;
+ } else {
+ if (!lock_acquired)
+ rtnl_lock();
+ /* we're going out of recovery mode so we'll free
+ * the IRQ allocated specifically for recovery mode
+ * and restore the interrupt scheme
+ */
+ free_irq(pf->pdev->irq, pf);
+ i40e_clear_interrupt_scheme(pf);
+ if (i40e_restore_interrupt_scheme(pf))
+ goto end_unlock;
+ }
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* bail out in case recovery mode was detected, as there is
+ * no need for further configuration.
+ */
+ goto end_unlock;
+ }
+
+ i40e_clear_pxe_mode(hw);
+ ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
+ if (ret)
+ goto end_core_reset;
+
+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+
+#ifdef CONFIG_I40E_DCB
+ /* Enable FW to write a default DCB config on link-up
+ * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
+ * is not supported with new link speed
+ */
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ i40e_aq_set_dcb_parameters(hw, false, NULL);
+ } else {
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
+ i40e_aq_set_dcb_parameters(hw, false, NULL);
+ dev_warn(&pf->pdev->dev,
+ "DCB is not supported for X710-T*L 2.5/5G speeds\n");
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ } else {
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
+ ret);
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
+ }
+ }
+ }
+
+#endif /* CONFIG_I40E_DCB */
+ if (!lock_acquired)
+ rtnl_lock();
+ ret = i40e_setup_pf_switch(pf, reinit, true);
+ if (ret)
+ goto end_unlock;
+
+ /* The driver only wants link up/down and module qualification
+ * reports from firmware. Note the negative logic.
+ */
+ ret = i40e_aq_set_phy_int_mask(&pf->hw,
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ /* Rebuild the VSIs and VEBs that existed before reset.
+ * They are still in our local switch element arrays, so only
+ * need to rebuild the switch model in the HW.
+ *
+ * If there were VEBs but the reconstitution failed, we'll try
+ * to recover minimal use by getting the basic PF VSI working.
+ */
+ if (vsi->uplink_seid != pf->mac_seid) {
+ dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
+ /* find the one VEB connected to the MAC, and find orphans */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+
+ if (pf->veb[v]->uplink_seid == pf->mac_seid ||
+ pf->veb[v]->uplink_seid == 0) {
+ ret = i40e_reconstitute_veb(pf->veb[v]);
+
+ if (!ret)
+ continue;
+
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (pf->veb[v]->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ vsi->uplink_seid = pf->mac_seid;
+ break;
+ } else if (pf->veb[v]->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
+ }
+ }
+ }
+ }
+
+ if (vsi->uplink_seid == pf->mac_seid) {
+ dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+ /* no VEB, so rebuild only the Main VSI */
+ ret = i40e_add_vsi(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of Main VSI failed: %d\n", ret);
+ goto end_unlock;
+ }
+ }
+
+ if (vsi->mqprio_qopt.max_rate[0]) {
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
+ u64 credits = 0;
+
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (ret)
+ goto end_unlock;
+
+ credits = max_tx_rate;
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ max_tx_rate,
+ credits,
+ vsi->seid);
+ }
+
+ ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
+ if (ret)
+ goto end_unlock;
+
+	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
+ * for this main VSI if they exist
+ */
+ ret = i40e_rebuild_channels(vsi);
+ if (ret)
+ goto end_unlock;
+
+ /* Reconfigure hardware for allowing smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+#define I40E_REG_MSS 0x000E64DC
+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
+#define I40E_64BYTE_MSS 0x400000
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
+ if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ /* reinit the misc interrupt */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ ret = i40e_setup_misc_vector(pf);
+ if (ret)
+ goto end_unlock;
+ }
+
+ /* Add a filter to drop all Flow control frames from any VSI from being
+ * transmitted. By doing so we stop a malicious VF from sending out
+ * PAUSE or PFC frames and potentially controlling traffic for other
+ * PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
+ /* restart the VSIs that were rebuilt and running before the reset */
+ i40e_pf_unquiesce_all_vsi(pf);
+
+ /* Release the RTNL lock before we start resetting VFs */
+ if (!lock_acquired)
+ rtnl_unlock();
+
+ /* Restore promiscuous settings */
+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+ "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
+ pf->cur_promisc ? "on" : "off",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ i40e_reset_all_vfs(pf, true);
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* We've already released the lock, so don't do it again */
+ goto end_core_reset;
+
+end_unlock:
+ if (!lock_acquired)
+ rtnl_unlock();
+end_core_reset:
+ clear_bit(__I40E_RESET_FAILED, pf->state);
+clear_recovery:
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the RTNL lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired)
+{
+ int ret;
+
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_reset(pf);
+ if (!ret)
+ i40e_rebuild(pf, reinit, lock_acquired);
+}
+
+/**
+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
+ * @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
+{
+ i40e_prep_for_reset(pf);
+ i40e_reset_and_rebuild(pf, false, lock_acquired);
+}
+
+/**
+ * i40e_handle_mdd_event - handle any Malicious Driver Detection event
+ * @pf: pointer to the PF structure
+ *
+ * Called from the MDD irq handler to identify possibly malicious VFs
+ **/
+static void i40e_handle_mdd_event(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ bool mdd_detected = false;
+ struct i40e_vf *vf;
+ u32 reg;
+ int i;
+
+ if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+ return;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+ I40E_GL_MDET_TX_PF_NUM_SHIFT;
+ u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+ I40E_GL_MDET_TX_VF_NUM_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+ I40E_GL_MDET_TX_EVENT_SHIFT;
+ u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ pf->hw.func_caps.base_queue;
+ if (netif_msg_tx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
+ event, queue, pf_num, vf_num);
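+		/* writing all ones clears the latched event so the next
+		 * MDD occurrence can be captured
+		 */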
+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+ I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+ I40E_GL_MDET_RX_EVENT_SHIFT;
+ u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ pf->hw.func_caps.base_queue;
+ if (netif_msg_rx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ reg = rd32(hw, I40E_PF_MDET_TX);
+ if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+ dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
+ }
+ reg = rd32(hw, I40E_PF_MDET_RX);
+ if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+ dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
+ }
+ }
+
+ /* see if one of the VFs needs its hand slapped */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ vf = &(pf->vf[i]);
+ reg = rd32(hw, I40E_VP_MDET_TX(i));
+ if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ }
+
+ reg = rd32(hw, I40E_VP_MDET_RX(i));
+ if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ }
+ }
+
+ /* re-enable mdd interrupt cause */
+ clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_service_task - Run the driver's async subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40e_service_task(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work,
+ struct i40e_pf,
+ service_task);
+ unsigned long start_time = jiffies;
+
+ /* don't bother with service tasks if a reset is in progress */
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_SUSPENDED, pf->state))
+ return;
+
+ if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
+ return;
+
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_sync_filters_subtask(pf);
+ i40e_reset_subtask(pf);
+ i40e_handle_mdd_event(pf);
+ i40e_vc_process_vflr_event(pf);
+ i40e_watchdog_subtask(pf);
+ i40e_fdir_reinit_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
+ true);
+ } else {
+ i40e_client_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
+ pf->state))
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ }
+ i40e_sync_filters_subtask(pf);
+ } else {
+ i40e_reset_subtask(pf);
+ }
+
+ i40e_clean_adminq_subtask(pf);
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+ /* If the tasks have taken longer than one timer cycle or there
+ * is more work to be done, reschedule the service task now
+ * rather than wait for the timer to tick again.
+ */
+ if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
+ test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_service_timer - timer callback
+ * @t: timer list pointer
+ **/
+static void i40e_service_timer(struct timer_list *t)
+{
+ struct i40e_pf *pf = from_timer(pf, t, service_timer);
+
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ vsi->alloc_queue_pairs = pf->num_lan_qps;
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ vsi->num_q_vectors = pf->num_lan_msix;
+ else
+ vsi->num_q_vectors = 1;
+
+ break;
+
+ case I40E_VSI_FDIR:
+ vsi->alloc_queue_pairs = 1;
+ vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = pf->num_fdsb_msix;
+ break;
+
+ case I40E_VSI_VMDQ2:
+ vsi->alloc_queue_pairs = pf->num_vmdq_qps;
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = pf->num_vmdq_msix;
+ break;
+
+ case I40E_VSI_SRIOV:
+ vsi->alloc_queue_pairs = pf->num_vf_qps;
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ break;
+
+ default:
+ WARN_ON(1);
+ return -ENODATA;
+ }
+
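+	/* a kdump crash kernel runs with minimal memory, so use the
+	 * smallest supported descriptor counts there
+	 */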
+ if (is_kdump_kernel()) {
+ vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
+ vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+ struct i40e_ring **next_rings;
+ int size;
+ int ret = 0;
+
+	/* allocate memory for the Tx, XDP Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+ if (!vsi->tx_rings)
+ return -ENOMEM;
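+	/* the single allocation is carved up as
+	 * [ Tx ring ptrs | XDP Tx ring ptrs (if enabled) | Rx ring ptrs ]
+	 */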
+ next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ vsi->xdp_rings = next_rings;
+ next_rings += vsi->alloc_queue_pairs;
+ }
+ vsi->rx_rings = next_rings;
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
+ }
+ return ret;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+ return ret;
+}
+
+/**
+ * i40e_vsi_mem_alloc - Allocates the next available struct i40e_vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * On error: returns error code (negative)
+ * On success: returns vsi index in PF (positive)
+ **/
+static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+{
+ int ret = -ENODEV;
+ struct i40e_vsi *vsi;
+ int vsi_idx;
+ int i;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VSI list may be fragmented if VSI creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free VSIs in the list.
+ *
+ * find next empty vsi slot, looping back around if necessary
+ */
+ i = pf->next_vsi;
+ while (i < pf->num_alloc_vsi && pf->vsi[i])
+ i++;
+ if (i >= pf->num_alloc_vsi) {
+ i = 0;
+ while (i < pf->next_vsi && pf->vsi[i])
+ i++;
+ }
+
+ if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
+ vsi_idx = i; /* Found one! */
+ } else {
+ ret = -ENODEV;
+ goto unlock_pf; /* out of VSI slots! */
+ }
+ pf->next_vsi = ++i;
+
+ vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
+ if (!vsi) {
+ ret = -ENOMEM;
+ goto unlock_pf;
+ }
+ vsi->type = type;
+ vsi->back = pf;
+ set_bit(__I40E_VSI_DOWN, vsi->state);
+ vsi->flags = 0;
+ vsi->idx = vsi_idx;
+ vsi->int_rate_limit = 0;
+ vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+ pf->rss_table_size : 64;
+ vsi->netdev_registered = false;
+ vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
+ hash_init(vsi->mac_filter_hash);
+ vsi->irqs_ready = false;
+
+ if (type == I40E_VSI_MAIN) {
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_rings;
+ }
+
+ ret = i40e_set_num_rings_in_vsi(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = i40e_vsi_alloc_arrays(vsi, true);
+ if (ret)
+ goto err_rings;
+
+ /* Setup default MSIX irq handler for VSI */
+ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+
+ /* Initialize VSI lock */
+ spin_lock_init(&vsi->mac_filter_hash_lock);
+ pf->vsi[vsi_idx] = vsi;
+ ret = vsi_idx;
+ goto unlock_pf;
+
+err_rings:
+ bitmap_free(vsi->af_xdp_zc_qps);
+ pf->next_vsi = i - 1;
+ kfree(vsi);
+unlock_pf:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: VSI pointer
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+ /* free the ring and vector containers */
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ kfree(vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ vsi->rx_rings = NULL;
+ vsi->xdp_rings = NULL;
+}
+
+/**
+ * i40e_clear_rss_config_user - clear the user configured RSS hash keys
+ * and lookup table
+ * @vsi: Pointer to VSI structure
+ */
+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
+{
+ if (!vsi)
+ return;
+
+ kfree(vsi->rss_hkey_user);
+ vsi->rss_hkey_user = NULL;
+
+ kfree(vsi->rss_lut_user);
+ vsi->rss_lut_user = NULL;
+}
+
+/**
+ * i40e_vsi_clear - Deallocate the VSI provided
+ * @vsi: the VSI being un-configured
+ **/
+static int i40e_vsi_clear(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf;
+
+ if (!vsi)
+ return 0;
+
+ if (!vsi->back)
+ goto free_vsi;
+ pf = vsi->back;
+
+ mutex_lock(&pf->switch_mutex);
+ if (!pf->vsi[vsi->idx]) {
+ dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
+ vsi->idx, vsi->idx, vsi->type);
+ goto unlock_vsi;
+ }
+
+ if (pf->vsi[vsi->idx] != vsi) {
+ dev_err(&pf->pdev->dev,
+ "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
+ pf->vsi[vsi->idx]->idx,
+ pf->vsi[vsi->idx]->type,
+ vsi->idx, vsi->type);
+ goto unlock_vsi;
+ }
+
+ /* updates the PF for this cleared vsi */
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+
+ bitmap_free(vsi->af_xdp_zc_qps);
+ i40e_vsi_free_arrays(vsi, true);
+ i40e_clear_rss_config_user(vsi);
+
+ pf->vsi[vsi->idx] = NULL;
+ if (vsi->idx < pf->next_vsi)
+ pf->next_vsi = vsi->idx;
+
+unlock_vsi:
+ mutex_unlock(&pf->switch_mutex);
+free_vsi:
+ kfree(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ int i;
+
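+	/* the Tx, Rx and (optional) XDP rings of a queue pair share one
+	 * allocation (see i40e_alloc_rings), so freeing the Tx ring via
+	 * kfree_rcu() releases all of them
+	 */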
+ if (vsi->tx_rings && vsi->tx_rings[0]) {
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
+ if (vsi->xdp_rings)
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
+ }
+ }
+}
+
+/**
+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_alloc_rings(struct i40e_vsi *vsi)
+{
+ int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_ring *ring;
+
+ /* Set basic values in the rings to be used later during open() */
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+		/* allocate space for Tx, Rx and (optional) XDP Tx in one shot */
+ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_out;
+
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_tx_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ ring->itr_setting = pf->tx_itr_default;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto setup_rx;
+
+ ring->queue_index = vsi->alloc_queue_pairs + i;
+ ring->reg_idx = vsi->base_queue + ring->queue_index;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = NULL;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_tx_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ set_ring_xdp(ring);
+ ring->itr_setting = pf->tx_itr_default;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
+
+setup_rx:
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_rx_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ ring->itr_setting = pf->rx_itr_default;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
+ }
+
+ return 0;
+
+err_out:
+ i40e_vsi_clear_rings(vsi);
+ return -ENOMEM;
+}
+
+/**
+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
+ * @pf: board private structure
+ * @vectors: the number of MSI-X vectors to request
+ *
+ * Returns the number of vectors reserved, or error
+ **/
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+{
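+	/* pci_enable_msix_range() hands back the number of vectors
+	 * actually granted (at least I40E_MIN_MSIX) or a negative errno
+	 */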
+ vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ I40E_MIN_MSIX, vectors);
+ if (vectors < 0) {
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector reservation failed: %d\n", vectors);
+ vectors = 0;
+ }
+
+ return vectors;
+}
+
+/**
+ * i40e_init_msix - Setup the MSIX capability
+ * @pf: board private structure
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns the number of vectors reserved or negative on failure
+ **/
+static int i40e_init_msix(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int cpus, extra_vectors;
+ int vectors_left;
+ int v_budget, i;
+ int v_actual;
+ int iwarp_requested = 0;
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return -ENODEV;
+
+ /* The number of vectors we'll request will be comprised of:
+ * - Add 1 for "other" cause for Admin Queue events, etc.
+ * - The number of LAN queue pairs
+ * - Queues being used for RSS.
+	 *   We don't need as many as max_rss_size vectors;
+	 *   use rss_size instead in the calculation since that
+	 *   is governed by the number of CPUs in the system.
+ * - assumes symmetric Tx/Rx pairing
+ * - The number of VMDq pairs
+ * - The CPU count within the NUMA node if iWARP is enabled
+ * Once we count this up, try the request.
+ *
+ * If we can't get what we want, we'll simplify to nearly nothing
+ * and try again. If that still fails, we punt.
+ */
+ vectors_left = hw->func_caps.num_msix_vectors;
+ v_budget = 0;
+
+ /* reserve one vector for miscellaneous handler */
+ if (vectors_left) {
+ v_budget++;
+ vectors_left--;
+ }
+
+ /* reserve some vectors for the main PF traffic queues. Initially we
+ * only reserve at most 50% of the available vectors, in the case that
+ * the number of online CPUs is large. This ensures that we can enable
+ * extra features as well. Once we've enabled the other features, we
+ * will use any remaining vectors to reach as close as we can to the
+ * number of online CPUs.
+ */
+ cpus = num_online_cpus();
+ pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
+ vectors_left -= pf->num_lan_msix;
+
+ /* reserve one vector for sideband flow director */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (vectors_left) {
+ pf->num_fdsb_msix = 1;
+ v_budget++;
+ vectors_left--;
+ } else {
+ pf->num_fdsb_msix = 0;
+ }
+ }
+
+ /* can we reserve enough for iWARP? */
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ iwarp_requested = pf->num_iwarp_msix;
+
+ if (!vectors_left)
+ pf->num_iwarp_msix = 0;
+ else if (vectors_left < pf->num_iwarp_msix)
+ pf->num_iwarp_msix = 1;
+ v_budget += pf->num_iwarp_msix;
+ vectors_left -= pf->num_iwarp_msix;
+ }
+
+ /* any vectors left over go for VMDq support */
+ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+ if (!vectors_left) {
+ pf->num_vmdq_msix = 0;
+ pf->num_vmdq_qps = 0;
+ } else {
+ int vmdq_vecs_wanted =
+ pf->num_vmdq_vsis * pf->num_vmdq_qps;
+ int vmdq_vecs =
+ min_t(int, vectors_left, vmdq_vecs_wanted);
+
+ /* if we're short on vectors for what's desired, we limit
+ * the queues per vmdq. If this is still more than are
+ * available, the user will need to change the number of
+ * queues/vectors used by the PF later with the ethtool
+ * channels command
+ */
+ if (vectors_left < vmdq_vecs_wanted) {
+ pf->num_vmdq_qps = 1;
+ vmdq_vecs_wanted = pf->num_vmdq_vsis;
+ vmdq_vecs = min_t(int,
+ vectors_left,
+ vmdq_vecs_wanted);
+ }
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+ v_budget += vmdq_vecs;
+ vectors_left -= vmdq_vecs;
+ }
+ }
+
+ /* On systems with a large number of SMP cores, we previously limited
+ * the number of vectors for num_lan_msix to be at most 50% of the
+ * available vectors, to allow for other features. Now, we add back
+ * the remaining vectors. However, we ensure that the total
+ * num_lan_msix will not exceed num_online_cpus(). To do this, we
+ * calculate the number of vectors we can add without going over the
+ * cap of CPUs. For systems with a small number of CPUs this will be
+ * zero.
+ */
+ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+ pf->num_lan_msix += extra_vectors;
+ vectors_left -= extra_vectors;
+
+ WARN(vectors_left < 0,
+ "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+ v_budget += pf->num_lan_msix;
+ pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!pf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < v_budget; i++)
+ pf->msix_entries[i].entry = i;
+ v_actual = i40e_reserve_msix_vectors(pf, v_budget);
+
+ if (v_actual < I40E_MIN_MSIX) {
+ pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ pci_disable_msix(pf->pdev);
+ return -ENODEV;
+
+ } else if (v_actual == I40E_MIN_MSIX) {
+ /* Adjust for minimal MSIX use */
+ pf->num_vmdq_vsis = 0;
+ pf->num_vmdq_qps = 0;
+ pf->num_lan_qps = 1;
+ pf->num_lan_msix = 1;
+
+ } else if (v_actual != v_budget) {
+ /* If we have limited resources, we will start with no vectors
+ * for the special features and then allocate vectors to some
+ * of these features based on the policy and at the end disable
+ * the features that did not get any vectors.
+ */
+ int vec;
+
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
+ v_actual, v_budget);
+ /* reserve the misc vector */
+ vec = v_actual - 1;
+
+ /* Scale vector usage down */
+ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
+ pf->num_vmdq_vsis = 1;
+ pf->num_vmdq_qps = 1;
+
+ /* partition out the remaining vectors */
+ switch (vec) {
+ case 2:
+ pf->num_lan_msix = 1;
+ break;
+ case 3:
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->num_lan_msix = 1;
+ pf->num_iwarp_msix = 1;
+ } else {
+ pf->num_lan_msix = 2;
+ }
+ break;
+ default:
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->num_iwarp_msix = min_t(int, (vec / 3),
+ iwarp_requested);
+ pf->num_vmdq_vsis = min_t(int, (vec / 3),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ } else {
+ pf->num_vmdq_vsis = min_t(int, (vec / 2),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ }
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ pf->num_fdsb_msix = 1;
+ vec--;
+ }
+ pf->num_lan_msix = min_t(int,
+ (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
+ pf->num_lan_msix);
+ pf->num_lan_qps = pf->num_lan_msix;
+ break;
+ }
+ }
+
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (pf->num_fdsb_msix == 0)) {
+ dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ }
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ (pf->num_vmdq_msix == 0)) {
+ dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ }
+
+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ (pf->num_iwarp_msix == 0)) {
+ dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ }
+ i40e_debug(&pf->hw, I40E_DEBUG_INIT,
+ "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
+ pf->num_lan_msix,
+ pf->num_vmdq_msix * pf->num_vmdq_vsis,
+ pf->num_fdsb_msix,
+ pf->num_iwarp_msix);
+
+ return v_actual;
+}
+
+/**
+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
+
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);
+
+ /* tie q_vector and vsi together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err, v_idx, num_q_vectors;
+
+ /* if not MSIX, give the one vector only to the LAN VSI */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_q_vectors = vsi->num_q_vectors;
+ else if (vsi == pf->vsi[pf->lan_vsi])
+ num_q_vectors = 1;
+ else
+ return -EINVAL;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ i40e_free_q_vector(vsi, v_idx);
+
+ return err;
+}
+
+/**
+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ **/
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
+{
+ int vectors = 0;
+ ssize_t size;
+
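+	/* interrupt modes are tried in order: MSI-X first, then MSI,
+	 * then legacy INTx as a last resort
+	 */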
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ vectors = i40e_init_msix(pf);
+ if (vectors < 0) {
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+
+ /* rework the queue expectations without MSIX */
+ i40e_determine_queue_usage(pf);
+ }
+ }
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
+ vectors = pci_enable_msi(pf->pdev);
+ if (vectors < 0) {
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+ vectors);
+ pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+ }
+ vectors = 1; /* one MSI or Legacy vector */
+ }
+
+ if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
+
+ /* set up vector assignment tracking */
+ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->irq_pile)
+ return -ENOMEM;
+
+ pf->irq_pile->num_entries = vectors;
+
+ /* track first vector for misc interrupts, ignore return */
+ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+
+ return 0;
+}
+
+/**
+ * i40e_restore_interrupt_scheme - Restore the interrupt scheme
+ * @pf: private board data structure
+ *
+ * Restore the interrupt scheme that was cleared when we suspended the
+ * device. This should be called during resume to re-allocate the q_vectors
+ * and reacquire IRQs.
+ */
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
+{
+ int err, i;
+
+ /* We cleared the MSI and MSI-X flags when disabling the old interrupt
+	 * scheme. We need to re-enable them here in order to attempt to
+ * re-acquire the MSI or MSI-X vectors
+ */
+ pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ return err;
+
+ /* Now that we've re-acquired IRQs, we need to remap the vectors and
+ * rings together again.
+ */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i]) {
+ err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
+ if (err)
+ goto err_unwind;
+ i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
+ }
+ }
+
+ err = i40e_setup_misc_vector(pf);
+ if (err)
+ goto err_unwind;
+
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ i40e_client_update_msix_info(pf);
+
+ return 0;
+
+err_unwind:
+ while (i--) {
+ if (pf->vsi[i])
+ i40e_vsi_free_q_vectors(pf->vsi[i]);
+ }
+
+ return err;
+}
+
+/**
+ * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
+ * non queue events in recovery mode
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
+ * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
+ * This is handled differently than in normal mode since no Tx/Rx resources
+ * are being allocated in recovery mode.
+ **/
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
+{
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI-X misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ } else {
+ u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+
+ err = request_irq(pf->pdev->irq, i40e_intr, flags,
+ pf->int_name, pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI/legacy misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ i40e_enable_misc_int_causes(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * when in MSI or Legacy interrupt mode.
+ **/
+static int i40e_setup_misc_vector(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ /* Only request the IRQ once, the first time through. */
+ if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
+ err = request_irq(pf->msix_entries[0].vector,
+ i40e_intr, 0, pf->int_name, pf);
+ if (err) {
+ clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
+ dev_info(&pf->pdev->dev,
+ "request_irq for %s failed: %d\n",
+ pf->int_name, err);
+ return -EFAULT;
+ }
+ }
+
+ i40e_enable_misc_int_causes(pf);
+
+ /* associate no queues to the misc vector */
+ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
+
+ i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(pf);
+
+ return err;
+}
+
+/**
+ * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
+ * @seed: Buffer to store the hash keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Return 0 on success, negative on failure
+ */
+static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int ret = 0;
+
+ if (seed) {
+ ret = i40e_aq_get_rss_key(hw, vsi->id,
+ (struct i40e_aqc_get_set_rss_key_data *)seed);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot get RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+ }
+ }
+
+ if (lut) {
+ bool pf_lut = vsi->type == I40E_VSI_MAIN;
+
+ ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot get RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
+ * @vsi: Pointer to vsi structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
+ const u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
+ u8 i;
+
+ /* Fill out hash function seed */
+ if (seed) {
+ u32 *seed_dw = (u32 *)seed;
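+		/* the 52-byte hash key is written as 13 32-bit registers */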
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+ }
+ }
+
+ if (lut) {
+ u32 *lut_dw = (u32 *)lut;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
+ }
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
+ u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 i;
+
+ if (seed) {
+ u32 *seed_dw = (u32 *)seed;
+
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+ }
+ if (lut) {
+ u32 *lut_dw = (u32 *)lut;
+
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss - Configure RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(vsi, seed, lut, lut_size);
+ else
+ return i40e_config_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_get_rss - Get RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ return i40e_get_rss_aq(vsi, seed, lut, lut_size);
+ else
+ return i40e_get_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_fill_rss_lut - Fill the RSS lookup table with default values
+ * @pf: Pointer to board private structure
+ * @lut: Lookup table
+ * @rss_table_size: Lookup table size
+ * @rss_size: Range of queue number for hashing
+ */
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size)
+{
+ u16 i;
+
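+	/* round-robin spread of queues across the table, e.g.
+	 * rss_table_size = 8 and rss_size = 3 gives
+	 * lut = { 0, 1, 2, 0, 1, 2, 0, 1 }
+	 */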
+ for (i = 0; i < rss_table_size; i++)
+ lut[i] = i % rss_size;
+}
+
+/**
+ * i40e_pf_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_pf_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ u8 *lut;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ u64 hena;
+ int ret;
+
+ /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
+ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+ hena |= i40e_pf_get_default_rss_hena(pf);
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Determine the RSS table size based on the hardware capabilities */
+ reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
+ reg_val = (pf->rss_table_size == 512) ?
+ (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+ (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
+
+ /* Determine the RSS size of the VSI */
+ if (!vsi->rss_size) {
+ u16 qcount;
+ /* If the firmware does something weird during VSI init, we
+ * could end up with zero TCs. Check for that to avoid
+ * divide-by-zero. It probably won't pass traffic, but it also
+ * won't panic.
+ */
+ qcount = vsi->num_queue_pairs /
+ (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+ }
+ if (!vsi->rss_size)
+ return -EINVAL;
+
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* Use user configured lut if there is one, otherwise use default */
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+
+ /* Use user configured hash key if there is one, otherwise
+ * use default.
+ */
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ kfree(lut);
+
+ return ret;
+}
+
+/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ int new_rss_size;
+
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
+ queue_count = min_t(int, queue_count, num_online_cpus());
+ new_rss_size = min_t(int, queue_count, pf->rss_size_max);
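+	/* e.g. a request for 16 queues on an 8-CPU system is clamped
+	 * to 8 before being compared against the current queue count
+	 */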
+
+ if (queue_count != vsi->num_queue_pairs) {
+ u16 qcount;
+
+ vsi->req_queue_pairs = queue_count;
+ i40e_prep_for_reset(pf);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return pf->alloc_rss_size;
+
+ pf->alloc_rss_size = new_rss_size;
+
+ i40e_reset_and_rebuild(pf, true, true);
+
+ /* Discard the user configured hash keys and lut, if less
+ * queues are enabled.
+ */
+ if (queue_count < vsi->rss_size) {
+ i40e_clear_rss_config_user(vsi);
+ dev_dbg(&pf->pdev->dev,
+ "discard user configured hash keys and lut\n");
+ }
+
+ /* Reset vsi->rss_size, as number of enabled queues changed */
+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+
+ i40e_pf_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
+ vsi->req_queue_pairs, pf->rss_size_max);
+ return pf->alloc_rss_size;
+}
+
+/**
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
+ * @pf: board private structure
+ **/
+int i40e_get_partition_bw_setting(struct i40e_pf *pf)
+{
+ bool min_valid, max_valid;
+ u32 max_bw, min_bw;
+ int status;
+
+ status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+ &min_valid, &max_valid);
+
+ if (!status) {
+ if (min_valid)
+ pf->min_bw = min_bw;
+ if (max_valid)
+ pf->max_bw = max_bw;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
+ * @pf: board private structure
+ **/
+int i40e_set_partition_bw_setting(struct i40e_pf *pf)
+{
+ struct i40e_aqc_configure_partition_bw_data bw_data;
+ int status;
+
+ memset(&bw_data, 0, sizeof(bw_data));
+
+ /* Set the valid bit for this PF */
+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
+
+ /* Set the new bandwidths */
+ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
+ * @pf: board private structure
+ **/
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+{
+ /* Commit temporary BW setting to permanent NVM image */
+ enum i40e_admin_queue_err last_aq_status;
+ u16 nvm_word;
+ int ret;
+
+ if (pf->hw.partition_id != 1) {
+ dev_info(&pf->pdev->dev,
+			 "Commit BW only works on partition 1! This is partition %d\n",
+ pf->hw.partition_id);
+ ret = I40E_NOT_SUPPORTED;
+ goto bw_commit_out;
+ }
+
+ /* Acquire NVM for read access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for read access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ goto bw_commit_out;
+ }
+
+ /* Read word 0x10 of NVM - SW compatibility word 1 */
+ ret = i40e_aq_read_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word), &nvm_word,
+ false, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ goto bw_commit_out;
+ }
+
+ /* Wait a bit for NVM release to complete */
+ msleep(50);
+
+ /* Acquire NVM for write access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for write access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ goto bw_commit_out;
+ }
+ /* Write it back out unchanged to initiate update NVM,
+ * which will force a write of the shadow (alt) RAM to
+ * the NVM - thus storing the bandwidth values permanently.
+ */
+ ret = i40e_aq_update_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word),
+ &nvm_word, true, 0, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "BW settings NOT SAVED, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+bw_commit_out:
+
+ return ret;
+}
+
+/**
+ * i40e_is_total_port_shutdown_enabled - read NVM and return whether the
+ * total port shutdown feature is enabled for this PF
+ * @pf: board private structure
+ **/
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+{
+#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
+#define I40E_FEATURES_ENABLE_PTR 0x2A
+#define I40E_CURRENT_SETTING_PTR 0x2B
+#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
+#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
+#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
+#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
+ int read_status = I40E_SUCCESS;
+ u16 sr_emp_sr_settings_ptr = 0;
+ u16 features_enable = 0;
+ u16 link_behavior = 0;
+ bool ret = false;
+
+ read_status = i40e_read_nvm_word(&pf->hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ &sr_emp_sr_settings_ptr);
+ if (read_status)
+ goto err_nvm;
+ read_status = i40e_read_nvm_word(&pf->hw,
+ sr_emp_sr_settings_ptr +
+ I40E_FEATURES_ENABLE_PTR,
+ &features_enable);
+ if (read_status)
+ goto err_nvm;
+ if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
+ read_status = i40e_read_nvm_module_data(&pf->hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ I40E_CURRENT_SETTING_PTR,
+ I40E_LINK_BEHAVIOR_WORD_OFFSET,
+ I40E_LINK_BEHAVIOR_WORD_LENGTH,
+ &link_behavior);
+ if (read_status)
+ goto err_nvm;
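+		/* each port owns a 4-bit field in the link-behavior word;
+		 * bit 0 of this port's field is the OS-forced link behavior
+		 * enable
+		 */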
+ link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
+ ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
+ }
+ return ret;
+
+err_nvm:
+ dev_warn(&pf->pdev->dev,
+ "total-port-shutdown feature is off due to read nvm error: %pe\n",
+ ERR_PTR(read_status));
+ return ret;
+}
+
+/**
+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
+ * @pf: board private structure to initialize
+ *
+ * i40e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int i40e_sw_init(struct i40e_pf *pf)
+{
+ int err = 0;
+ int size;
+ u16 pow;
+
+ /* Set default capability flags */
+ pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+ I40E_FLAG_MSI_ENABLED |
+ I40E_FLAG_MSIX_ENABLED;
+
+ /* Set default ITR */
+ pf->rx_itr_default = I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_TX_DEF;
+
+ /* Depending on PF configurations, it is possible that the RSS
+ * maximum might end up larger than the available queues
+ */
+ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
+ pf->alloc_rss_size = 1;
+ pf->rss_table_size = pf->hw.func_caps.rss_table_size;
+ pf->rss_size_max = min_t(int, pf->rss_size_max,
+ pf->hw.func_caps.num_tx_qp);
+
+	/* find the next higher power-of-2 of num cpus, e.g. 6 CPUs -> 8 */
+ pow = roundup_pow_of_two(num_online_cpus());
+ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
+ if (pf->hw.func_caps.rss) {
+ pf->flags |= I40E_FLAG_RSS_ENABLED;
+ pf->alloc_rss_size = min_t(int, pf->rss_size_max,
+ num_online_cpus());
+ }
+
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ if (i40e_get_partition_bw_setting(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Could not get partition bw settings\n");
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Partition BW Min = %8.8x, Max = %8.8x\n",
+ pf->min_bw, pf->max_bw);
+
+ /* nudge the Tx scheduler */
+ i40e_set_partition_bw_setting(pf);
+ }
+ }
+
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+ pf->hw.num_partitions > 1)
+ dev_info(&pf->pdev->dev,
+ "Flow Director Sideband mode Disabled in MFP mode\n");
+ else
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ pf->hw.fdir_shared_filter_count =
+ pf->hw.func_caps.fd_filters_best_effort;
+ }
+
+ if (pf->hw.mac.type == I40E_MAC_X722) {
+ pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
+ I40E_HW_128_QP_RSS_CAPABLE |
+ I40E_HW_ATR_EVICT_CAPABLE |
+ I40E_HW_WB_ON_ITR_CAPABLE |
+ I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+ I40E_HW_NO_PCI_LINK_CHECK |
+ I40E_HW_USE_SET_LLDP_MIB |
+ I40E_HW_GENEVE_OFFLOAD_CAPABLE |
+ I40E_HW_PTP_L4_CAPABLE |
+ I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
+ I40E_HW_OUTER_UDP_CSUM_CAPABLE);
+
+#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
+ if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
+ I40E_FDEVICT_PCTYPE_DEFAULT) {
+ dev_warn(&pf->pdev->dev,
+ "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
+ pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
+ }
+ } else if ((pf->hw.aq.api_maj_ver > 1) ||
+ ((pf->hw.aq.api_maj_ver == 1) &&
+ (pf->hw.aq.api_min_ver > 4))) {
+ /* Supported in FW API version higher than 1.4 */
+ pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
+ }
+
+ /* Enable HW ATR eviction if possible */
+ if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
+
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4))) {
+ pf->hw_features |= I40E_HW_RESTART_AUTONEG;
+ /* No DCB support for FW < v4.33 */
+ pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
+ }
+
+ /* Disable FW LLDP if FW < v4.3 */
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+ (pf->hw.aq.fw_maj_ver < 4)))
+ pf->hw_features |= I40E_HW_STOP_FW_LLDP;
+
+ /* Use the FW Set LLDP MIB API if FW > v4.40 */
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
+ (pf->hw.aq.fw_maj_ver >= 5)))
+ pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
+
+ /* Enable PTP L4 if FW > v6.0 */
+ if (pf->hw.mac.type == I40E_MAC_XL710 &&
+ pf->hw.aq.fw_maj_ver >= 6)
+ pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+
+ if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
+ pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
+ }
+
+ if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
+ pf->flags |= I40E_FLAG_IWARP_ENABLED;
+	/* IWARP needs one extra vector for CQP just like MISC. */
+ pf->num_iwarp_msix = (int)num_online_cpus() + 1;
+ }
+ /* Stopping FW LLDP engine is supported on XL710 and X722
+ * starting from FW versions determined in i40e_init_adminq.
+ * Stopping the FW LLDP engine is not supported on XL710
+ * if NPAR is functioning so unset this hw flag in this case.
+ */
+ if (pf->hw.mac.type == I40E_MAC_XL710 &&
+ pf->hw.func_caps.npar_enable &&
+ (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+ pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+ pf->num_req_vfs = min_t(int,
+ pf->hw.func_caps.num_vfs,
+ I40E_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
+ pf->eeprom_version = 0xDEAD;
+ pf->lan_veb = I40E_NO_VEB;
+ pf->lan_vsi = I40E_NO_VSI;
+
+ /* By default FW has this off for performance reasons */
+ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
+ /* set up queue assignment tracking */
+ size = sizeof(struct i40e_lump_tracking)
+ + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
+ pf->qp_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->qp_pile) {
+ err = -ENOMEM;
+ goto sw_init_done;
+ }
+ pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+
+ pf->tx_timeout_recovery_level = 1;
+
+ if (pf->hw.mac.type != I40E_MAC_X722 &&
+ i40e_is_total_port_shutdown_enabled(pf)) {
+ /* Link down on close must be on when total port shutdown
+ * is enabled for a given port
+ */
+ pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+ dev_info(&pf->pdev->dev,
+ "total-port-shutdown was enabled, link-down-on-close is forced on\n");
+ }
+ mutex_init(&pf->switch_mutex);
+
+sw_init_done:
+ return err;
+}
+
+/**
+ * i40e_set_ntuple - set the ntuple feature flag and take action
+ * @pf: board private structure to initialize
+ * @features: the feature set that the stack is suggesting
+ *
+ * returns a bool to indicate if reset needs to happen
+ **/
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
+{
+ bool need_reset = false;
+
+ /* Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (features & NETIF_F_NTUPLE) {
+ /* Enable filters and mark for reset */
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ need_reset = true;
+		/* enable FD_SB only if there is an MSI-X vector and no cloud
+		 * filters exist
+		 */
+ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ }
+ } else {
+ /* turn off filters, mark for reset and clear SW filter list */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ need_reset = true;
+ i40e_fdir_filter_exit(pf);
+ }
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+
+ /* reset fd counters */
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
+ /* if ATR was auto disabled it can be re-enabled. */
+ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ }
+ return need_reset;
+}
+
+/**
+ * i40e_clear_rss_lut - clear the rx hash lookup table
+ * @vsi: the VSI being configured
+ **/
+static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
+ u8 i;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), 0);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
+}
+
+/**
+ * i40e_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static int i40e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ bool need_reset;
+
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ i40e_pf_config_rss(pf);
+ else if (!(features & NETIF_F_RXHASH) &&
+ netdev->features & NETIF_F_RXHASH)
+ i40e_clear_rss_lut(vsi);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ if (!(features & NETIF_F_HW_TC) &&
+ (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
+ dev_err(&pf->pdev->dev,
+			"Offloaded tc filters active, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+
+ if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
+ i40e_del_all_macvlans(vsi);
+
+ need_reset = i40e_set_ntuple(pf, features);
+
+ if (need_reset)
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+
+ return 0;
+}
+
+static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ u8 type, filter_index;
+ int ret;
+
+ type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
+ I40E_AQC_TUNNEL_TYPE_NGE;
+
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
+ }
+
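+	/* stash the firmware filter index so the unset callback can
+	 * delete this tunnel later through ti->hw_priv
+	 */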
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
+ return 0;
+}
+
+static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ int ret;
+
+ ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ if (ret) {
+ netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int i40e_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
+ memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
+
+ return 0;
+}
+
+/**
+ * i40e_ndo_fdb_add - add an entry to the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @vid: VLAN ID
+ * @flags: instructions from stack about fdb operation
+ * @extack: netlink extended ack, unused currently
+ */
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = 0;
+
+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ if (vid) {
+ pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+ return -EINVAL;
+ }
+
+ /* Hardware does not support aging addresses so if a
+ * ndm_state is given only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+/**
+ * i40e_ndo_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ * @flags: bridge flags
+ * @extack: netlink extended ack
+ *
+ * Inserts a new hardware bridge if not already created and
+ * enables the bridging mode requested (VEB or VEPA). If the
+ * hardware bridge has already been inserted and the request
+ * is to change the mode then that requires a PF reset to
+ * allow rebuild of the components with required hardware
+ * bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ struct nlattr *attr, *br_spec;
+ int i, rem;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if ((mode != BRIDGE_MODE_VEPA) &&
+ (mode != BRIDGE_MODE_VEB))
+ return -EINVAL;
+
+ /* Insert a new HW bridge */
+ if (!veb) {
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ if (veb) {
+ veb->bridge_mode = mode;
+ i40e_config_bridge_mode(veb);
+ } else {
+ /* No Bridge HW offload available */
+ return -ENOENT;
+ }
+ break;
+ } else if (mode != veb->bridge_mode) {
+ /* Existing HW bridge but different mode needs reset */
+ veb->bridge_mode = mode;
+ /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+ if (mode == BRIDGE_MODE_VEB)
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_ndo_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq #
+ * @dev: the netdev being configured
+ * @filter_mask: unused
+ * @nlflags: netlink flags passed in
+ *
+ * Returns the mode in which the hardware bridge is operating,
+ * i.e. VEB or VEPA.
+ **/
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 __always_unused filter_mask,
+ int nlflags)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ int i;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for the PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ if (!veb)
+ return 0;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+ 0, 0, nlflags, filter_mask, NULL);
+}
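+
+/* Both bridge ndo's are exercised over rtnetlink; with iproute2 this is
+ * typically (device name is illustrative):
+ *
+ *	bridge link set dev eth0 hwmode veb	# -> ndo_bridge_setlink
+ *	bridge link show dev eth0		# -> ndo_bridge_getlink
+ */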
+
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ /* We cannot support GSO if the MSS is going to be less than
+ * 64 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ features &= ~NETIF_F_GSO_MASK;
+
+ /* MACLEN can support at most 63 words */
+ len = skb_network_header(skb) - skb->data;
+ if (len & ~(63 * 2))
+ goto out_err;
+
+ /* IPLEN and EIPLEN can support at most 127 dwords */
+ len = skb_transport_header(skb) - skb_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+
+ if (skb->encapsulation) {
+ /* L4TUNLEN can support 127 words */
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (len & ~(127 * 2))
+ goto out_err;
+
+ /* IPLEN can support at most 127 dwords */
+ len = skb_inner_transport_header(skb) -
+ skb_inner_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+ }
+
+ /* No need to validate L4LEN as TCP is the only protocol with a
+ * flexible value and we support all possible values supported
+ * by TCP, which is at most 15 dwords
+ */
+
+ return features;
+out_err:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
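+
+/* Worked example for the length checks above: the hardware header-length
+ * fields are programmed in 2-byte words through a 6-bit field, so
+ * "len & ~(63 * 2)" accepts only even lengths up to 126 bytes. A plain
+ * Ethernet header (len = 14 = 0x0e) passes, while a hypothetical 130-byte
+ * (0x82) MAC header sets a bit outside the mask and the frame loses its
+ * CSUM/GSO offloads.
+ */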
+
+/**
+ * i40e_xdp_setup - add/remove an XDP program
+ * @vsi: VSI to changed
+ * @prog: XDP program
+ * @extack: netlink extended ack
+ **/
+static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct i40e_pf *pf = vsi->back;
+ struct bpf_prog *old_prog;
+ bool need_reset;
+ int i;
+
+ /* Don't allow frames that span over multiple buffers */
+ if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
+ return -EINVAL;
+ }
+
+ /* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+
+ if (need_reset)
+ i40e_prep_for_reset(pf);
+
+ /* VSI shall be deleted in a moment, just return EINVAL */
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+
+ if (need_reset) {
+ if (!prog)
+ /* Wait until ndo_xsk_wakeup completes. */
+ synchronize_rcu();
+ i40e_reset_and_rebuild(pf, true, true);
+ }
+
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, true))
+ return -ENOMEM;
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, false))
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This so that receiving will start.
+ */
+ if (need_reset && prog)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i]->xsk_pool)
+ (void)i40e_xsk_wakeup(vsi->netdev, i,
+ XDP_WAKEUP_RX);
+
+ return 0;
+}
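+
+/* Userspace reaches this path when attaching an XDP program to the
+ * interface. A minimal libbpf sketch (object file, program name and
+ * ifindex are hypothetical):
+ *
+ *	struct bpf_object *obj = bpf_object__open_file("xdp_prog.o", NULL);
+ *	struct bpf_program *prog;
+ *
+ *	bpf_object__load(obj);
+ *	prog = bpf_object__find_program_by_name(obj, "xdp_main");
+ *	bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
+ *
+ * The fd is resolved to a bpf_prog in the core, which then invokes
+ * ndo_bpf with XDP_SETUP_PROG and lands here via i40e_xdp().
+ */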
+
+/**
+ * i40e_enter_busy_conf - Enters busy config state
+ * @vsi: vsi
+ *
+ * Returns 0 on success, <0 for failure.
+ **/
+static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int timeout = 50;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_exit_busy_conf - Exits busy config state
+ * @vsi: vsi
+ **/
+static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
+}
+
+/**
+ * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ **/
+static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
+{
+ memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
+ sizeof(vsi->rx_rings[queue_pair]->rx_stats));
+ memset(&vsi->tx_rings[queue_pair]->stats, 0,
+ sizeof(vsi->tx_rings[queue_pair]->stats));
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ memset(&vsi->xdp_rings[queue_pair]->stats, 0,
+ sizeof(vsi->xdp_rings[queue_pair]->stats));
+ }
+}
+
+/**
+ * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ **/
+static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
+{
+ i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit calls are
+ * completed.
+ */
+ synchronize_rcu();
+ i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+ }
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
+}
+
+/**
+ * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ * @enable: true for enable, false for disable
+ **/
+static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
+ bool enable)
+{
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
+ struct i40e_q_vector *q_vector = rxr->q_vector;
+
+ if (!vsi->netdev)
+ return;
+
+ /* All rings in a qp belong to the same qvector. */
+ if (q_vector->rx.ring || q_vector->tx.ring) {
+ if (enable)
+ napi_enable(&q_vector->napi);
+ else
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ * @enable: true for enable, false for disable
+ *
+ * Returns 0 on success, <0 on failure.
+ **/
+static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int pf_q, ret = 0;
+
+ pf_q = vsi->base_queue + queue_pair;
+ ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
+ false /*is xdp*/, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Tx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
+ return ret;
+ }
+
+ i40e_control_rx_q(pf, pf_q, enable);
+ ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
+ return ret;
+ }
+
+ /* Due to HW errata, on Rx disable only, the register can
+	 * indicate done before it really is; wait 50 ms to be sure.
+ */
+ if (!enable)
+ mdelay(50);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return ret;
+
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q + vsi->alloc_queue_pairs,
+ true /*is xdp*/, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d XDP Tx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue_pair
+ **/
+static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
+{
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* All rings in a qp belong to the same qvector. */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
+ else
+ i40e_irq_dynamic_enable_icr0(pf);
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue_pair
+ **/
+static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
+{
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* For simplicity, instead of removing the qp interrupt causes
+ * from the interrupt linked list, we simply disable the interrupt, and
+ * leave the list intact.
+ *
+ * All rings in a qp belong to the same qvector.
+ */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
+
+ wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->msix_entries[intpf].vector);
+ } else {
+ /* Legacy and MSI mode - this stops all interrupt handling */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->pdev->irq);
+ }
+}
+
+/**
+ * i40e_queue_pair_disable - Disables a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ *
+ * Returns 0 on success, <0 on failure.
+ **/
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
+{
+ int err;
+
+ err = i40e_enter_busy_conf(vsi);
+ if (err)
+ return err;
+
+ i40e_queue_pair_disable_irq(vsi, queue_pair);
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+ i40e_queue_pair_clean_rings(vsi, queue_pair);
+ i40e_queue_pair_reset_stats(vsi, queue_pair);
+
+ return err;
+}
+
+/**
+ * i40e_queue_pair_enable - Enables a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ *
+ * Returns 0 on success, <0 on failure.
+ **/
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
+{
+ int err;
+
+ err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
+ if (err)
+ return err;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
+ if (err)
+ return err;
+ }
+
+ err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
+ if (err)
+ return err;
+
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
+ i40e_queue_pair_enable_irq(vsi, queue_pair);
+
+ i40e_exit_busy_conf(vsi);
+
+ return err;
+}
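+
+/* The queue-pair disable/enable helpers above exist mainly for AF_XDP:
+ * i40e_xsk_pool_setup() quiesces a single queue pair while a zero-copy
+ * buffer pool is (un)mapped, instead of resetting the whole VSI. Roughly:
+ *
+ *	if (netif_running(vsi->netdev))
+ *		i40e_queue_pair_disable(vsi, qid);
+ *	... map or unmap the xsk_buff_pool for queue qid ...
+ *	if (netif_running(vsi->netdev))
+ *		i40e_queue_pair_enable(vsi, qid);
+ */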
+
+/**
+ * i40e_xdp - implements ndo_bpf for i40e
+ * @dev: netdevice
+ * @xdp: XDP command
+ **/
+static int i40e_xdp(struct net_device *dev,
+ struct netdev_bpf *xdp)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
+ case XDP_SETUP_XSK_POOL:
+ return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
+ xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct net_device_ops i40e_netdev_ops = {
+ .ndo_open = i40e_open,
+ .ndo_stop = i40e_close,
+ .ndo_start_xmit = i40e_lan_xmit_frame,
+ .ndo_get_stats64 = i40e_get_netdev_stats_struct,
+ .ndo_set_rx_mode = i40e_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40e_set_mac,
+ .ndo_change_mtu = i40e_change_mtu,
+ .ndo_eth_ioctl = i40e_ioctl,
+ .ndo_tx_timeout = i40e_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i40e_netpoll,
+#endif
+ .ndo_setup_tc = __i40e_setup_tc,
+ .ndo_select_queue = i40e_lan_select_queue,
+ .ndo_set_features = i40e_set_features,
+ .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_get_vf_stats = i40e_get_vf_stats,
+ .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
+ .ndo_get_vf_config = i40e_ndo_get_vf_config,
+ .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
+ .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
+ .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
+ .ndo_get_phys_port_id = i40e_get_phys_port_id,
+ .ndo_fdb_add = i40e_ndo_fdb_add,
+ .ndo_features_check = i40e_features_check,
+ .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
+ .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+ .ndo_bpf = i40e_xdp,
+ .ndo_xdp_xmit = i40e_xdp_xmit,
+ .ndo_xsk_wakeup = i40e_xsk_wakeup,
+ .ndo_dfwd_add_station = i40e_fwd_add,
+ .ndo_dfwd_del_station = i40e_fwd_del,
+};
+
+/**
+ * i40e_config_netdev - Setup the netdev flags
+ * @vsi: the VSI being configured
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_config_netdev(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_netdev_priv *np;
+ struct net_device *netdev;
+ u8 broadcast[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
+ int etherdev_size;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
+
+ etherdev_size = sizeof(struct i40e_netdev_priv);
+ netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
+ if (!netdev)
+ return -ENOMEM;
+
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_UDP_L4 |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ netdev->hw_enc_features |= hw_enc_features;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+
+#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ I40E_GSO_PARTIAL_FEATURES;
+
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+ netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
+ /* enable macvlan offloads */
+ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
+
+ hw_features = hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
+ netdev->hw_features |= hw_features;
+
+ netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+
+ netdev->features &= ~NETIF_F_HW_TC;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ ether_addr_copy(mac_addr, hw->mac.perm_addr);
+ /* The following steps are necessary for two reasons. First,
+ * some older NVM configurations load a default MAC-VLAN
+ * filter that will accept any tagged packet, and we want to
+ * replace this with a normal filter. Additionally, it is
+ * possible our MAC address was provided by the platform using
+ * Open Firmware or similar.
+ *
+ * Thus, we need to remove the default filter and install one
+ * specific to the MAC address.
+ */
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_mac_filter(vsi, mac_addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ } else {
+ /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
+ * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
+ * the end, which is 4 bytes long, so force truncation of the
+ * original name by IFNAMSIZ - 4
+ */
+ snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
+ IFNAMSIZ - 4,
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ eth_random_addr(mac_addr);
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_mac_filter(vsi, mac_addr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
+
+ /* Add the broadcast filter so that we initially will receive
+ * broadcast packets. Note that when a new VLAN is first added the
+ * driver will convert all filters marked I40E_VLAN_ANY into VLAN
+ * specific filters as part of transitioning into "vlan" operation.
+ * When more VLANs are added, the driver will copy each existing MAC
+ * filter and add it for the new VLAN.
+ *
+ * Broadcast filters are handled specially by
+	 * i40e_sync_filters_subtask, as the driver must set the broadcast
+ * promiscuous bit instead of adding this directly as a MAC/VLAN
+ * filter. The subtask will update the correct broadcast promiscuous
+ * bits as VLANs become active or inactive.
+ */
+ eth_broadcast_addr(broadcast);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_mac_filter(vsi, broadcast);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ eth_hw_addr_set(netdev, mac_addr);
+ ether_addr_copy(netdev->perm_addr, mac_addr);
+
+ /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
+ netdev->neigh_priv_len = sizeof(u32) * 4;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* Setup netdev TC information */
+ i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
+
+ netdev->netdev_ops = &i40e_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+ i40e_set_ethtool_ops(netdev);
+
+ /* MTU range: 68 - 9706 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
+
+ return 0;
+}
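+
+/* Note that register_netdev() is intentionally not called here; the caller
+ * (i40e_vsi_setup()) registers the netdev only after
+ * i40e_netif_set_realnum_tx_rx_queues() has fixed up the real queue counts.
+ */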
+
+/**
+ * i40e_vsi_delete - Delete a VSI from the switch
+ * @vsi: the VSI being removed
+ **/
+static void i40e_vsi_delete(struct i40e_vsi *vsi)
+{
+ /* remove default VSI is not allowed */
+ if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
+ return;
+
+ i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
+}
+
+/**
+ * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
+ * @vsi: the VSI being queried
+ *
+ * Returns 1 if the HW bridge mode is VEB, 0 if it is VEPA, and a negative
+ * error code if no VEB is associated with the uplink.
+ **/
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
+{
+ struct i40e_veb *veb;
+ struct i40e_pf *pf = vsi->back;
+
+ /* Uplink is not a bridge so default to VEB */
+ if (vsi->veb_idx >= I40E_MAX_VEB)
+ return 1;
+
+ veb = pf->veb[vsi->veb_idx];
+ if (!veb) {
+ dev_info(&pf->pdev->dev,
+ "There is no veb associated with the bridge\n");
+ return -ENOENT;
+ }
+
+	/* Uplink is a bridge in VEPA mode */
+	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+		return 0;
+
+	/* Uplink is a bridge in VEB mode */
+	return 1;
+}
+
+/**
+ * i40e_add_vsi - Add a VSI to the switch
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command.
+ **/
+static int i40e_add_vsi(struct i40e_vsi *vsi)
+{
+ int ret = -ENODEV;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
+
+ u8 enabled_tc = 0x1; /* TC0 enabled */
+ int f_count = 0;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ /* The PF's main VSI is already setup as part of the
+ * device initialization, so we'll not bother with
+ * the add_vsi call, but we will retrieve the current
+ * VSI context.
+ */
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+ }
+ vsi->info = ctxt.info;
+ vsi->info.valid_sections = 0;
+
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+
+ enabled_tc = i40e_pf_get_tc_map(pf);
+
+ /* Source pruning is enabled by default, so the flag is
+ * negative logic - if it's set, we need to fiddle with
+ * the VSI to disable source pruning.
+ */
+ if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+					 "update vsi failed, err %pe aq_err %s\n",
+					 ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = -ENOENT;
+ goto err;
+ }
+ }
+
+ /* MFP mode setup queue map and update VSI */
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
+ !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = -ENOENT;
+ goto err;
+ }
+ /* update the local VSI info queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+ } else {
+			/* Default/Main VSI is only enabled for TC0;
+			 * reconfigure it to enable all TCs that are
+ * available on the port in SFP mode.
+ * For MFP case the iSCSI PF would use this
+ * flow to enable LAN+iSCSI TC.
+ */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+				/* Single TC condition is not fatal;
+				 * log a message and continue
+ */
+ dev_info(&pf->pdev->dev,
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
+ enabled_tc,
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ }
+ break;
+
+ case I40E_VSI_FDIR:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+ (i40e_is_vsi_uplink_mode_veb(vsi))) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ case I40E_VSI_VMDQ2:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ case I40E_VSI_SRIOV:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |=
+ (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
+ I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
+ }
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.info.sec_flags |=
+ (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+ }
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ case I40E_VSI_IWARP:
+ /* send down message to iWARP */
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = -ENOENT;
+ goto err;
+ }
+ vsi->info = ctxt.info;
+ vsi->info.valid_sections = 0;
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+ }
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
+ /* If macvlan filters already exist, force them to get loaded */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ f->state = I40E_FILTER_NEW;
+ f_count++;
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
+ if (f_count) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ }
+
+ /* Update VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* VSI is already added so not tearing that up */
+ ret = 0;
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * i40e_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_vsi_release(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ struct i40e_veb *veb = NULL;
+ struct i40e_pf *pf;
+ u16 uplink_seid;
+ int i, n, bkt;
+
+ pf = vsi->back;
+
+ /* release of a VEB-owner or last VSI is not allowed */
+ if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
+ vsi->seid, vsi->uplink_seid);
+ return -ENODEV;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi] &&
+ !test_bit(__I40E_DOWN, pf->state)) {
+ dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
+ return -ENODEV;
+ }
+ set_bit(__I40E_VSI_RELEASING, vsi->state);
+ uplink_seid = vsi->uplink_seid;
+ if (vsi->type != I40E_VSI_SRIOV) {
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ if (vsi->netdev) {
+ /* results in a call to i40e_close() */
+ unregister_netdev(vsi->netdev);
+ }
+ } else {
+ i40e_vsi_close(vsi);
+ }
+ i40e_vsi_disable_irq(vsi);
+ }
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ /* clear the sync flag on all filters */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
+
+ /* make sure any remaining filters are marked for deletion */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ __i40e_del_filter(vsi, f);
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ i40e_sync_vsi_filters(vsi);
+
+ i40e_vsi_delete(vsi);
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_vsi_clear_rings(vsi);
+ i40e_vsi_clear(vsi);
+
+ /* If this was the last thing on the VEB, except for the
+ * controlling VSI, remove the VEB, which puts the controlling
+ * VSI onto the next level down in the switch.
+ *
+ * Well, okay, there's one more exception here: don't remove
+ * the orphan VEBs yet. We'll wait for an explicit remove request
+ * from up the network stack.
+ */
+ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] &&
+ pf->vsi[i]->uplink_seid == uplink_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ n++; /* count the VSIs */
+ }
+ }
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == uplink_seid)
+ n++; /* count the VEBs */
+ if (pf->veb[i]->seid == uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (n == 0 && veb && veb->uplink_seid != 0)
+ i40e_veb_release(veb);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after i40e_vsi_mem_alloc(), which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ **/
+static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
+{
+ int ret = -ENOENT;
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi->q_vectors[0]) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->seid);
+ return -EEXIST;
+ }
+
+ if (vsi->base_vector) {
+ dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+ vsi->seid, vsi->base_vector);
+ return -EEXIST;
+ }
+
+ ret = i40e_vsi_alloc_q_vectors(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+			 "failed to allocate %d q_vectors for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->seid, ret);
+ vsi->num_q_vectors = 0;
+ goto vector_setup_out;
+ }
+
+ /* In Legacy mode, we do not have to get any other vector since we
+ * piggyback on the misc/ICR0 for queue interrupts.
+ */
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return ret;
+ if (vsi->num_q_vectors)
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
+ if (vsi->base_vector < 0) {
+ dev_info(&pf->pdev->dev,
+ "failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ vsi->num_q_vectors, vsi->seid, vsi->base_vector);
+ i40e_vsi_free_q_vectors(vsi);
+ ret = -ENOENT;
+ goto vector_setup_out;
+ }
+
+vector_setup_out:
+ return ret;
+}
+
+/**
+ * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
+ * @vsi: pointer to the VSI.
+ *
+ * This returns a VSI's queue resources to the pool and re-allocates them.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ u16 alloc_queue_pairs;
+ struct i40e_pf *pf;
+ u8 enabled_tc;
+ int ret;
+
+ if (!vsi)
+ return NULL;
+
+ pf = vsi->back;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev,
+ "failed to get tracking for %d queues for VSI %d err %d\n",
+ alloc_queue_pairs, vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
+ * i40e_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @type: VSI type
+ * @uplink_seid: the switch element to link to
+ * @param1: usage depends upon VSI type. For VF types, indicates VF id
+ *
+ * This allocates the sw VSI structure and its queue resources, then adds the
+ * VSI to the identified VEB.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
+ **/
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink_seid, u32 param1)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb = NULL;
+ u16 alloc_queue_pairs;
+ int ret, i;
+ int v_idx;
+
+ /* The requested uplink_seid must be either
+ * - the PF's port seid
+ * no VEB is needed because this is the PF
+ * or this is a Flow Director special case VSI
+ * - seid of an existing VEB
+ * - seid of a VSI that owns an existing VEB
+ * - seid of a VSI that doesn't own a VEB
+ * a new VEB is created and the VSI becomes the owner
+ * - seid of the PF VSI, which is what creates the first VEB
+ * this is a special case of the previous
+ *
+ * Find which uplink_seid we were given and create a new VEB if needed
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
+ veb = pf->veb[i];
+ break;
+ }
+ }
+
+ if (!veb && uplink_seid != pf->mac_seid) {
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
+ vsi = pf->vsi[i];
+ break;
+ }
+ }
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
+ uplink_seid);
+ return NULL;
+ }
+
+ if (vsi->uplink_seid == pf->mac_seid)
+ veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ if (veb) {
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+ dev_info(&vsi->back->pdev->dev,
+ "New VSI creation error, uplink seid of LAN VSI expected.\n");
+ return NULL;
+ }
+ /* We come up by default in VEPA mode if SRIOV is not
+ * already enabled, in which case we can't force VEPA
+ * mode.
+ */
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ }
+ i40e_config_bridge_mode(veb);
+ }
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (!veb) {
+ dev_info(&pf->pdev->dev, "couldn't add VEB\n");
+ return NULL;
+ }
+
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ uplink_seid = veb->seid;
+ }
+
+ /* get vsi sw struct */
+ v_idx = i40e_vsi_mem_alloc(pf, type);
+ if (v_idx < 0)
+ goto err_alloc;
+ vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_alloc;
+ vsi->type = type;
+ vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
+
+ if (type == I40E_VSI_MAIN)
+ pf->lan_vsi = v_idx;
+ else if (type == I40E_VSI_SRIOV)
+ vsi->vf_id = param1;
+ /* assign it some queues */
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev,
+ "failed to get tracking for %d queues for VSI %d err=%d\n",
+ alloc_queue_pairs, vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* get a VSI from the hardware */
+ vsi->uplink_seid = uplink_seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ /* setup the netdev if needed */
+ case I40E_VSI_MAIN:
+ case I40E_VSI_VMDQ2:
+ ret = i40e_config_netdev(vsi);
+ if (ret)
+ goto err_netdev;
+ ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
+ if (ret)
+ goto err_netdev;
+ ret = register_netdev(vsi->netdev);
+ if (ret)
+ goto err_netdev;
+ vsi->netdev_registered = true;
+ netif_carrier_off(vsi->netdev);
+#ifdef CONFIG_I40E_DCB
+ /* Setup DCB netlink interface */
+ i40e_dcbnl_setup(vsi);
+#endif /* CONFIG_I40E_DCB */
+ fallthrough;
+ case I40E_VSI_FDIR:
+ /* set up vectors and rings if needed */
+ ret = i40e_vsi_setup_vectors(vsi);
+ if (ret)
+ goto err_msix;
+
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+
+ i40e_vsi_reset_stats(vsi);
+ break;
+ default:
+ /* no netdev or rings for the other VSI types */
+ break;
+ }
+
+ if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
+ (vsi->type == I40E_VSI_VMDQ2)) {
+ ret = i40e_vsi_config_rss(vsi);
+ }
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+err_msix:
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_netdev:
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+err_alloc:
+ return NULL;
+}
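+
+/* Example caller: the Flow Director sideband VSI hangs off the main LAN
+ * VSI's switch element, with param1 unused for that VSI type:
+ *
+ *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+ *			     pf->vsi[pf->lan_vsi]->seid, 0);
+ */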
+
+/**
+ * i40e_veb_get_bw_info - Query VEB BW information
+ * @veb: the veb to query
+ *
+ * Query the Tx scheduler BW configuration data for given VEB
+ **/
+static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+{
+ struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
+ struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 tc_bw_max;
+ int ret = 0;
+ int i;
+
+ ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ goto out;
+ }
+
+ ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+ &ets_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw ets config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ goto out;
+ }
+
+ veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
+ veb->bw_max_quanta = ets_data.tc_bw_max;
+ veb->is_abs_credits = bw_data.absolute_credits_enable;
+ veb->enabled_tc = ets_data.tc_valid_bits;
+ tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
+ (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
+ veb->bw_tc_limit_credits[i] =
+ le16_to_cpu(bw_data.tc_bw_limits[i]);
+ veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
+ * @pf: board private structure
+ *
+ * On error: returns error code (negative)
+ * On success: returns veb index in PF (positive)
+ **/
+static int i40e_veb_mem_alloc(struct i40e_pf *pf)
+{
+ int ret = -ENOENT;
+ struct i40e_veb *veb;
+ int i;
+
+ /* Need to protect the allocation of switch elements at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VEB list may be fragmented if VEB creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free slots in the list.
+ *
+ * find next empty veb slot, looping back around if necessary
+ */
+ i = 0;
+ while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
+ i++;
+ if (i >= I40E_MAX_VEB) {
+ ret = -ENOMEM;
+ goto err_alloc_veb; /* out of VEB slots! */
+ }
+
+ veb = kzalloc(sizeof(*veb), GFP_KERNEL);
+ if (!veb) {
+ ret = -ENOMEM;
+ goto err_alloc_veb;
+ }
+ veb->pf = pf;
+ veb->idx = i;
+ veb->enabled_tc = 1;
+
+ pf->veb[i] = veb;
+ ret = i;
+err_alloc_veb:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_switch_branch_release - Delete a branch of the switch tree
+ * @branch: where to start deleting
+ *
+ * This uses recursion to find the tips of the branch to be
+ * removed, deleting until we get back to and can delete this VEB.
+ **/
+static void i40e_switch_branch_release(struct i40e_veb *branch)
+{
+ struct i40e_pf *pf = branch->pf;
+ u16 branch_seid = branch->seid;
+ u16 veb_idx = branch->idx;
+ int i;
+
+ /* release any VEBs on this VEB - RECURSION */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == branch->seid)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Release the VSIs on this VEB, but not the owner VSI.
+ *
+ * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
+ * the VEB itself, so don't use (*branch) after this loop.
+ */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
+ if (pf->vsi[i]->uplink_seid == branch_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ i40e_vsi_release(pf->vsi[i]);
+ }
+ }
+
+ /* There's one corner case where the VEB might not have been
+ * removed, so double check it here and remove it if needed.
+ * This case happens if the veb was created from the debugfs
+ * commands and no VSIs were added to it.
+ */
+ if (pf->veb[veb_idx])
+ i40e_veb_release(pf->veb[veb_idx]);
+}
+
+/**
+ * i40e_veb_clear - remove veb struct
+ * @veb: the veb to remove
+ **/
+static void i40e_veb_clear(struct i40e_veb *veb)
+{
+ if (!veb)
+ return;
+
+ if (veb->pf) {
+ struct i40e_pf *pf = veb->pf;
+
+ mutex_lock(&pf->switch_mutex);
+ if (pf->veb[veb->idx] == veb)
+ pf->veb[veb->idx] = NULL;
+ mutex_unlock(&pf->switch_mutex);
+ }
+
+ kfree(veb);
+}
+
+/**
+ * i40e_veb_release - Delete a VEB and free its resources
+ * @veb: the VEB being removed
+ **/
+void i40e_veb_release(struct i40e_veb *veb)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_pf *pf;
+ int i, n = 0;
+
+ pf = veb->pf;
+
+ /* find the remaining VSI and check for extras */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ n++;
+ vsi = pf->vsi[i];
+ }
+ }
+ if (n != 1) {
+ dev_info(&pf->pdev->dev,
+ "can't remove VEB %d with %d VSIs left\n",
+ veb->seid, n);
+ return;
+ }
+
+ /* move the remaining VSI to uplink veb */
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ if (veb->uplink_seid) {
+ vsi->uplink_seid = veb->uplink_seid;
+ if (veb->uplink_seid == pf->mac_seid)
+ vsi->veb_idx = I40E_NO_VEB;
+ else
+ vsi->veb_idx = veb->veb_idx;
+ } else {
+ /* floating VEB */
+ vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ }
+
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ i40e_veb_clear(veb);
+}
+
+/**
+ * i40e_add_veb - create the VEB in the switch
+ * @veb: the VEB to be instantiated
+ * @vsi: the controlling VSI
+ **/
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = veb->pf;
+ bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
+ int ret;
+
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
+ veb->enabled_tc, false,
+ &veb->seid, enable_stats, NULL);
+
+ /* get a VEB from the hardware */
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't add VEB, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EPERM;
+ }
+
+ /* get statistics counter */
+ ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB statistics idx, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EPERM;
+ }
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ return -ENOENT;
+ }
+
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+
+ return 0;
+}
+
+/**
+ * i40e_veb_setup - Set up a VEB
+ * @pf: board private structure
+ * @flags: VEB setup flags
+ * @uplink_seid: the switch element to link to
+ * @vsi_seid: the initial VSI seid
+ * @enabled_tc: Enabled TC bit-map
+ *
+ * This allocates the sw VEB structure and links it into the switch.
+ * It is possible and legal for this to be a duplicate of an already
+ * existing VEB. It is also possible for both uplink and vsi seids
+ * to be zero, in order to create a floating VEB.
+ *
+ * Returns pointer to the successfully allocated VEB sw struct on
+ * success, otherwise returns NULL on failure.
+ **/
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
+ u16 uplink_seid, u16 vsi_seid,
+ u8 enabled_tc)
+{
+ struct i40e_veb *veb, *uplink_veb = NULL;
+ int vsi_idx, veb_idx;
+ int ret;
+
+ /* if one seid is 0, the other must be 0 to create a floating relay */
+ if ((uplink_seid == 0 || vsi_seid == 0) &&
+ (uplink_seid + vsi_seid != 0)) {
+ dev_info(&pf->pdev->dev,
+			 "one, not both, seids are 0: uplink=%d vsi=%d\n",
+ uplink_seid, vsi_seid);
+ return NULL;
+ }
+
+ /* make sure there is such a vsi and uplink */
+ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
+ if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
+ break;
+ if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
+ dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
+ return NULL;
+ }
+
+ if (uplink_seid && uplink_seid != pf->mac_seid) {
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] &&
+ pf->veb[veb_idx]->seid == uplink_seid) {
+ uplink_veb = pf->veb[veb_idx];
+ break;
+ }
+ }
+ if (!uplink_veb) {
+ dev_info(&pf->pdev->dev,
+ "uplink seid %d not found\n", uplink_seid);
+ return NULL;
+ }
+ }
+
+ /* get veb sw struct */
+ veb_idx = i40e_veb_mem_alloc(pf);
+ if (veb_idx < 0)
+ goto err_alloc;
+ veb = pf->veb[veb_idx];
+ veb->flags = flags;
+ veb->uplink_seid = uplink_seid;
+ veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
+ veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+
+ /* create the VEB in the switch */
+ ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ if (ret)
+ goto err_veb;
+ if (vsi_idx == pf->lan_vsi)
+ pf->lan_veb = veb->idx;
+
+ return veb;
+
+err_veb:
+ i40e_veb_clear(veb);
+err_alloc:
+ return NULL;
+}
+
+/**
+ * i40e_setup_pf_switch_element - set PF vars based on switch type
+ * @pf: board private structure
+ * @ele: element we are building info from
+ * @num_reported: total number of elements
+ * @printconfig: should we print the contents
+ *
+ * Helper function for extracting a few useful SEID values.
+ **/
+static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
+ struct i40e_aqc_switch_config_element_resp *ele,
+ u16 num_reported, bool printconfig)
+{
+ u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
+ u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
+ u8 element_type = ele->element_type;
+ u16 seid = le16_to_cpu(ele->seid);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "type=%d seid=%d uplink=%d downlink=%d\n",
+ element_type, seid, uplink_seid, downlink_seid);
+
+ switch (element_type) {
+ case I40E_SWITCH_ELEMENT_TYPE_MAC:
+ pf->mac_seid = seid;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VEB:
+ /* Main VEB? */
+ if (uplink_seid != pf->mac_seid)
+ break;
+ if (pf->lan_veb >= I40E_MAX_VEB) {
+ int v;
+
+ /* find existing or else empty VEB */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
+ pf->lan_veb = v;
+ break;
+ }
+ }
+ if (pf->lan_veb >= I40E_MAX_VEB) {
+ v = i40e_veb_mem_alloc(pf);
+ if (v < 0)
+ break;
+ pf->lan_veb = v;
+ }
+ }
+ if (pf->lan_veb >= I40E_MAX_VEB)
+ break;
+
+ pf->veb[pf->lan_veb]->seid = seid;
+ pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
+ pf->veb[pf->lan_veb]->pf = pf;
+ pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VSI:
+ if (num_reported != 1)
+ break;
+ /* This is immediately after a reset so we can assume this is
+ * the PF's VSI
+ */
+ pf->mac_seid = uplink_seid;
+ pf->pf_seid = downlink_seid;
+ pf->main_vsi_seid = seid;
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "pf_seid=%d main_vsi_seid=%d\n",
+ pf->pf_seid, pf->main_vsi_seid);
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_PF:
+ case I40E_SWITCH_ELEMENT_TYPE_VF:
+ case I40E_SWITCH_ELEMENT_TYPE_EMP:
+ case I40E_SWITCH_ELEMENT_TYPE_BMC:
+ case I40E_SWITCH_ELEMENT_TYPE_PE:
+ case I40E_SWITCH_ELEMENT_TYPE_PA:
+ /* ignore these for now */
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
+ element_type, seid);
+ break;
+ }
+}
+
+/**
+ * i40e_fetch_switch_configuration - Get switch config from firmware
+ * @pf: board private structure
+ * @printconfig: should we print the contents
+ *
+ * Get the current switch configuration from the device and
+ * extract a few useful SEID values.
+ **/
+int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+{
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ u16 next_seid = 0;
+ int ret = 0;
+ u8 *aq_buf;
+ int i;
+
+ aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+ if (!aq_buf)
+ return -ENOMEM;
+
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+ do {
+ u16 num_reported, num_total;
+
+ ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
+ I40E_AQ_LARGE_BUF,
+ &next_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+				 "get switch config failed err %pe aq_err %s\n",
+				 ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ kfree(aq_buf);
+ return -ENOENT;
+ }
+
+ num_reported = le16_to_cpu(sw_config->header.num_reported);
+ num_total = le16_to_cpu(sw_config->header.num_total);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "header: %d reported %d total\n",
+ num_reported, num_total);
+
+ for (i = 0; i < num_reported; i++) {
+ struct i40e_aqc_switch_config_element_resp *ele =
+ &sw_config->element[i];
+
+ i40e_setup_pf_switch_element(pf, ele, num_reported,
+ printconfig);
+ }
+ } while (next_seid != 0);
+
+ kfree(aq_buf);
+ return ret;
+}
+
+/**
+ * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+ u16 flags = 0;
+ int ret;
+
+ /* find out what's out there already */
+ ret = i40e_fetch_switch_configuration(pf, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't fetch switch config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return ret;
+ }
+ i40e_pf_reset_stats(pf);
+
+ /* set the switch config bit for the whole device to
+ * support limited promisc or true promisc
+ * when user requests promisc. The default is limited
+ * promisc.
+ */
+
+ if ((pf->hw.pf_id == 0) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ pf->last_sw_conf_flags = flags;
+ }
+
+ if (pf->hw.pf_id == 0) {
+ u16 valid_flags;
+
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ pf->last_sw_conf_valid_flags = valid_flags;
+ }
+
+ /* first time setup */
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
+ struct i40e_vsi *vsi = NULL;
+ u16 uplink_seid;
+
+ /* Set up the PF VSI associated with the PF's main VSI
+ * that is already in the HW switch
+ */
+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
+ uplink_seid = pf->veb[pf->lan_veb]->seid;
+ else
+ uplink_seid = pf->mac_seid;
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+ i40e_cloud_filter_exit(pf);
+ i40e_fdir_teardown(pf);
+ return -EAGAIN;
+ }
+ } else {
+ /* force a reset of TC and queue layout configurations */
+ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ }
+ i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+
+ i40e_fdir_sb_setup(pf);
+
+ /* Setup static PF queue filter control settings */
+ ret = i40e_setup_pf_filter_control(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
+ ret);
+ /* Failure here should not stop continuing other steps */
+ }
+
+ /* enable RSS in the HW, even for only one queue, as the stack can use
+ * the hash
+ */
+ if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+ i40e_pf_config_rss(pf);
+
+ /* fill in link information and enable LSE reporting */
+ i40e_link_event(pf);
+
+ /* Initialize user-specific link properties */
+ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+ I40E_AQ_AN_COMPLETED) ? true : false);
+
+ i40e_ptp_init(pf);
+
+ if (!lock_acquired)
+ rtnl_lock();
+
+ /* repopulate tunnel port filters */
+ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+
+ if (!lock_acquired)
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * i40e_determine_queue_usage - Work out queue distribution
+ * @pf: board private structure
+ **/
+static void i40e_determine_queue_usage(struct i40e_pf *pf)
+{
+ int queues_left;
+ int q_max;
+
+ pf->num_lan_qps = 0;
+
+ /* Find the max queues to be put into basic use. We'll always be
+ * using TC0, whether or not DCB is running, and TC0 will get the
+ * big RSS set.
+ */
+ queues_left = pf->hw.func_caps.num_tx_qp;
+
+ if ((queues_left == 1) ||
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ /* one qp for PF, no queues for anything else */
+ queues_left = 0;
+ pf->alloc_rss_size = pf->num_lan_qps = 1;
+
+ /* make sure all the fancies are disabled */
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_CAPABLE))) {
+ /* one qp for PF */
+ pf->alloc_rss_size = pf->num_lan_qps = 1;
+ queues_left -= pf->num_lan_qps;
+
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ } else {
+ /* Not enough queues for all TCs */
+ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
+ (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
+ dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
+ }
+
+ /* start from the larger of rss_size_max and the online CPU
+ * count, then cap by the available queue pairs and MSI-X
+ * vectors
+ */
+ q_max = max_t(int, pf->rss_size_max, num_online_cpus());
+ q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
+ q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
+ pf->num_lan_qps = q_max;
+
+ queues_left -= pf->num_lan_qps;
+ }
+
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (queues_left > 1) {
+ queues_left -= 1; /* save 1 queue for FD */
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
+ }
+ }
+
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ pf->num_vf_qps && pf->num_req_vfs && queues_left) {
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+ (queues_left / pf->num_vf_qps));
+ queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
+ }
+
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
+ pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
+ (queues_left / pf->num_vmdq_qps));
+ queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
+ }
+
+ pf->queues_left = queues_left;
+ dev_dbg(&pf->pdev->dev,
+ "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
+ pf->hw.func_caps.num_tx_qp,
+ !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+ pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
+ pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
+ queues_left);
+}
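+
+/* Illustrative walk-through (hypothetical capabilities): with
+ * num_tx_qp = 128, rss_size_max = 64, 16 online CPUs and at least 64
+ * MSI-X vectors, q_max = min(max(64, 16), 128, msix) = 64, so
+ * num_lan_qps = 64 and queues_left = 64. Flow Director then reserves
+ * one queue (63 left); with num_vf_qps = 4 and 32 requested VFs only
+ * 63 / 4 = 15 VFs fit, consuming 60 queues and leaving 3 in
+ * pf->queues_left.
+ */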
+
+/**
+ * i40e_setup_pf_filter_control - Setup PF static filter control
+ * @pf: PF to be setup
+ *
+ * i40e_setup_pf_filter_control sets up a PF's initial filter control
+ * settings. If PE/FCoE are enabled then it will also set the per-PF
+ * filter sizes required for them. It also enables Flow Director,
+ * ethertype and macvlan type filter settings for the PF.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
+{
+ struct i40e_filter_control_settings *settings = &pf->filter_settings;
+
+ settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
+
+ /* Flow Director is enabled */
+ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
+ settings->enable_fdir = true;
+
+ /* Ethtype and MACVLAN filters enabled for PF */
+ settings->enable_ethtype = true;
+ settings->enable_macvlan = true;
+
+ if (i40e_set_filter_control(&pf->hw, settings))
+ return -ENOENT;
+
+ return 0;
+}
+
+#define INFO_STRING_LEN 255
+#define REMAIN(__x) (INFO_STRING_LEN - (__x))
+static void i40e_print_features(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ char *buf;
+ int i;
+
+ buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
+#ifdef CONFIG_PCI_IOV
+ i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
+#endif
+ i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
+ pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
+ if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ i += scnprintf(&buf[i], REMAIN(i), " RSS");
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+ i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
+ i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
+ }
+ if (pf->flags & I40E_FLAG_DCB_CAPABLE)
+ i += scnprintf(&buf[i], REMAIN(i), " DCB");
+ i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
+ i += scnprintf(&buf[i], REMAIN(i), " Geneve");
+ if (pf->flags & I40E_FLAG_PTP)
+ i += scnprintf(&buf[i], REMAIN(i), " PTP");
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ i += scnprintf(&buf[i], REMAIN(i), " VEB");
+ else
+ i += scnprintf(&buf[i], REMAIN(i), " VEPA");
+
+ dev_info(&pf->pdev->dev, "%s\n", buf);
+ kfree(buf);
+ WARN_ON(i > INFO_STRING_LEN);
+}
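+
+/* The buffer building above uses the running-offset idiom: 'i' tracks
+ * how many bytes are used and REMAIN(i) is passed as the space left,
+ * so the writes can never overflow. A minimal sketch of the same
+ * idiom (illustrative only):
+ *
+ *	int n = scnprintf(buf, len, "hdr");
+ *	n += scnprintf(buf + n, len - n, " opt1");
+ *	n += scnprintf(buf + n, len - n, " opt2");
+ *
+ * scnprintf() returns the number of characters actually written
+ * (excluding the trailing NUL), so the offset stays within bounds even
+ * on truncation.
+ */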
+
+/**
+ * i40e_get_platform_mac_addr - get platform-specific MAC address
+ * @pdev: PCI device information struct
+ * @pf: board private structure
+ *
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which checks Open Firmware and any
+ * arch-specific fallback. If that fails, we fall back to the value
+ * stored in firmware.
+ **/
+static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
+{
+ if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+ i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
+}
+
+/**
+ * i40e_set_fec_in_flags - helper function for setting FEC options in flags
+ * @fec_cfg: FEC option to set in flags
+ * @flags: ptr to flags in which we set FEC option
+ **/
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
+{
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+ *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
+ *flags |= I40E_FLAG_RS_FEC;
+ *flags &= ~I40E_FLAG_BASE_R_FEC;
+ }
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
+ *flags |= I40E_FLAG_BASE_R_FEC;
+ *flags &= ~I40E_FLAG_RS_FEC;
+ }
+ if (fec_cfg == 0)
+ *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
+}
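+
+/* Resulting flag state for representative fec_cfg inputs, derived from
+ * the logic above (illustrative summary):
+ *
+ *	fec_cfg					RS_FEC	BASE_R_FEC
+ *	I40E_AQ_SET_FEC_AUTO			set	set
+ *	..._REQUEST_RS or ..._ABILITY_RS	set	cleared
+ *	..._REQUEST_KR or ..._ABILITY_KR	cleared	set
+ *	0					cleared	cleared
+ */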
+
+/**
+ * i40e_check_recovery_mode - check if we are running transition firmware
+ * @pf: board private structure
+ *
+ * Check registers indicating the firmware runs in recovery mode. Sets the
+ * appropriate driver state.
+ *
+ * Returns true if the recovery mode was detected, false otherwise
+ **/
+static bool i40e_check_recovery_mode(struct i40e_pf *pf)
+{
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+ dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ set_bit(__I40E_RECOVERY_MODE, pf->state);
+
+ return true;
+ }
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+ dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
+
+ return false;
+}
+
+/**
+ * i40e_pf_loop_reset - perform reset in a loop.
+ * @pf: board private structure
+ *
+ * This function is useful when a NIC is about to enter recovery mode.
+ * When a NIC's internal data structures are corrupted, its firmware is
+ * going to enter recovery mode. Right after a power-on reset (POR) it
+ * takes about 7 minutes for the firmware to enter recovery mode; until
+ * then the NIC is in an intermediate state, after which it almost
+ * surely enters recovery mode. The only way for the driver to detect
+ * the intermediate state is to issue a series of PF resets and check
+ * the return values. If a PF reset returns success, the firmware could
+ * be in recovery mode, so the caller of this code needs to check for
+ * recovery mode if this function returns success. There is a small
+ * chance that the firmware will hang in the intermediate state forever.
+ * Since waiting 7 minutes is quite a lot of time, this function waits
+ * 10 seconds and then gives up by returning an error.
+ *
+ * Return 0 on success, negative on failure.
+ **/
+static int i40e_pf_loop_reset(struct i40e_pf *pf)
+{
+ /* wait max 10 seconds for PF reset to succeed */
+ const unsigned long time_end = jiffies + 10 * HZ;
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_pf_reset(hw);
+ while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+ usleep_range(10000, 20000);
+ ret = i40e_pf_reset(hw);
+ }
+
+ if (ret == I40E_SUCCESS)
+ pf->pfr_count++;
+ else
+ dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
+
+ return ret;
+}
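+
+/* The retry loop above is the standard jiffies-deadline idiom: capture
+ * 'jiffies + 10 * HZ' once up front and test with time_before(), which
+ * stays correct across jiffies wraparound, sleeping 10-20 ms between
+ * attempts.
+ */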
+
+/**
+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
+ * @pf: board private structure
+ *
+ * Check FW registers to determine if the FW issued an unexpected EMP
+ * Reset. Each time an unexpected EMP Reset occurs, the FW increments a
+ * counter of unexpected EMP Resets. When the counter reaches 10, the FW
+ * should enter recovery mode.
+ *
+ * Returns true if the FW issued an unexpected EMP Reset
+ **/
+static bool i40e_check_fw_empr(struct i40e_pf *pf)
+{
+ const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
+ I40E_GL_FWSTS_FWS1B_MASK;
+ return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
+ (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
+}
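+
+/* Illustrative reading: FWS1B counts unexpected EMP Resets, so a value
+ * of I40E_GL_FWSTS_FWS1B_EMPR_0 + 3 means three such resets have
+ * occurred; any value in the range (EMPR_0, EMPR_10] makes this
+ * function return true.
+ */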
+
+/**
+ * i40e_handle_resets - handle EMP resets and PF resets
+ * @pf: board private structure
+ *
+ * Handle both EMP resets and PF resets and conclude whether there are
+ * any issues with these resets. If there are, generate a log entry.
+ *
+ * Return 0 if NIC is healthy or negative value when there are issues
+ * with resets
+ **/
+static int i40e_handle_resets(struct i40e_pf *pf)
+{
+ const int pfr = i40e_pf_loop_reset(pf);
+ const bool is_empr = i40e_check_fw_empr(pf);
+
+ if (is_empr || pfr != I40E_SUCCESS)
+ dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
+
+ return is_empr ? I40E_ERR_RESET_FAILED : pfr;
+}
+
+/**
+ * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
+ * @pf: board private structure
+ * @hw: ptr to the hardware info
+ *
+ * This function does a minimal setup of all subsystems needed for running
+ * recovery mode.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+{
+ struct i40e_vsi *vsi;
+ int err;
+ int v_idx;
+
+ pci_set_drvdata(pf->pdev, pf);
+ pci_save_state(pf->pdev);
+
+ /* set up periodic task facility */
+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
+ */
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+ /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+ GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_switch_setup;
+ }
+
+ /* We allocate one VSI, which is the absolute minimum needed
+ * in order to register the netdev
+ */
+ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
+ if (v_idx < 0) {
+ err = v_idx;
+ goto err_switch_setup;
+ }
+ pf->lan_vsi = v_idx;
+ vsi = pf->vsi[v_idx];
+ if (!vsi) {
+ err = -EFAULT;
+ goto err_switch_setup;
+ }
+ vsi->alloc_queue_pairs = 1;
+ err = i40e_config_netdev(vsi);
+ if (err)
+ goto err_switch_setup;
+ err = register_netdev(vsi->netdev);
+ if (err)
+ goto err_switch_setup;
+ vsi->netdev_registered = true;
+ i40e_dbg_pf_init(pf);
+
+ err = i40e_setup_misc_vector_for_recovery_mode(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
+ del_timer_sync(&pf->service_timer);
+ i40e_shutdown_adminq(hw);
+ iounmap(hw->hw_addr);
+ pci_disable_pcie_error_reporting(pf->pdev);
+ pci_release_mem_regions(pf->pdev);
+ pci_disable_device(pf->pdev);
+ kfree(pf);
+
+ return err;
+}
+
+/**
+ * i40e_set_subsystem_device_id - set subsystem device id
+ * @hw: pointer to the hardware info
+ *
+ * Set PCI subsystem device id either from a pci_dev structure or
+ * a specific FW register.
+ **/
+static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
+{
+ struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
+
+ hw->subsystem_device_id = pdev->subsystem_device ?
+ pdev->subsystem_device :
+ (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
+}
+
+/**
+ * i40e_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40e_pci_tbl
+ *
+ * i40e_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
+ * and a hardware reset occur.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+#ifdef CONFIG_I40E_DCB
+ enum i40e_get_fw_lldp_status_resp lldp_status;
+#endif /* CONFIG_I40E_DCB */
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ static u16 pfs_found;
+ u16 wol_nvm_bits;
+ u16 link_status;
+#ifdef CONFIG_I40E_DCB
+ int status;
+#endif /* CONFIG_I40E_DCB */
+ int err;
+ u32 val;
+ u32 i;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ /* set up for high or low dma */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+
+ /* set up pci connections */
+ err = pci_request_mem_regions(pdev, i40e_driver_name);
+ if (err) {
+ dev_info(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ /* Now that we have a PCI connection, we need to do the
+ * low level device setup. This is primarily setting up
+ * the Admin Queue structures and then querying for the
+ * device's current profile information.
+ */
+ pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ if (!pf) {
+ err = -ENOMEM;
+ goto err_pf_alloc;
+ }
+ pf->next_vsi = 0;
+ pf->pdev = pdev;
+ set_bit(__I40E_DOWN, pf->state);
+
+ hw = &pf->hw;
+ hw->back = pf;
+
+ pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+ I40E_MAX_CSR_SPACE);
+ /* We believe that the highest register to read is
+ * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
+ * is not less than that before mapping to prevent a
+ * kernel panic.
+ */
+ if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
+ dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
+ pf->ioremap_len);
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
+ if (!hw->hw_addr) {
+ err = -EIO;
+ dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
+ (unsigned int)pci_resource_start(pdev, 0),
+ pf->ioremap_len, err);
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ i40e_set_subsystem_device_id(hw);
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
+ pf->instance = pfs_found;
+
+ /* Select something other than the 802.1ad ethertype for the
+ * switch to use internally and drop on ingress.
+ */
+ hw->switch_tag = 0xffff;
+ hw->first_tag = ETH_P_8021AD;
+ hw->second_tag = ETH_P_8021Q;
+
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+ INIT_LIST_HEAD(&pf->ddp_old_prof);
+
+ /* set up the locks for the AQ, do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ pf->msg_enable = netif_msg_init(debug,
+ NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK);
+ if (debug < -1)
+ pf->hw.debug_mask = debug;
+
+ /* do a special CORER for clearing PXE mode once at init */
+ if (hw->revision_id == 0 &&
+ (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ i40e_flush(hw);
+ msleep(200);
+ pf->corer_count++;
+
+ i40e_clear_pxe_mode(hw);
+ }
+
+ /* Reset here to make sure all is clean and to define PF 'n' */
+ i40e_clear_hw(hw);
+
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_handle_resets(pf);
+ if (err)
+ goto err_pf_reset;
+
+ i40e_check_recovery_mode(pf);
+
+ if (is_kdump_kernel()) {
+ hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
+ hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
+ } else {
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ }
+ hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+
+ snprintf(pf->int_name, sizeof(pf->int_name) - 1,
+ "%s-%s:misc",
+ dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
+
+ err = i40e_init_shared_code(hw);
+ if (err) {
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
+ goto err_pf_reset;
+ }
+
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
+ err = i40e_init_adminq(hw);
+ if (err) {
+ if (err == I40E_ERR_FIRMWARE_API_VERSION)
+ dev_info(&pdev->dev,
+ "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
+ else
+ dev_info(&pdev->dev,
+ "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
+
+ goto err_pf_reset;
+ }
+ i40e_get_oem_version(hw);
+
+ /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
+ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
+ hw->subsystem_vendor_id, hw->subsystem_device_id);
+
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
+ dev_dbg(&pdev->dev,
+ "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
+ else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
+ dev_info(&pdev->dev,
+ "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
+
+ i40e_verify_eeprom(pf);
+
+ /* Rev 0 hardware was never productized */
+ if (hw->revision_id < 1)
+ dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+
+ i40e_clear_pxe_mode(hw);
+
+ err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
+ if (err)
+ goto err_adminq_setup;
+
+ err = i40e_sw_init(pf);
+ if (err) {
+ dev_info(&pdev->dev, "sw_init failed: %d\n", err);
+ goto err_sw_init;
+ }
+
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+ return i40e_init_recovery_mode(pf, hw);
+
+ err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (err) {
+ dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
+ goto err_init_lan_hmc;
+ }
+
+ err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (err) {
+ dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
+ err = -ENOENT;
+ goto err_configure_lan_hmc;
+ }
+
+ /* Disable LLDP for NICs that have firmware versions lower than v4.3.
+ * Ignore error return codes, because if LLDP was already disabled via
+ * hardware settings this call will fail.
+ */
+ if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
+ dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
+ i40e_aq_stop_lldp(hw, true, false, NULL);
+ }
+
+ /* allow a platform config to override the HW addr */
+ i40e_get_platform_mac_addr(pdev, pf);
+
+ if (!is_valid_ether_addr(hw->mac.addr)) {
+ dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
+ err = -EIO;
+ goto err_mac_addr;
+ }
+ dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
+ ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
+ i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+ if (is_valid_ether_addr(hw->mac.port_addr))
+ pf->hw_features |= I40E_HW_PORT_ID_VALID;
+
+ i40e_ptp_alloc_pins(pf);
+ pci_set_drvdata(pdev, pf);
+ pci_save_state(pdev);
+
+#ifdef CONFIG_I40E_DCB
+ status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
+ if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
+ pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
+ else
+ pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ dev_info(&pdev->dev,
+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
+
+ /* Enable FW to write default DCB config on link-up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
+ err = i40e_init_pf_dcb(pf);
+ if (err) {
+ dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
+ /* Continue without DCB enabled */
+ }
+#endif /* CONFIG_I40E_DCB */
+
+ /* set up periodic task facility */
+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((BIT(hw->port) & wol_nvm_bits) || hw->partition_id != 1)
+ pf->wol_en = false;
+ else
+ pf->wol_en = true;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
+ /* set up the main switch operations */
+ i40e_determine_queue_usage(pf);
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* Reduce Tx and Rx pairs for kdump
+ * When MSI-X is enabled, it's not allowed to use more TC queue
+ * pairs than there are MSI-X vectors (pf->num_lan_msix). Thus
+ * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
+ */
+ if (is_kdump_kernel())
+ pf->num_lan_msix = 1;
+
+ pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
+ pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
+ pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
+ pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
+ pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
+ UDP_TUNNEL_TYPE_GENEVE;
+
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
+ */
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
+
+ /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+ GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_switch_setup;
+ }
+
+#ifdef CONFIG_PCI_IOV
+ /* prep for VF support */
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
+ if (pci_num_vf(pdev))
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ }
+#endif
+ err = i40e_setup_pf_switch(pf, false, false);
+ if (err) {
+ dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
+ goto err_vsis;
+ }
+ INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_open(pf->vsi[i]);
+ break;
+ }
+ }
+
+ /* The driver only wants link up/down and module qualification
+ * reports from firmware. Note the negative logic.
+ */
+ err = i40e_aq_set_phy_int_mask(&pf->hw,
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ /* Reconfigure hardware to allow a smaller MSS in the case
+ * of TSO, so that we avoid the MDD being fired and causing
+ * a reset in the case of small MSS+TSO.
+ */
+ val = rd32(hw, I40E_REG_MSS);
+ if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+ val &= ~I40E_REG_MSS_MIN_MASK;
+ val |= I40E_64BYTE_MSS;
+ wr32(hw, I40E_REG_MSS, val);
+ }
+
+ if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ /* The main driver is (mostly) up and happy. We need to set this state
+ * before setting up the misc vector or we get a race and the vector
+ * ends up disabled forever.
+ */
+ clear_bit(__I40E_DOWN, pf->state);
+
+ /* In case of MSIX we are going to setup the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets setup at open.
+ */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+ if (err) {
+ dev_info(&pdev->dev,
+ "setup of misc vector failed: %d\n", err);
+ i40e_cloud_filter_exit(pf);
+ i40e_fdir_teardown(pf);
+ goto err_vsis;
+ }
+ }
+
+#ifdef CONFIG_PCI_IOV
+ /* prep for VF support */
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
+ /* disable link interrupts for VFs */
+ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ i40e_flush(hw);
+
+ if (pci_num_vf(pdev)) {
+ dev_info(&pdev->dev,
+ "Active VFs found, allocating resources.\n");
+ err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+ if (err)
+ dev_info(&pdev->dev,
+ "Error %d allocating resources for existing VFs\n",
+ err);
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
+ pf->num_iwarp_msix,
+ I40E_IWARP_IRQ_PILE_ID);
+ if (pf->iwarp_base_vector < 0) {
+ dev_info(&pdev->dev,
+ "failed to get tracking for %d vectors for IWARP err=%d\n",
+ pf->num_iwarp_msix, pf->iwarp_base_vector);
+ pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ }
+ }
+
+ i40e_dbg_pf_init(pf);
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ /* add this PF to client device list and launch a client service task */
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+ }
+
+#define PCI_SPEED_SIZE 8
+#define PCI_WIDTH_SIZE 8
+ /* Devices on the IOSF bus do not have this information
+ * and will report PCI Gen 1 x 1 by default so don't bother
+ * checking them.
+ */
+ if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
+ char speed[PCI_SPEED_SIZE] = "Unknown";
+ char width[PCI_WIDTH_SIZE] = "Unknown";
+
+ /* Get the negotiated link width and speed from PCI config
+ * space
+ */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
+ &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ switch (hw->bus.speed) {
+ case i40e_bus_speed_8000:
+ strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ case i40e_bus_speed_5000:
+ strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ case i40e_bus_speed_2500:
+ strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ default:
+ break;
+ }
+ switch (hw->bus.width) {
+ case i40e_bus_width_pcie_x8:
+ strscpy(width, "8", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x4:
+ strscpy(width, "4", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x2:
+ strscpy(width, "2", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x1:
+ strscpy(width, "1", PCI_WIDTH_SIZE); break;
+ default:
+ break;
+ }
+
+ dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
+ speed, width);
+
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
+ }
+
+ /* get the requested speeds from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ if (err)
+ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+
+ /* set the FEC config due to the board capabilities */
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+
+ /* get the supported phy types from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+ if (err)
+ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
+ ERR_PTR(err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+ /* make sure the MFS hasn't been set lower than the default */
+#define MAX_FRAME_SIZE_DEFAULT 0x2600
+ val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
+ I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ if (val < MAX_FRAME_SIZE_DEFAULT)
+ dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+ pf->hw.port, val);
+
+ /* Add a filter to drop all Flow control frames from any VSI from being
+ * transmitted. By doing so we stop a malicious VF from sending out
+ * PAUSE or PFC frames and potentially controlling traffic for other
+ * PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
+ if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+ pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
+ if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
+ pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
+ /* print a string summarizing features */
+ i40e_print_features(pf);
+
+ return 0;
+
+ /* Unwind what we've done if something failed in the setup */
+err_vsis:
+ set_bit(__I40E_DOWN, pf->state);
+ i40e_clear_interrupt_scheme(pf);
+ kfree(pf->vsi);
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
+ del_timer_sync(&pf->service_timer);
+err_mac_addr:
+err_configure_lan_hmc:
+ (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+ kfree(pf->qp_pile);
+err_sw_init:
+err_adminq_setup:
+err_pf_reset:
+ iounmap(hw->hw_addr);
+err_ioremap:
+ kfree(pf);
+err_pf_alloc:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * i40e_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * i40e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40e_remove(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+ int ret_code;
+ int i;
+
+ i40e_dbg_pf_exit(pf);
+
+ i40e_ptp_stop(pf);
+
+ /* Disable RSS in hw */
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+
+ /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
+ * flags, once they are set, i40e_rebuild should not be called as
+ * i40e_prep_for_reset always returns early.
+ */
+ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ usleep_range(1000, 2000);
+ set_bit(__I40E_IN_REMOVE, pf->state);
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+ /* no more scheduling of any task */
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
+ if (pf->service_timer.function)
+ del_timer_sync(&pf->service_timer);
+ if (pf->service_task.func)
+ cancel_work_sync(&pf->service_task);
+
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ struct i40e_vsi *vsi = pf->vsi[0];
+
+ /* We know that we have allocated only one vsi for this PF;
+ * it was allocated just to register the netdevice, so that the
+ * interface is visible in the 'ifconfig' output
+ */
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+
+ goto unmap;
+ }
+
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
+ i40e_fdir_teardown(pf);
+
+ /* If there is a switch structure or any orphans, remove them.
+ * This will leave only the PF's VSI remaining.
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+
+ if (pf->veb[i]->uplink_seid == pf->mac_seid ||
+ pf->veb[i]->uplink_seid == 0)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Now we can shutdown the PF's VSIs, just before we kill
+ * adminq and hmc.
+ */
+ for (i = pf->num_alloc_vsi; i--;)
+ if (pf->vsi[i]) {
+ i40e_vsi_close(pf->vsi[i]);
+ i40e_vsi_release(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
+
+ i40e_cloud_filter_exit(pf);
+
+ /* remove attached clients */
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
+ }
+
+ /* shutdown and destroy the HMC */
+ if (hw->hmc.hmc_obj) {
+ ret_code = i40e_shutdown_lan_hmc(hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n",
+ ret_code);
+ }
+
+unmap:
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
+ /* shutdown the adminq */
+ i40e_shutdown_adminq(hw);
+
+ /* destroy the locks only once, here */
+ mutex_destroy(&hw->aq.arq_mutex);
+ mutex_destroy(&hw->aq.asq_mutex);
+
+ /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+ rtnl_lock();
+ i40e_clear_interrupt_scheme(pf);
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i]) {
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(pf->vsi[i]);
+ i40e_vsi_clear(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
+ }
+ rtnl_unlock();
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ kfree(pf->veb[i]);
+ pf->veb[i] = NULL;
+ }
+
+ kfree(pf->qp_pile);
+ kfree(pf->vsi);
+
+ iounmap(hw->hw_addr);
+ kfree(pf);
+ pci_release_mem_regions(pdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40e_pci_error_detected - warning that something funky happened in PCI land
+ * @pdev: PCI device information struct
+ * @error: the type of PCI error
+ *
+ * Called to warn that something happened and the error handling steps
+ * are in progress. Allows the driver to quiesce things and be ready
+ * for remediation.
+ **/
+static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+
+ if (!pf) {
+ dev_info(&pdev->dev,
+ "Cannot recover - error happened during device probe\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* shutdown all operations */
+ if (!test_bit(__I40E_SUSPENDED, pf->state))
+ i40e_prep_for_reset(pf);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * i40e_pci_error_slot_reset - a PCI slot reset just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to find if the driver can work with the device now that
+ * the pci slot has been reset. If a basic connection seems good
+ * (registers are readable and have sane content) then return a
+ * happy little PCI_ERS_RESULT_xxx.
+ **/
+static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ u32 reg;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+ if (pci_enable_device_mem(pdev)) {
+ dev_info(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ if (reg == 0)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return result;
+}
+
+/**
+ * i40e_pci_error_reset_prepare - prepare device driver for pci reset
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ i40e_prep_for_reset(pf);
+}
+
+/**
+ * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
+
+ i40e_reset_and_rebuild(pf, false, false);
+#ifdef CONFIG_PCI_IOV
+ i40e_restore_all_vfs_msi_state(pdev);
+#endif /* CONFIG_PCI_IOV */
+}
+
+/**
+ * i40e_pci_error_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error
+ * and/or reset recovery has finished.
+ **/
+static void i40e_pci_error_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, pf->state))
+ return;
+
+ i40e_handle_reset_warning(pf, false);
+}
+
+/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 mac_addr[6];
+ u16 flags = 0;
+ int ret;
+
+ /* Get current MAC address in case it's an LAA */
+ if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
+ ether_addr_copy(mac_addr,
+ pf->vsi[pf->lan_vsi]->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the mac address write cmd to first be called with
+ * one of these flags before calling it again with the multicast
+ * enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
+}
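+
+/* Sequence summary, derived from the code above: the FW requires two
+ * mac_address_write calls - first with a LAA write type to latch the
+ * current address, then with I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG plus
+ * the MC_MAG_EN and WOL-preserve flags to arm multicast magic-packet
+ * wake up.
+ */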
+
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
+
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+ i40e_cloud_filter_exit(pf);
+ i40e_fdir_teardown(pf);
+
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
+ if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
+ i40e_prep_for_reset(pf);
+
+ wr32(hw, I40E_PFPM_APM,
+ (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC,
+ (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
+ i40e_clear_interrupt_scheme(pf);
+ rtnl_unlock();
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * i40e_suspend - PM callback for moving to D3
+ * @dev: generic device information structure
+ **/
+static int __maybe_unused i40e_suspend(struct device *dev)
+{
+ struct i40e_pf *pf = dev_get_drvdata(dev);
+ struct i40e_hw *hw = &pf->hw;
+
+ /* If we're already suspended, then there is nothing to do */
+ if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
+ return 0;
+
+ set_bit(__I40E_DOWN, pf->state);
+
+ /* Ensure service task will not be running */
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
+ if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
+
+ i40e_prep_for_reset(pf);
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ /* Clear the interrupt scheme and release our IRQs so that the system
+ * can safely hibernate even when there are a large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ i40e_clear_interrupt_scheme(pf);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
+ **/
+static int __maybe_unused i40e_resume(struct device *dev)
+{
+ struct i40e_pf *pf = dev_get_drvdata(dev);
+ int err;
+
+ /* If we're not suspended, then there is nothing to do */
+ if (!test_bit(__I40E_SUSPENDED, pf->state))
+ return 0;
+
+ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
+ * since we're going to be restoring queues
+ */
+ rtnl_lock();
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ err = i40e_restore_interrupt_scheme(pf);
+ if (err) {
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
+ err);
+ }
+
+ clear_bit(__I40E_DOWN, pf->state);
+ i40e_reset_and_rebuild(pf, false, true);
+
+ rtnl_unlock();
+
+ /* Clear suspended state last after everything is recovered */
+ clear_bit(__I40E_SUSPENDED, pf->state);
+
+ /* Restart the service task */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+}
+
+static const struct pci_error_handlers i40e_err_handler = {
+ .error_detected = i40e_pci_error_detected,
+ .slot_reset = i40e_pci_error_slot_reset,
+ .reset_prepare = i40e_pci_error_reset_prepare,
+ .reset_done = i40e_pci_error_reset_done,
+ .resume = i40e_pci_error_resume,
+};
+
+static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+
+static struct pci_driver i40e_driver = {
+ .name = i40e_driver_name,
+ .id_table = i40e_pci_tbl,
+ .probe = i40e_probe,
+ .remove = i40e_remove,
+ .driver = {
+ .pm = &i40e_pm_ops,
+ },
+ .shutdown = i40e_shutdown,
+ .err_handler = &i40e_err_handler,
+ .sriov_configure = i40e_pci_sriov_configure,
+};
+
+/**
+ * i40e_init_module - Driver registration routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40e_init_module(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
+ pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+
+ /* There is no need to throttle the number of active tasks because
+ * each device limits its own task using a state bit for scheduling
+ * the service task, and the device tasks do not interfere with each
+ * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
+ * since we need to be able to guarantee forward progress even under
+ * memory pressure.
+ */
+ i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ if (!i40e_wq) {
+ pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+ return -ENOMEM;
+ }
+
+ i40e_dbg_init();
+ err = pci_register_driver(&i40e_driver);
+ if (err) {
+ destroy_workqueue(i40e_wq);
+ i40e_dbg_exit();
+ return err;
+ }
+
+ return 0;
+}
+module_init(i40e_init_module);
+
+/**
+ * i40e_exit_module - Driver exit cleanup routine
+ *
+ * i40e_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40e_exit_module(void)
+{
+ pci_unregister_driver(&i40e_driver);
+ destroy_workqueue(i40e_wq);
+ ida_destroy(&i40e_client_ida);
+ i40e_dbg_exit();
+}
+module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
new file mode 100644
index 000000000..f99c1f7fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e_prototype.h"
+
+/**
+ * i40e_init_nvm - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setup the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside the i40e_init_shared_code().
+ * Note that the NVM term is used here (and in all methods covered in
+ * this file) as an equivalent of the FLASH part mapped into the Shadow
+ * RAM (SR); the FLASH is always accessed through the Shadow RAM.
+ **/
+int i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ int ret_code = 0;
+ u32 fla, gens;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Switch to words (the SR_SIZE field is the log2 of the size in KB) */
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode */
+ nvm->blank_nvm_mode = true;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
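+
+/* Worked example (hypothetical field value): a GENS SR_SIZE field of 4
+ * gives BIT(4) = 16, i.e. a 16KB Shadow RAM, which converts to
+ * 16 * I40E_SR_WORDS_IN_1KB = 16 * 512 = 8192 16-bit words.
+ */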
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership for the given access type
+ * (read or write) via the proper Admin Command.
+ **/
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ u64 gtime, timeout;
+ u64 time_left = 0;
+ int ret_code = 0;
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time_left, NULL);
+ /* Reading the Global Device Timer */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
+
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+ access, time_left, ret_code, hw->aq.asq_last_status);
+
+ if (ret_code && time_left) {
+ /* Poll until the current NVM owner times out */
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+ while ((gtime < timeout) && time_left) {
+ usleep_range(10000, 20000);
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time_left,
+ NULL);
+ if (!ret_code) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time_left) + gtime;
+ break;
+ }
+ }
+ if (ret_code) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+ time_left, ret_code, hw->aq.asq_last_status);
+ }
+ }
+
+i40e_acquire_nvm_exit:
+ return ret_code;
+}
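+
+/* Timeline sketch (hypothetical values): if the first request fails
+ * with time_left = 3000 ms, the loop above re-requests the resource
+ * every 10-20 ms; on success the ownership timeout is recomputed from
+ * the freshly returned time_left, and if the whole
+ * I40E_MAX_NVM_TIMEOUT window elapses first, hw_semaphore_timeout is
+ * zeroed and the error is reported.
+ */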
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ int ret_code = I40E_SUCCESS;
+ u32 total_delay = 0;
+
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+ /* there are some rare cases when trying to release the resource
+ * results in an admin Q timeout, so handle them correctly
+ */
+ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+ (total_delay < hw->aq.asq_cmd_timeout)) {
+ usleep_range(1000, 2000);
+ ret_code = i40e_aq_release_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ 0, NULL);
+ total_delay++;
+ }
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ int ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = 0;
+ break;
+ }
+ udelay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ int ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ if (offset >= hw->nvm.sr_size) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ offset, hw->nvm.sr_size);
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ /* Write the address and start reading */
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer to hold the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a buffer of 16 bit words from the Shadow RAM using the admin command.
+ **/
+static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can read only up to 4KB (one sector) in one AQ read */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
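+
+/* Worked example of the sector check above (hypothetical offsets):
+ * with 4KB sectors, i.e. I40E_SR_SECTOR_SIZE_IN_WORDS = 2048, a read
+ * of 20 words at word offset 2040 gives (2040 + 19) / 2048 = 1 while
+ * 2040 / 2048 = 0, so the read would span two sectors and is rejected.
+ */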
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ
+ **/
+static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ int ret_code = I40E_ERR_TIMEOUT;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ *data = le16_to_cpu(*(__le16 *)data);
+
+ return ret_code;
+}
+
+/**
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM.
+ *
+ * Do not use this function except in cases where the nvm lock is already
+ * taken via i40e_acquire_nvm().
+ **/
+static int __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset, u16 *data)
+{
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ return i40e_read_nvm_word_aq(hw, offset, data);
+
+ return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM.
+ **/
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ int ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = __i40e_read_nvm_word(hw, offset, data);
+
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
+
+ return ret_code;
+}
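+
+/* Hypothetical usage sketch (assumes an initialized struct i40e_hw *hw; not
+ * part of the upstream call flow): read the SW checksum word, with the lock
+ * handling done internally by i40e_read_nvm_word().
+ *
+ *	u16 sr_checksum;
+ *	int err;
+ *
+ *	err = i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &sr_checksum);
+ *	if (err)
+ *		return err;
+ */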
+
+/**
+ * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
+ * @hw: Pointer to the HW structure
+ * @module_ptr: Pointer to module in words with respect to NVM beginning
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from the start of the data area being read
+ * @words_data_size: Words to read from NVM
+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
+ **/
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
+{
+ u16 specific_ptr = 0;
+ u16 ptr_value = 0;
+ u32 offset = 0;
+ int status;
+
+ if (module_ptr != 0) {
+ status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ }
+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF
+
+ /* Pointer not initialized */
+ if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
+ ptr_value == I40E_NVM_INVALID_VAL) {
+ i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ /* Check whether the module is in SR mapped area or outside */
+ if (ptr_value & I40E_PTR_TYPE) {
+ /* Pointer points outside of the Shared RAM mapped area */
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
+
+ return I40E_ERR_PARAM;
+ } else {
+ /* Read from the Shadow RAM */
+
+ status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+ &specific_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+
+ offset = ptr_value + module_offset + specific_ptr +
+ data_offset;
+
+ status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+ data_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm buffer failed.Error code: %d.\n",
+ status);
+ }
+ }
+
+ return status;
+}
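+
+/* Worked example of the pointer chain above (hypothetical values, assuming
+ * I40E_PTR_TYPE marks the top bit): with module_ptr = 0x48 and the word at
+ * 0x48 holding ptr_value = 0x0400, the module lies inside the SR mapped
+ * area. A second read at 0x0400 + module_offset yields specific_ptr, and
+ * the buffer is finally fetched from
+ * 0x0400 + module_offset + specific_ptr + data_offset.
+ */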
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR one word at a time using the
+ * i40e_read_nvm_word_srctl() method. Any NVM ownership handling is left to
+ * the caller.
+ **/
+static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ int ret_code = 0;
+ u16 index, word;
+
+ /* Loop thru the selected region */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
+ if (ret_code)
+ break;
+ }
+
+ /* Update the number of words read from the Shadow RAM */
+ *words = word;
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The caller is expected to take NVM ownership before the read and
+ * release it afterwards.
+ **/
+static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 read_size;
+ int ret_code;
+ u16 i = 0;
+
+ do {
+ /* Calculate the number of words to read in this step. The
+ * FVL AQ does not allow reading more than one sector at a
+ * time or crossing sector boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
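+
+/* Worked example of the paging above (hypothetical values, assuming
+ * I40E_SR_SECTOR_SIZE_IN_WORDS == 2048): reading *words = 3000 at
+ * offset = 2000 issues three AQ reads: 48 words up to the sector boundary
+ * at 2048, then 2048 words for the next full sector, then the remaining
+ * 904 words with last_cmd = true, before byte-swapping the buffer in place.
+ */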
+
+/**
+ * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using either the AQ or the
+ * SRCTL method, depending on the hw flags.
+ **/
+static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset, u16 *words,
+ u16 *data)
+{
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR. When the AQ method is used,
+ * the buffer read is preceded by the NVM ownership take and followed by the
+ * release.
+ **/
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ int ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+
+ return ret_code;
+}
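+
+/* Hypothetical usage sketch (assumes an initialized struct i40e_hw *hw and a
+ * valid word offset):
+ *
+ *	u16 buf[16];
+ *	u16 words = ARRAY_SIZE(buf);
+ *	int err;
+ *
+ *	err = i40e_read_nvm_buffer(hw, offset, &words, buf);
+ *	if (!err && words != ARRAY_SIZE(buf))
+ *		err = I40E_ERR_NVM;
+ *
+ * On return, words holds the number of words actually read.
+ */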
+
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
+ **/
+static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector) in one AQ write */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: tried to write %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_update_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, 0,
+ &cmd_details);
+
+ return ret_code;
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates a SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of the
+ * VPD area are customer specific and unknown, so this function skips the
+ * maximum possible VPD size (1kB).
+ **/
+static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ struct i40e_virt_mem vmem;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ int ret_code;
+ u16 *data;
+ u16 i = 0;
+
+ ret_code = i40e_allocate_virt_mem(hw, &vmem,
+ I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+ if (ret_code)
+ goto i40e_calc_nvm_checksum_exit;
+ data = (u16 *)vmem.va;
+
+ /* read pointer to VPD area */
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Read SR page */
+ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+ ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ }
+
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module +
+ (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module +
+ (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+
+ checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ i40e_free_virt_mem(hw, &vmem);
+ return ret_code;
+}
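+
+/* Worked example of the checksum arithmetic above (hypothetical sum,
+ * assuming I40E_SR_SW_CHECKSUM_BASE == 0xBABA): if the covered words sum to
+ * 0x1234, the stored checksum becomes 0xBABA - 0x1234 = 0xA886.
+ * i40e_validate_nvm_checksum() later recomputes the same value and compares
+ * it against the word stored at I40E_SR_SW_CHECKSUM_WORD.
+ */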
+
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired by the caller before calling this function
+ * and released upon reception of the ARQ completion event.
+ * This function commits the SR to the NVM.
+ **/
+int i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+ __le16 le_sum;
+ int ret_code;
+ u16 checksum;
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
+ ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+ 1, &le_sum, true);
+ }
+
+ return ret_code;
+}
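+
+/* Hypothetical calling sequence (mirrors the CSUM_SA handling later in this
+ * file; assumes an initialized struct i40e_hw *hw):
+ *
+ *	int err = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ *
+ *	if (err)
+ *		return err;
+ *	err = i40e_update_nvm_checksum(hw);
+ *	if (err)
+ *		i40e_release_nvm(hw);
+ *
+ * On success the NVM is not released here but upon the ARQ completion
+ * event, as done in the CSUM_SA branch of i40e_nvmupd_state_init().
+ */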
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs the checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum value, @checksum can be NULL.
+ **/
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ u16 checksum_local = 0;
+ u16 checksum_sr = 0;
+ int ret_code = 0;
+
+ /* We must acquire the NVM lock in order to correctly synchronize the
+ * NVM accesses across multiple PFs. Without doing so it is possible
+ * for one of the PFs to read invalid data potentially indicating that
+ * the checksum is invalid.
+ */
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ return ret_code;
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_release_nvm(hw);
+ if (ret_code)
+ return ret_code;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+ return ret_code;
+}
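+
+/* Hypothetical usage sketch (assumes an initialized struct i40e_hw *hw):
+ *
+ *	u16 csum;
+ *	int err = i40e_validate_nvm_checksum(hw, &csum);
+ *
+ * An err of I40E_ERR_NVM_CHECKSUM means the stored and recomputed checksums
+ * differ; passing NULL instead of &csum skips reporting the value.
+ */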
+
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static inline u8 i40e_nvmupd_get_module(u32 val)
+{
+ return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+}
+static inline u8 i40e_nvmupd_get_transaction(u32 val)
+{
+ return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+}
+
+static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
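+
+/* The three helpers above unpack a single u32 config word: the module
+ * pointer in the low bits, the transaction type above it, and the
+ * preservation flags above that. Hypothetical example: a config word with
+ * module bits 0x02 and transaction bits I40E_NVM_SA decodes to module 0x02
+ * plus a single-shot (SA) transaction in i40e_nvmupd_validate_command()
+ * below.
+ */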
+
+static const char * const i40e_nvm_update_state_str[] = {
+ "I40E_NVMUPD_INVALID",
+ "I40E_NVMUPD_READ_CON",
+ "I40E_NVMUPD_READ_SNT",
+ "I40E_NVMUPD_READ_LCB",
+ "I40E_NVMUPD_READ_SA",
+ "I40E_NVMUPD_WRITE_ERA",
+ "I40E_NVMUPD_WRITE_CON",
+ "I40E_NVMUPD_WRITE_SNT",
+ "I40E_NVMUPD_WRITE_LCB",
+ "I40E_NVMUPD_WRITE_SA",
+ "I40E_NVMUPD_CSUM_CON",
+ "I40E_NVMUPD_CSUM_SA",
+ "I40E_NVMUPD_CSUM_LCB",
+ "I40E_NVMUPD_STATUS",
+ "I40E_NVMUPD_EXEC_AQ",
+ "I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
+};
+
+/**
+ * i40e_nvmupd_command - Process an NVM update command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Dispatches command depending on what update state is current
+ **/
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ int status;
+
+ /* assume success */
+ *perrno = 0;
+
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
+
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
+ }
+
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
+ bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+ return 0;
+ }
+
+ /* Clear the error status even if it has not been read, and log it */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+ * During NVMUpdate, it is observed that lock could be held for
+ * ~5ms for most commands. However lock is held for ~60ms for
+ * NVMUPD_CSUM_LCB command.
+ */
+ mutex_lock(&hw->aq.arq_mutex);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_clear_wait_state(hw);
+ status = 0;
+ break;
+ }
+
+ status = I40E_ERR_NOT_READY;
+ *perrno = -EBUSY;
+ break;
+
+ default:
+ /* invalid state, should never happen */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+
+ mutex_unlock(&hw->aq.arq_mutex);
+ return status;
+}
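+
+/* Hypothetical caller's view of a status poll (follows the validate/STATUS
+ * paths above; assumes an initialized struct i40e_hw *hw): a READ command
+ * with the EXEC transaction and module 0xf returns the current state in
+ * bytes[0] and the awaited opcode in bytes[2..3].
+ *
+ *	struct i40e_nvm_access cmd = {};
+ *	u8 buf[4];
+ *	int perrno;
+ *
+ *	cmd.command = I40E_NVM_READ;
+ *	cmd.config = (I40E_NVM_EXEC << I40E_NVM_TRANS_SHIFT) | 0xf;
+ *	cmd.data_size = sizeof(buf);
+ *	i40e_nvmupd_command(hw, &cmd, buf, &perrno);
+ */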
+
+/**
+ * i40e_nvmupd_state_init - Handle NVM update state Init
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Process legitimate commands of the Init state and conditionally set next
+ * state. Reject all other commands.
+ **/
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ i40e_release_nvm(hw);
+ }
+ break;
+
+ case I40E_NVMUPD_READ_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_ERA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *perrno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
+ } else {
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ i40e_release_nvm(hw);
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ }
+ break;
+
+ case I40E_NVMUPD_EXEC_AQ:
+ status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_RESULT:
+ status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in init state\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_ERR_NVM;
+ *perrno = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_reading - Handle NVM update state Reading
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ case I40E_NVMUPD_READ_CON:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_READ_LCB:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+ i40e_release_nvm(hw);
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in reading state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_writing - Handle NVM update state Writing
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands
+ **/
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
+ int status = 0;
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+retry:
+ switch (upd_cmd) {
+ case I40E_NVMUPD_WRITE_CON:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_LCB:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_CON:
+ /* Assumes the caller has acquired the nvm */
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_LCB:
+ /* Assumes the caller has acquired the nvm */
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+ break;
+
+ default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in writing state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
+ status = I40E_NOT_SUPPORTED;
+ *perrno = -ESRCH;
+ break;
+ }
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ u32 old_asq_status = hw->aq.asq_last_status;
+ int old_status = status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
+ * @hw: pointer to the hardware structure
+ **/
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
+{
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
+
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
+ }
+}
+
+/**
+ * i40e_nvmupd_validate_command - Validate given command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
+ **/
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ u8 module, transaction;
+
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+
+ /* limits on data size */
+ if ((cmd->data_size < 1) ||
+ (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command data_size %d\n",
+ cmd->data_size);
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
+ }
+
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
+ }
+ break;
+
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
+ }
+ break;
+ }
+
+ return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+ int status;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
+ }
+
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
+
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
+ }
+
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ u8 preservation_flags;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ preservation_flags, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
new file mode 100644
index 000000000..2bd4de03d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+#include <linux/highuid.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+/* This file is the glue between the shared code and
+ * the actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...) \
+do { \
+ dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \
+} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
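+
+/* Hypothetical usage sketch: the accessors above wrap MMIO against
+ * hw->hw_addr, where reg and val are placeholders.
+ *
+ *	u32 stat = rd32(hw, I40E_GLGEN_STAT);
+ *
+ *	wr32(hw, reg, val);
+ *	i40e_flush(hw);
+ *
+ * The trailing i40e_flush() read forces posted writes out to the device.
+ */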
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+};
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+};
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40e %02x:%02x.%x " s, \
+ (h)->bus.bus_id, (h)->bus.device, \
+ (h)->bus.func, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
new file mode 100644
index 000000000..c9c3726ea
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include <linux/avf/virtchnl.h>
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before init has
+ * happened and assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+int i40e_init_adminq(struct i40e_hw *hw);
+void i40e_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+int
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context);
+int
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+
+/* admin send queue commands */
+
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset);
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);
+
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
+int i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+int i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+int i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
+int i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg);
+int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+/* i40e_common */
+int i40e_init_shared_code(struct i40e_hw *hw);
+int i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+int i40e_update_link_info(struct i40e_hw *hw);
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid,
+ bool *max_valid);
+int i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size);
+int i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+/* prototypes for functions used for NVM access */
+int i40e_init_nvm(struct i40e_hw *hw);
+int i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data);
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data);
+int i40e_update_nvm_checksum(struct i40e_hw *hw);
+int i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+
+int i40e_set_mac_type(struct i40e_hw *hw);
+
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
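+
+/* Hypothetical usage sketch (ptype assumed already extracted from the Rx
+ * descriptor):
+ *
+ *	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+ *
+ * The decoded fields (e.g. decoded.known) describe the packet type layout.
+ */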
+
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_2_5GB:
+ return VIRTCHNL_LINK_SPEED_2_5GB;
+ case I40E_LINK_SPEED_5GB:
+ return VIRTCHNL_LINK_SPEED_5GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
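+
+/* Hypothetical usage sketch: forwarding the AdminQ-reported speed to a VF,
+ * assuming a populated struct i40e_link_status *ls.
+ *
+ *	enum virtchnl_link_speed vspeed =
+ *		i40e_virtchnl_link_speed(ls->link_speed);
+ */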
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct virtchnl_vf_resource *msg);
+int i40e_vf_reset(struct i40e_hw *hw);
+int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum virtchnl_ops v_opcode,
+ int v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+int
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
+/* Convenience wrappers for the most common use case: the _ext variants
+ * with set_mdio = false and mdio_num = 0.
+ */
+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
+int
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+int
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+int
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
new file mode 100644
index 000000000..97a9efe7b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -0,0 +1,1587 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+#include <linux/ptp_classify.h>
+#include <linux/posix-clock.h>
+
+/* The XL710 timesync is very much like Intel's 82599 design when it comes to
+ * the fundamental clock design. However, the clock operations are much simpler
+ * in the XL710 because the device supports a full 64 bits of nanoseconds.
+ * Because the field is so wide, we can forgo the cycle counter and just
+ * operate with the nanosecond field directly without fear of overflow.
+ *
+ * Much like the 82599, the update period is dependent upon the link speed:
+ * At 40Gb, 25Gb, or no link, the period is 1.6ns.
+ * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns)
+ * At 1Gb link, the period is multiplied by 20. (32ns)
+ * 1588 functionality is not supported at 100Mbps.
+ */
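+/* The increment register holds nanoseconds in 32.32 fixed point; the 40GbE
+ * base value below is ~1.6 * 2^32 (0x0199999999), i.e. 1.6 ns per tick.
+ */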
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL_MULT 2
+#define I40E_PTP_5GB_INCVAL_MULT 2
+#define I40E_PTP_1GB_INCVAL_MULT 20
+#define I40E_ISGN 0x80000000
+
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
+
+enum i40e_ptp_pin {
+ SDP3_2 = 0,
+ SDP3_3,
+ GPIO_4
+};
+
+enum i40e_can_set_pins_t {
+ CANT_DO_PINS = -1,
+ CAN_SET_PINS,
+ CAN_DO_PINS
+};
+
+static struct ptp_pin_desc sdp_desc[] = {
+ /* name idx func chan */
+ {"SDP3_2", SDP3_2, PTP_PF_NONE, 0},
+ {"SDP3_3", SDP3_3, PTP_PF_NONE, 1},
+ {"GPIO_4", GPIO_4, PTP_PF_NONE, 1},
+};
+
+enum i40e_ptp_gpio_pin_state {
+ end = -2,
+ invalid,
+ off,
+ in_A,
+ in_B,
+ out_A,
+ out_B,
+};
+
+static const char * const i40e_ptp_gpio_pin_state2str[] = {
+ "off", "in_A", "in_B", "out_A", "out_B"
+};
+
+enum i40e_ptp_led_pin_state {
+ led_end = -2,
+ low = 0,
+ high,
+};
+
+struct i40e_ptp_pins_settings {
+ enum i40e_ptp_gpio_pin_state sdp3_2;
+ enum i40e_ptp_gpio_pin_state sdp3_3;
+ enum i40e_ptp_gpio_pin_state gpio_4;
+ enum i40e_ptp_led_pin_state led2_0;
+ enum i40e_ptp_led_pin_state led2_1;
+ enum i40e_ptp_led_pin_state led3_0;
+ enum i40e_ptp_led_pin_state led3_1;
+};
+
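+/* Exhaustive table of the supported SDP3_2/SDP3_3/GPIO_4 pin combinations
+ * and the LED levels that indicate each one. The {end, ...} row is a
+ * sentinel terminating lookups in i40e_ptp_set_pins().
+ */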
+static const struct i40e_ptp_pins_settings
+ i40e_ptp_pin_led_allowed_states[] = {
+ {off, off, off, high, high, high, high},
+ {off, in_A, off, high, high, high, low},
+ {off, out_A, off, high, low, high, high},
+ {off, in_B, off, high, high, high, low},
+ {off, out_B, off, high, low, high, high},
+ {in_A, off, off, high, high, high, low},
+ {in_A, in_B, off, high, high, high, low},
+ {in_A, out_B, off, high, low, high, high},
+ {out_A, off, off, high, low, high, high},
+ {out_A, in_B, off, high, low, high, high},
+ {in_B, off, off, high, high, high, low},
+ {in_B, in_A, off, high, high, high, low},
+ {in_B, out_A, off, high, low, high, high},
+ {out_B, off, off, high, low, high, high},
+ {out_B, in_A, off, high, low, high, high},
+ {off, off, in_A, high, high, low, high},
+ {off, out_A, in_A, high, low, low, high},
+ {off, in_B, in_A, high, high, low, low},
+ {off, out_B, in_A, high, low, low, high},
+ {out_A, off, in_A, high, low, low, high},
+ {out_A, in_B, in_A, high, low, low, high},
+ {in_B, off, in_A, high, high, low, low},
+ {in_B, out_A, in_A, high, low, low, high},
+ {out_B, off, in_A, high, low, low, high},
+ {off, off, out_A, low, high, high, high},
+ {off, in_A, out_A, low, high, high, low},
+ {off, in_B, out_A, low, high, high, low},
+ {off, out_B, out_A, low, low, high, high},
+ {in_A, off, out_A, low, high, high, low},
+ {in_A, in_B, out_A, low, high, high, low},
+ {in_A, out_B, out_A, low, low, high, high},
+ {in_B, off, out_A, low, high, high, low},
+ {in_B, in_A, out_A, low, high, high, low},
+ {out_B, off, out_A, low, low, high, high},
+ {out_B, in_A, out_A, low, low, high, high},
+ {off, off, in_B, high, high, low, high},
+ {off, in_A, in_B, high, high, low, low},
+ {off, out_A, in_B, high, low, low, high},
+ {off, out_B, in_B, high, low, low, high},
+ {in_A, off, in_B, high, high, low, low},
+ {in_A, out_B, in_B, high, low, low, high},
+ {out_A, off, in_B, high, low, low, high},
+ {out_B, off, in_B, high, low, low, high},
+ {out_B, in_A, in_B, high, low, low, high},
+ {off, off, out_B, low, high, high, high},
+ {off, in_A, out_B, low, high, high, low},
+ {off, out_A, out_B, low, low, high, high},
+ {off, in_B, out_B, low, high, high, low},
+ {in_A, off, out_B, low, high, high, low},
+ {in_A, in_B, out_B, low, high, high, low},
+ {out_A, off, out_B, low, low, high, high},
+ {out_A, in_B, out_B, low, low, high, high},
+ {in_B, off, out_B, low, high, high, low},
+ {in_B, in_A, out_B, low, high, high, low},
+ {in_B, out_A, out_B, low, low, high, high},
+ {end, end, end, led_end, led_end, led_end, led_end}
+};
+
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ struct i40e_ptp_pins_settings *pins);
+
+/**
+ * i40e_ptp_extts0_work - workqueue task function
+ * @work: workqueue task structure
+ *
+ * Service for PTP external clock event
+ **/
+static void i40e_ptp_extts0_work(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work, struct i40e_pf,
+ ptp_extts0_work);
+ struct i40e_hw *hw = &pf->hw;
+ struct ptp_clock_event event;
+ u32 hi, lo;
+
+ /* Event time is captured by one of the two matched registers
+ * PRTTSYN_EVNT_L: 32 LSB of sampled time event
+ * PRTTSYN_EVNT_H: 32 MSB of sampled time event
+ * Event is defined in PRTTSYN_EVNT_0 register
+ */
+ lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0));
+ hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0));
+
+ event.timestamp = (((u64)hi) << 32) | lo;
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = hw->pf_id;
+
+ /* fire event */
+ ptp_clock_event(pf->ptp_clock, &event);
+}
+
+/**
+ * i40e_is_ptp_pin_dev - check if device supports PTP pins
+ * @hw: pointer to the hardware structure
+ *
+ * Return true if device supports PTP pins, false otherwise.
+ **/
+static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
+{
+ return hw->device_id == I40E_DEV_ID_25G_SFP28 &&
+ hw->subsystem_device_id == I40E_SUBDEV_ID_25G_PTP_PIN;
+}
+
+/**
+ * i40e_can_set_pins - check possibility of manipulating the pins
+ * @pf: board private structure
+ *
+ * Check if all conditions are satisfied to manipulate PTP pins.
+ * Return CAN_SET_PINS if pins can be set on a specific PF,
+ * CAN_DO_PINS if pins can be manipulated within the NIC, or
+ * CANT_DO_PINS otherwise.
+ **/
+static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
+{
+ if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+ dev_warn(&pf->pdev->dev,
+ "PTP external clock not supported.\n");
+ return CANT_DO_PINS;
+ }
+
+ if (!pf->ptp_pins) {
+ dev_warn(&pf->pdev->dev,
+ "PTP PIN manipulation not allowed.\n");
+ return CANT_DO_PINS;
+ }
+
+ if (pf->hw.pf_id) {
+ dev_warn(&pf->pdev->dev,
+ "PTP PINs should be accessed via PF0.\n");
+ return CAN_DO_PINS;
+ }
+
+ return CAN_SET_PINS;
+}
+
+/**
+ * i40_ptp_reset_timing_events - Reset PTP timing events
+ * @pf: Board private structure
+ *
+ * This function resets timing events for pf.
+ **/
+static void i40_ptp_reset_timing_events(struct i40e_pf *pf)
+{
+ u32 i;
+
+ spin_lock_bh(&pf->ptp_rx_lock);
+ for (i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++) {
+ /* reading and automatically clearing timing events registers */
+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i));
+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i));
+ pf->latch_events[i] = 0;
+ }
+ /* reading and automatically clearing timing events registers */
+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L);
+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H);
+
+ pf->tx_hwtstamp_timeouts = 0;
+ pf->tx_hwtstamp_skipped = 0;
+ pf->rx_hwtstamp_cleared = 0;
+ pf->latch_event_flags = 0;
+ spin_unlock_bh(&pf->ptp_rx_lock);
+}
+
+/**
+ * i40e_ptp_verify - check pins
+ * @ptp: ptp clock
+ * @pin: pin index
+ * @func: assigned function
+ * @chan: channel
+ *
+ * Check pins consistency.
+ * Return 0 on success or error on failure.
+ **/
+static int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/**
+ * i40e_ptp_read - Read the PHC time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ * @sts: structure to hold the system time before and after reading the PHC
+ *
+ * This function reads the PRTTSYN_TIME registers and stores them in a
+ * timespec. However, since the registers are 64 bits of nanoseconds, we must
+ * convert the result to a timespec before we can return.
+ **/
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ /* The timer latches on the lowest register read. */
+ ptp_read_system_prets(sts);
+ lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ ptp_read_system_postts(sts);
+ hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ *ts = ns_to_timespec64(ns);
+}
+
+/**
+ * i40e_ptp_write - Write the PHC time to the device
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * This function writes the PRTTSYN_TIME registers with the user value. Since
+ * we receive a timespec from the stack, we must convert that timespec into
+ * nanoseconds before programming the registers.
+ **/
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 ns = timespec64_to_ns(ts);
+
+ /* The timer will not update until the high register is written, so
+ * write the low register first.
+ */
+ wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+}
+
+/**
+ * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
+ * @hwtstamps: Timestamp structure to update
+ * @timestamp: Timestamp from the hardware
+ *
+ * We need to convert the NIC clock value into a hwtstamp which can be used by
+ * the upper level timestamping functions. Since the timestamp is simply a 64-
+ * bit nanosecond value, we can call ns_to_ktime directly to handle this.
+ **/
+static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
+ u64 timestamp)
+{
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+}
+
+/**
+ * i40e_ptp_adjfine - Adjust the PHC frequency
+ * @ptp: The PTP clock structure
+ * @scaled_ppm: Scaled parts per million adjustment from base
+ *
+ * Adjust the frequency of the PHC by the indicated delta from the base
+ * frequency.
+ *
+ * Scaled parts per million is ppm with a 16 bit binary fractional field.
+ **/
+static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct i40e_hw *hw = &pf->hw;
+ u64 adj, freq, diff;
+ int neg_adj = 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ smp_mb(); /* Force any pending update before accessing. */
+ freq = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
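+ /* scaled_ppm carries 16 fractional bits, so e.g. scaled_ppm = 1 << 16
+ * requests a +1 ppm adjustment and yields diff = freq / 1000000.
+ */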
+ diff = mul_u64_u64_div_u64(freq, (u64)scaled_ppm,
+ 1000000ULL << 16);
+
+ if (neg_adj)
+ adj = freq - diff;
+ else
+ adj = freq + diff;
+
+ wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins
+ * @pf: the PF private data structure
+ *
+ * Configure 1PPS signal used for PTP pins
+ **/
+static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct timespec64 now;
+ u64 ns;
+
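+ /* Arm AUX output 1: schedule the first target edge roughly two seconds
+ * out (per I40E_PTP_2_SEC_DELAY), aligned to a whole second, then set a
+ * half-second output clock period so the pin toggles as a 1PPS signal.
+ */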
+ wr32(hw, I40E_PRTTSYN_AUX_0(1), 0);
+ wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT);
+ wr32(hw, I40E_PRTTSYN_AUX_0(1), I40E_PRTTSYN_AUX_0_OUT_ENABLE);
+
+ i40e_ptp_read(pf, &now, NULL);
+ now.tv_sec += I40E_PTP_2_SEC_DELAY;
+ now.tv_nsec = 0;
+ ns = timespec64_to_ns(&now);
+
+ /* I40E_PRTTSYN_TGT_L(1) */
+ wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF);
+ /* I40E_PRTTSYN_TGT_H(1) */
+ wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32);
+ wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND);
+ wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT);
+ wr32(hw, I40E_PRTTSYN_AUX_0(1),
+ I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD);
+}
+
+/**
+ * i40e_ptp_adjtime - Adjust the PHC time
+ * @ptp: The PTP clock structure
+ * @delta: Offset in nanoseconds to adjust the PHC time by
+ *
+ * Adjust the current clock time by a delta specified in nanoseconds.
+ **/
+static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct i40e_hw *hw = &pf->hw;
+
+ mutex_lock(&pf->tmreg_lock);
+
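+ /* Deltas below ~1 second are applied atomically via the PRTTSYN_ADJ
+ * register (30-bit magnitude plus the I40E_ISGN sign bit); larger
+ * deltas fall back to a read-modify-write of the TIME registers.
+ */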
+ if (delta > -999999900LL && delta < 999999900LL) {
+ int neg_adj = 0;
+ u32 timadj;
+ u64 tohw;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ tohw = -delta;
+ } else {
+ tohw = delta;
+ }
+
+ timadj = tohw & 0x3FFFFFFF;
+ if (neg_adj)
+ timadj |= I40E_ISGN;
+ wr32(hw, I40E_PRTTSYN_ADJ, timadj);
+ } else {
+ struct timespec64 then, now;
+
+ then = ns_to_timespec64(delta);
+ i40e_ptp_read(pf, &now, NULL);
+ now = timespec64_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec64 *)&now);
+ i40e_ptp_set_1pps_signal_hw(pf);
+ }
+
+ mutex_unlock(&pf->tmreg_lock);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_gettimex - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ * @sts: structure to hold the system time before and after reading the PHC
+ *
+ * Read the device clock and return the correct value in ns, after converting it
+ * into a timespec struct.
+ **/
+static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+ mutex_lock(&pf->tmreg_lock);
+ i40e_ptp_read(pf, ts, sts);
+ mutex_unlock(&pf->tmreg_lock);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+ mutex_lock(&pf->tmreg_lock);
+ i40e_ptp_write(pf, ts);
+ mutex_unlock(&pf->tmreg_lock);
+
+ return 0;
+}
+
+/**
+ * i40e_pps_configure - configure PPS events
+ * @ptp: ptp clock
+ * @rq: clock request
+ * @on: status
+ *
+ * Configure PPS events for external clock source.
+ * Return 0 on success or error on failure.
+ **/
+static int i40e_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+ if (on)
+ i40e_ptp_set_1pps_signal_hw(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_pin_state - determine PIN state
+ * @index: PIN index
+ * @func: function assigned to PIN
+ *
+ * Determine PIN state based on PIN index and function assigned.
+ * Return PIN state.
+ **/
+static enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func)
+{
+ enum i40e_ptp_gpio_pin_state state = off;
+
+ if (index == 0 && func == PTP_PF_EXTTS)
+ state = in_A;
+ if (index == 1 && func == PTP_PF_EXTTS)
+ state = in_B;
+ if (index == 0 && func == PTP_PF_PEROUT)
+ state = out_A;
+ if (index == 1 && func == PTP_PF_PEROUT)
+ state = out_B;
+
+ return state;
+}
+
+/**
+ * i40e_ptp_enable_pin - enable PINs.
+ * @pf: private board structure
+ * @chan: channel
+ * @func: PIN function
+ * @on: state
+ *
+ * Enable PTP pins for external clock source.
+ * Return 0 on success or error code on failure.
+ **/
+static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
+ enum ptp_pin_function func, int on)
+{
+ enum i40e_ptp_gpio_pin_state *pin = NULL;
+ struct i40e_ptp_pins_settings pins;
+ int pin_index;
+
+ /* Use PF0 to set pins. Return success for user space tools */
+ if (pf->hw.pf_id)
+ return 0;
+
+ /* Preserve previous state of pins that we don't touch */
+ pins.sdp3_2 = pf->ptp_pins->sdp3_2;
+ pins.sdp3_3 = pf->ptp_pins->sdp3_3;
+ pins.gpio_4 = pf->ptp_pins->gpio_4;
+
+ /* To turn on the pin - find the corresponding one based on
+ * the given index. To turn the function off - find
+ * which pin had it assigned. Don't use ptp_find_pin_unlocked here
+ * because it tries to lock the pincfg_mux, which is already held by
+ * the ptp_pin_store() path that calls here.
+ */
+ if (on) {
+ pin_index = ptp_find_pin(pf->ptp_clock, func, chan);
+ if (pin_index < 0)
+ return -EBUSY;
+
+ switch (pin_index) {
+ case SDP3_2:
+ pin = &pins.sdp3_2;
+ break;
+ case SDP3_3:
+ pin = &pins.sdp3_3;
+ break;
+ case GPIO_4:
+ pin = &pins.gpio_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *pin = i40e_pin_state(chan, func);
+ } else {
+ pins.sdp3_2 = off;
+ pins.sdp3_3 = off;
+ pins.gpio_4 = off;
+ }
+
+ return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0;
+}
+
+/**
+ * i40e_ptp_feature_enable - Enable external clock pins
+ * @ptp: The PTP clock structure
+ * @rq: The PTP clock request structure
+ * @on: To turn feature on/off
+ *
+ * Turn the PTP pin feature (EXTTS, PEROUT, or PPS) on or off.
+ **/
+static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+ enum ptp_pin_function func;
+ unsigned int chan;
+
+ /* TODO: Implement flags handling for EXTTS and PEROUT */
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ func = PTP_PF_EXTTS;
+ chan = rq->extts.index;
+ break;
+ case PTP_CLK_REQ_PEROUT:
+ func = PTP_PF_PEROUT;
+ chan = rq->perout.index;
+ break;
+ case PTP_CLK_REQ_PPS:
+ return i40e_pps_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return i40e_ptp_enable_pin(pf, chan, func, on);
+}
+
+/**
+ * i40e_ptp_get_rx_events - Read I40E_PRTTSYN_STAT_1 and latch events
+ * @pf: the PF data structure
+ *
+ * This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
+ * for noticed latch events. This allows the driver to keep track of the first
+ * time a latch event was noticed which will be used to help clear out Rx
+ * timestamps for packets that got dropped or lost.
+ *
+ * This function will return the current value of I40E_PRTTSYN_STAT_1 and is
+ * expected to be called only while under the ptp_rx_lock.
+ **/
+static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 prttsyn_stat, new_latch_events;
+ int i;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ new_latch_events = prttsyn_stat & ~pf->latch_event_flags;
+
+ /* Update the jiffies time for any newly latched timestamp. This
+ * ensures that we store the time that we first discovered a timestamp
+ * was latched by the hardware. The service task will later determine
+ * if we should free the latch and drop that timestamp should too much
+ * time pass. This flow ensures that we only update jiffies for new
+ * events latched since the last time we checked, and not all events
+ * currently latched, so that the service task accounting remains
+ * accurate.
+ */
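+ /* The hardware provides four Rx timestamp latch registers (0-3) */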
+ for (i = 0; i < 4; i++) {
+ if (new_latch_events & BIT(i))
+ pf->latch_events[i] = jiffies;
+ }
+
+ /* Finally, we store the current status of the Rx timestamp latches */
+ pf->latch_event_flags = prttsyn_stat;
+
+ return prttsyn_stat;
+}
+
+/**
+ * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @pf: The PF private data structure
+ *
+ * This watchdog task is scheduled to detect the error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. This
+ * error is rare but leaves the device in a state unable to timestamp
+ * any future packets.
+ **/
+void i40e_ptp_rx_hang(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ unsigned int i, cleared = 0;
+
+ /* Since we cannot turn off the Rx timestamp logic if the device is
+ * configured for Tx timestamping, we check if Rx timestamping is
+ * configured. We don't want to spuriously warn about Rx timestamp
+ * hangs if we don't care about the timestamps.
+ */
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ return;
+
+ spin_lock_bh(&pf->ptp_rx_lock);
+
+ /* Update current latch times for Rx events */
+ i40e_ptp_get_rx_events(pf);
+
+ /* Check all the currently latched Rx events and see whether they have
+ * been latched for over a second. It is assumed that any timestamp
+ * should have been cleared within this time, or else it was captured
+ * for a dropped frame that the driver never received. Thus, we will
+ * clear any timestamp that has been latched for over 1 second.
+ */
+ for (i = 0; i < 4; i++) {
+ if ((pf->latch_event_flags & BIT(i)) &&
+ time_is_before_jiffies(pf->latch_events[i] + HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
+ pf->latch_event_flags &= ~BIT(i);
+ cleared++;
+ }
+ }
+
+ spin_unlock_bh(&pf->ptp_rx_lock);
+
+ /* Log a warning if more than 2 timestamps got dropped in the same
+ * check. We don't want to warn about all drops because it can occur
+ * in normal scenarios such as PTP frames on multicast addresses we
+ * aren't listening to. However, administrator should know if this is
+ * the reason packets aren't receiving timestamps.
+ */
+ if (cleared > 2)
+ dev_dbg(&pf->pdev->dev,
+ "Dropped %d missed RXTIME timestamp events\n",
+ cleared);
+
+ /* Finally, update the rx_hwtstamp_cleared counter */
+ pf->rx_hwtstamp_cleared += cleared;
+}
+
+/**
+ * i40e_ptp_tx_hang - Detect error case when Tx timestamp register is hung
+ * @pf: The PF private data structure
+ *
+ * This watchdog task is run periodically to make sure that we clear the Tx
+ * timestamp logic if we don't obtain a timestamp in a reasonable amount of
+ * time. This is unexpected in the normal case, but if it occurs it
+ * permanently prevents timestamping of future packets.
+ **/
+void i40e_ptp_tx_hang(struct i40e_pf *pf)
+{
+ struct sk_buff *skb;
+
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ return;
+
+ /* Nothing to do if we're not already waiting for a timestamp */
+ if (!test_bit(__I40E_PTP_TX_IN_PROGRESS, pf->state))
+ return;
+
+ /* We already have a handler routine which is run when we are notified
+ * of a Tx timestamp in the hardware. If we don't get an interrupt
+ * within a second it is reasonable to assume that we never will.
+ */
+ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
+ skb = pf->ptp_tx_skb;
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+ /* Free the skb after we clear the bitlock */
+ dev_kfree_skb_any(skb);
+ pf->tx_hwtstamp_timeouts++;
+ }
+}
+
+/**
+ * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
+ * @pf: Board private structure
+ *
+ * Read the value of the Tx timestamp from the registers, convert it into a
+ * value consumable by the stack, and store that result into the shhwtstamps
+ * struct before returning it up the stack.
+ **/
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = pf->ptp_tx_skb;
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ return;
+
+ /* don't attempt to timestamp if we don't have an skb */
+ if (!pf->ptp_tx_skb)
+ return;
+
+ lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+ i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
+
+ /* Clear the bit lock as soon as possible after reading the register,
+ * and prior to notifying the stack via skb_tstamp_tx(). Otherwise
+ * applications might wake up and attempt to request another transmit
+ * timestamp prior to the bit lock being cleared.
+ */
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
+ * @pf: Board private structure
+ * @skb: Particular skb to send timestamp with
+ * @index: Index into the receive timestamp registers for the timestamp
+ *
+ * The XL710 receives a notification in the receive descriptor with an offset
+ * into the set of RXTIME registers where the timestamp is for that skb. This
+ * function goes and fetches the receive timestamp from that offset, if a valid
+ * one exists. The RXTIME registers are in ns, so we must convert the result
+ * first.
+ **/
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
+{
+ u32 prttsyn_stat, hi, lo;
+ struct i40e_hw *hw;
+ u64 ns;
+
+ /* Since we cannot turn off the Rx timestamp logic if the device is
+ * doing Tx timestamping, check if Rx timestamping is configured.
+ */
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ return;
+
+ hw = &pf->hw;
+
+ spin_lock_bh(&pf->ptp_rx_lock);
+
+ /* Get current Rx events and update latch times */
+ prttsyn_stat = i40e_ptp_get_rx_events(pf);
+
+ /* TODO: Should we warn about missing Rx timestamp event? */
+ if (!(prttsyn_stat & BIT(index))) {
+ spin_unlock_bh(&pf->ptp_rx_lock);
+ return;
+ }
+
+ /* Clear the latched event since we're about to read its register */
+ pf->latch_event_flags &= ~BIT(index);
+
+ lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
+ hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+
+ spin_unlock_bh(&pf->ptp_rx_lock);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
+}
+
+/**
+ * i40e_ptp_set_increment - Utility function to update clock increment rate
+ * @pf: Board private structure
+ *
+ * During a link change, the DMA frequency that drives the 1588 logic will
+ * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
+ * we must update the increment value per clock tick.
+ **/
+void i40e_ptp_set_increment(struct i40e_pf *pf)
+{
+ struct i40e_link_status *hw_link_info;
+ struct i40e_hw *hw = &pf->hw;
+ u64 incval;
+ u32 mult;
+
+ hw_link_info = &hw->phy.link_info;
+
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+
+ switch (hw_link_info->link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ mult = I40E_PTP_10GB_INCVAL_MULT;
+ break;
+ case I40E_LINK_SPEED_5GB:
+ mult = I40E_PTP_5GB_INCVAL_MULT;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ mult = I40E_PTP_1GB_INCVAL_MULT;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ {
+ static int warn_once;
+
+ if (!warn_once) {
+ dev_warn(&pf->pdev->dev,
+ "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
+ warn_once++;
+ }
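+ /* An increment of zero freezes the PHC counter. */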
+ mult = 0;
+ break;
+ }
+ case I40E_LINK_SPEED_40GB:
+ default:
+ mult = 1;
+ break;
+ }
+
+ /* The increment value is calculated by taking the base 40GbE incvalue
+ * and multiplying it by a factor based on the link speed.
+ */
+ incval = I40E_PTP_40GB_INCVAL * mult;
+
+ /* Write the new increment value into the increment register. The
+ * hardware will not update the clock until both registers have been
+ * written.
+ */
+ wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+
+ /* Update the base adjustment value. */
+ WRITE_ONCE(pf->ptp_adj_mult, mult);
+ smp_mb(); /* Force the above update. */
+}
+
+/**
+ * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Obtain the current hardware timestamping settings as requested. To do
+ * this, we keep a shadow copy of the timestamp settings rather than
+ * attempting to deconstruct them from the registers.
+ **/
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &pf->tstamp_config;
+
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_free_pins - free memory used by PTP pins
+ * @pf: Board private structure
+ *
+ * Release memory allocated for PTP pins.
+ **/
+static void i40e_ptp_free_pins(struct i40e_pf *pf)
+{
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ kfree(pf->ptp_pins);
+ kfree(pf->ptp_caps.pin_config);
+ pf->ptp_pins = NULL;
+ }
+}
+
+/**
+ * i40e_ptp_set_pin_hw - Set HW GPIO pin
+ * @hw: pointer to the hardware structure
+ * @pin: pin index
+ * @state: pin state
+ *
+ * Set status of GPIO pin for external clock handling.
+ **/
+static void i40e_ptp_set_pin_hw(struct i40e_hw *hw,
+ unsigned int pin,
+ enum i40e_ptp_gpio_pin_state state)
+{
+ switch (state) {
+ case off:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0);
+ break;
+ case in_A:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0);
+ break;
+ case in_B:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0);
+ break;
+ case out_A:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1);
+ break;
+ case out_B:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_ptp_set_led_hw - Set HW GPIO led
+ * @hw: pointer to the hardware structure
+ * @led: led index
+ * @state: led state
+ *
+ * Set status of GPIO led for external clock handling.
+ **/
+static void i40e_ptp_set_led_hw(struct i40e_hw *hw,
+ unsigned int led,
+ enum i40e_ptp_led_pin_state state)
+{
+ switch (state) {
+ case low:
+ wr32(hw, I40E_GLGEN_GPIO_SET,
+ I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | led);
+ break;
+ case high:
+ wr32(hw, I40E_GLGEN_GPIO_SET,
+ I40E_GLGEN_GPIO_SET_DRV_SDP_DATA |
+ I40E_GLGEN_GPIO_SET_SDP_DATA_HI | led);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_ptp_init_leds_hw - init LEDs
+ * @hw: pointer to a hardware structure
+ *
+ * Set initial state of LEDs
+ **/
+static void i40e_ptp_init_leds_hw(struct i40e_hw *hw)
+{
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_0),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_1),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_0),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_1),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+}
+
+/**
+ * i40e_ptp_set_pins_hw - Set HW GPIO pins
+ * @pf: Board private structure
+ *
+ * This function sets GPIO pins for PTP
+ **/
+static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
+{
+ const struct i40e_ptp_pins_settings *pins = pf->ptp_pins;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* pin must be disabled before it may be used */
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off);
+
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, pins->sdp3_2);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, pins->sdp3_3);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, pins->gpio_4);
+
+ i40e_ptp_set_led_hw(hw, I40E_LED2_0, pins->led2_0);
+ i40e_ptp_set_led_hw(hw, I40E_LED2_1, pins->led2_1);
+ i40e_ptp_set_led_hw(hw, I40E_LED3_0, pins->led3_0);
+ i40e_ptp_set_led_hw(hw, I40E_LED3_1, pins->led3_1);
+
+ dev_info(&pf->pdev->dev,
+ "PTP configuration set to: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n",
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+}
+
+/**
+ * i40e_ptp_set_pins - set PTP pins in HW
+ * @pf: Board private structure
+ * @pins: PTP pins to be applied
+ *
+ * Validate and set PTP pins in HW for specific PF.
+ * Return 0 on success or negative value on error.
+ **/
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ struct i40e_ptp_pins_settings *pins)
+{
+ enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
+ int i = 0;
+
+ if (pin_caps == CANT_DO_PINS)
+ return -EOPNOTSUPP;
+ else if (pin_caps == CAN_DO_PINS)
+ return 0;
+
+ if (pins->sdp3_2 == invalid)
+ pins->sdp3_2 = pf->ptp_pins->sdp3_2;
+ if (pins->sdp3_3 == invalid)
+ pins->sdp3_3 = pf->ptp_pins->sdp3_3;
+ if (pins->gpio_4 == invalid)
+ pins->gpio_4 = pf->ptp_pins->gpio_4;
+ while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) {
+ if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 &&
+ pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 &&
+ pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) {
+ pins->led2_0 =
+ i40e_ptp_pin_led_allowed_states[i].led2_0;
+ pins->led2_1 =
+ i40e_ptp_pin_led_allowed_states[i].led2_1;
+ pins->led3_0 =
+ i40e_ptp_pin_led_allowed_states[i].led3_0;
+ pins->led3_1 =
+ i40e_ptp_pin_led_allowed_states[i].led3_1;
+ break;
+ }
+ i++;
+ }
+ if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) {
+ dev_warn(&pf->pdev->dev,
+ "Unsupported PTP pin configuration: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n",
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+
+ return -EPERM;
+ }
+ memcpy(pf->ptp_pins, pins, sizeof(*pins));
+ i40e_ptp_set_pins_hw(pf);
+ i40_ptp_reset_timing_events(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_alloc_pins - allocate PTP pins structure
+ * @pf: Board private structure
+ *
+ * Allocate the PTP pins structure and program the initial pin state.
+ **/
+int i40e_ptp_alloc_pins(struct i40e_pf *pf)
+{
+ if (!i40e_is_ptp_pin_dev(&pf->hw))
+ return 0;
+
+ pf->ptp_pins =
+ kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL);
+
+ if (!pf->ptp_pins) {
+ dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n");
+ return -I40E_ERR_NO_MEMORY;
+ }
+
+ pf->ptp_pins->sdp3_2 = off;
+ pf->ptp_pins->sdp3_3 = off;
+ pf->ptp_pins->gpio_4 = off;
+ pf->ptp_pins->led2_0 = high;
+ pf->ptp_pins->led2_1 = high;
+ pf->ptp_pins->led3_0 = high;
+ pf->ptp_pins->led3_1 = high;
+
+ /* Use PF0 to set pins in HW. Return success for user space tools */
+ if (pf->hw.pf_id)
+ return 0;
+
+ i40e_ptp_init_leds_hw(&pf->hw);
+ i40e_ptp_set_pins_hw(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
+ * @pf: Board private structure
+ * @config: hwtstamp settings requested or saved
+ *
+ * Control hardware registers to enter the specific mode requested by the
+ * user. Also used during reset path to ensure that timestamp settings are
+ * maintained.
+ *
+ * Note: modifies config in place, and may update the requested mode to a
+ * broader one if the specific filter is not directly supported.
+ **/
+static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
+ struct hwtstamp_config *config)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 tsyntype, regval;
+
+ /* Selects external trigger to cause event */
+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+ /* Bit 17:16 is EVNTLVL, 01B rising edge */
+ regval &= 0;
+ regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT);
+ /* regval: 0001 0000 0000 0000 0000 */
+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+ /* Enable interrupts */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ pf->ptp_tx = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ pf->ptp_tx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pf->ptp_rx = false;
+ /* We set the type to V1, but do not enable UDP packet
+ * recognition. In this way, we should be as close to
+ * disabling PTP Rx timestamps as possible: V1 packets are
+ * always UDP, and L2 packets are a V2-only feature.
+ */
+ tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ return -ERANGE;
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ return -ERANGE;
+ fallthrough;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2;
+ if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) {
+ tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ } else {
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ }
+ break;
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out all 1588-related registers to clear and unlatch them. */
+ spin_lock_bh(&pf->ptp_rx_lock);
+ rd32(hw, I40E_PRTTSYN_STAT_0);
+ rd32(hw, I40E_PRTTSYN_TXTIME_H);
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->latch_event_flags = 0;
+ spin_unlock_bh(&pf->ptp_rx_lock);
+
+ /* Enable/disable the Tx timestamp interrupt based on user input. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ if (pf->ptp_tx)
+ regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ else
+ regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ regval = rd32(hw, I40E_PFINT_ICR0_ENA);
+ if (pf->ptp_tx)
+ regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ else
+ regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, regval);
+
+ /* Although there is no simple on/off switch for Rx, we "disable" Rx
+ * timestamps by selecting V1-only mode and clearing UDP
+ * recognition. This ought to disable all PTP Rx timestamps, as V1
+ * packets are always over UDP. Note that software is configured to
+ * ignore Rx timestamps via the pf->ptp_rx flag.
+ */
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ /* clear everything but the enable bit */
+ regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ /* now enable bits for desired Rx timestamps */
+ regval |= tsyntype;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = i40e_ptp_set_timestamp_mode(pf, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ pf->tstamp_config = config;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * i40e_init_pin_config - initialize pins.
+ * @pf: private board structure
+ *
+ * Initialize pins for external clock source.
+ * Return 0 on success or error code on failure.
+ **/
+static int i40e_init_pin_config(struct i40e_pf *pf)
+{
+ int i;
+
+ pf->ptp_caps.n_pins = 3;
+ pf->ptp_caps.n_ext_ts = 2;
+ pf->ptp_caps.pps = 1;
+ pf->ptp_caps.n_per_out = 2;
+
+ pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins,
+ sizeof(*pf->ptp_caps.pin_config),
+ GFP_KERNEL);
+ if (!pf->ptp_caps.pin_config)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->ptp_caps.n_pins; i++) {
+ snprintf(pf->ptp_caps.pin_config[i].name,
+ sizeof(pf->ptp_caps.pin_config[i].name),
+ "%s", sdp_desc[i].name);
+ pf->ptp_caps.pin_config[i].index = sdp_desc[i].index;
+ pf->ptp_caps.pin_config[i].func = PTP_PF_NONE;
+ pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan;
+ }
+
+ pf->ptp_caps.verify = i40e_ptp_verify;
+ pf->ptp_caps.enable = i40e_ptp_feature_enable;
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_create_clock - Create PTP clock device for userspace
+ * @pf: Board private structure
+ *
+ * This function creates a new PTP clock device, but only if one does not
+ * already exist, so it is safe to call repeatedly. It returns an error if it
+ * can't create one, and success if a device already exists. Should be used
+ * by i40e_ptp_init to create clock initially, and prevent global resets from
+ * creating new clock devices.
+ **/
+static long i40e_ptp_create_clock(struct i40e_pf *pf)
+{
+ /* no need to create a clock device if we already have one */
+ if (!IS_ERR_OR_NULL(pf->ptp_clock))
+ return 0;
+
+ strscpy(pf->ptp_caps.name, i40e_driver_name,
+ sizeof(pf->ptp_caps.name) - 1);
+ pf->ptp_caps.owner = THIS_MODULE;
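+ /* max_adj is the largest frequency adjustment, in parts per billion,
+ * that the PTP core may request through adjfine.
+ */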
+ pf->ptp_caps.max_adj = 999999999;
+ pf->ptp_caps.adjfine = i40e_ptp_adjfine;
+ pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+ pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
+ pf->ptp_caps.settime64 = i40e_ptp_settime;
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ int err = i40e_init_pin_config(pf);
+
+ if (err)
+ return err;
+ }
+
+ /* Attempt to register the clock before enabling the hardware. */
+ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
+ if (IS_ERR(pf->ptp_clock))
+ return PTR_ERR(pf->ptp_clock);
+
+ /* clear the hwtstamp settings here during clock create, instead of
+ * during regular init, so that we can maintain settings across a
+ * reset or suspend.
+ */
+ pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ /* Set the previous "reset" time to the current Kernel clock time */
+ ktime_get_real_ts64(&pf->ptp_prev_hw_time);
+ pf->ptp_reset_start = ktime_get();
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time
+ * @pf: Board private structure
+ *
+ * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should
+ * be called at the end of preparing to reset, just before hardware reset
+ * occurs, in order to preserve the PTP time as close as possible across
+ * resets.
+ */
+void i40e_ptp_save_hw_time(struct i40e_pf *pf)
+{
+ /* don't try to access the PTP clock if it's not enabled */
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return;
+
+ i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
+ /* Get a monotonic starting time for this reset */
+ pf->ptp_reset_start = ktime_get();
+}
+
+/**
+ * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs
+ * @pf: Board private structure
+ *
+ * Restore the PTP hardware clock registers. We previously cached the PTP
+ * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible,
+ * update this value based on the time delta since the time was saved, using
+ * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference.
+ *
+ * This ensures that the hardware clock is restored to nearly what it should
+ * have been if a reset had not occurred.
+ */
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
+{
+ ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start);
+
+ /* Update the previous HW time with the ktime delta */
+ timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta));
+
+ /* Restore the hardware clock registers */
+ i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+}
+
+/**
+ * i40e_ptp_init - Initialize the 1588 support after device probe or reset
+ * @pf: Board private structure
+ *
+ * This function sets device up for 1588 support. The first time it is run, it
+ * will create a PHC clock device. It does not create a clock device if one
+ * already exists. It also reconfigures the device after a reset.
+ *
+ * The first time a clock is created, i40e_ptp_create_clock will set
+ * pf->ptp_prev_hw_time to the current system time. During resets, it is
+ * expected that this timespec will be set to the last known PTP clock time,
+ * in order to preserve the clock time as close as possible across a reset.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ struct i40e_hw *hw = &pf->hw;
+ u32 pf_id;
+ long err;
+
+ /* Only one PF is assigned to control 1588 logic per port. Do not
+ * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
+ */
+ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+ I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ if (hw->pf_id != pf_id) {
+ pf->flags &= ~I40E_FLAG_PTP;
+ dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
+ __func__,
+ netdev->name);
+ return;
+ }
+
+ mutex_init(&pf->tmreg_lock);
+ spin_lock_init(&pf->ptp_rx_lock);
+
+ /* ensure we have a clock device */
+ err = i40e_ptp_create_clock(pf);
+ if (err) {
+ pf->ptp_clock = NULL;
+ dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
+ __func__);
+ } else if (pf->ptp_clock) {
+ u32 regval;
+
+ if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+ dev_info(&pf->pdev->dev, "PHC enabled\n");
+ pf->flags |= I40E_FLAG_PTP;
+
+ /* Ensure the clocks are running. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+ /* Set the increment value per clock tick. */
+ i40e_ptp_set_increment(pf);
+
+ /* reset timestamping mode */
+ i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
+
+ /* Restore the clock time based on last known value */
+ i40e_ptp_restore_hw_time(pf);
+ }
+
+ i40e_ptp_set_1pps_signal_hw(pf);
+}
+
+/**
+ * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
+ * @pf: Board private structure
+ *
+ * This function handles the cleanup work required from the initialization by
+ * clearing out the important information and unregistering the PHC.
+ **/
+void i40e_ptp_stop(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 regval;
+
+ pf->flags &= ~I40E_FLAG_PTP;
+ pf->ptp_tx = false;
+ pf->ptp_rx = false;
+
+ if (pf->ptp_tx_skb) {
+ struct sk_buff *skb = pf->ptp_tx_skb;
+
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ dev_kfree_skb_any(skb);
+ }
+
+ if (pf->ptp_clock) {
+ ptp_clock_unregister(pf->ptp_clock);
+ pf->ptp_clock = NULL;
+ dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ }
+
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off);
+ }
+
+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+ regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK;
+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+ /* Disable interrupts */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval &= ~I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ i40e_ptp_free_pins(pf);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
new file mode 100644
index 000000000..7339003aa
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -0,0 +1,899 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
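+/* Fields below follow the FIELD_SHIFT/FIELD_MASK pattern; I40E_MASK(mask,
+ * shift), defined in i40e_type.h, expands to ((u32)(mask) << (shift)).
+ */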
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PRT_SWR_PM_THR 0x0026CD00 /* Reset: CORER */
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT 0
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_MASK I40E_MASK(0xFF, I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, \
+ I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, \
+ I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200C /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, \
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, \
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, \
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, \
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, \
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX 143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0
+#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX 143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0
+#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL(_i) (0x0035C000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL(_i) (0x0032C000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL(_i) (0x0036D800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL(_i) (0x0033D800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL(_i) (0x0036CC00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL(_i) (0x0033CC00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL(_i) (0x0036C000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL(_i) (0x0033C000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+/* Redefined for X722 family */
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
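+
+/* Fields are extracted with the *_SHIFT/*_MASK pairs defined above; an
+ * illustrative read of the last reset type, assuming the driver's
+ * rd32() register accessor, looks like:
+ *
+ *	u32 val = rd32(hw, I40E_GLGEN_RSTAT);
+ *	u32 reset_type = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >>
+ *			 I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ */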
+#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
new file mode 100644
index 000000000..4d2782e76
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+};
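+
+/* All codes other than I40E_SUCCESS are negative, so callers can treat
+ * any non-zero return as failure; an illustrative call site:
+ *
+ *	status = i40e_pf_reset(hw);
+ *	if (status)
+ *		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", status);
+ */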
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
new file mode 100644
index 000000000..b5b122999
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for i40e will be "i40e".
+ *
+ * This file is named i40e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i40e
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I40E_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/*
+ * i40e_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_i40e{,vf}_example(args...)
+ *
+ * ... as:
+ *
+ * i40e_trace(example, args...)
+ *
+ * ... to resolve to the PF or VF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Tracepoints should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, i40e_trace_enabled(trace_name) wraps references to
+ * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ */
+#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40e ## _ ## trace_name)
+#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+
+#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+
+#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
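+
+/* As a concrete example, _I40E_TRACE_NAME() pastes the i40e token in,
+ * so i40e_trace(xmit_frame_ring, skb, ring) expands to
+ * trace_i40e_xmit_frame_ring(skb, ring) and
+ * i40e_trace_enabled(xmit_frame_ring) expands to
+ * trace_i40e_xmit_frame_ring_enabled().
+ */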
+
+/* Events common to PF and VF. Corresponding versions will be defined
+ * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * macro above will select the right trace point name for the driver
+ * being built from shared code.
+ */
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(
+ i40e_tx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf),
+
+ /* The convention here is to make the first fields in the
+ * TP_STRUCT match the TP_PROTO exactly. This enables the use
+ * of the args struct generated by the tplist tool (from the
+ * bcc-tools package) to be used for those fields. To access
+ * fields other than the tracepoint args will require the
+ * tplist output to be adjusted.
+ */
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, buf)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->buf = buf;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p buf %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->buf)
+);
+
+DEFINE_EVENT(
+ i40e_tx_template, i40e_clean_tx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DEFINE_EVENT(
+ i40e_tx_template, i40e_clean_tx_irq_unmap,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DECLARE_EVENT_CLASS(
+ i40e_rx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_16byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb),
+
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, skb)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p skb %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(
+ i40e_rx_template, i40e_clean_rx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_16byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DEFINE_EVENT(
+ i40e_rx_template, i40e_clean_rx_irq_rx,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_16byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DECLARE_EVENT_CLASS(
+ i40e_xmit_template,
+
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring),
+
+ TP_STRUCT__entry(
+ __field(void*, skb)
+ __field(void*, ring)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->ring = ring;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s skb: %p ring: %p",
+ __get_str(devname), __entry->skb,
+ __entry->ring)
+);
+
+DEFINE_EVENT(
+ i40e_xmit_template, i40e_xmit_frame_ring,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+DEFINE_EVENT(
+ i40e_xmit_template, i40e_xmit_frame_ring_drop,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+/* Events unique to the PF. */
+
+#endif /* _I40E_TRACE_H_ */
+/* This must be outside ifdef _I40E_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE i40e_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
new file mode 100644
index 000000000..94cf82668
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -0,0 +1,3977 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include <linux/prefetch.h>
+#include <linux/bpf_trace.h>
+#include <net/mpls.h>
+#include <net/xdp.h>
+#include "i40e.h"
+#include "i40e_trace.h"
+#include "i40e_prototype.h"
+#include "i40e_txrx_common.h"
+#include "i40e_xsk.h"
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_fdir - Generate a Flow Director descriptor based on fdata
+ * @tx_ring: Tx ring to send buffer on
+ * @fdata: Flow director filter data
+ * @add: Indicate if we are adding a rule or deleting one
+ *
+ **/
+static void i40e_fdir(struct i40e_ring *tx_ring,
+ struct i40e_fdir_filter *fdata, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ u32 flex_ptype, dtype_cmd;
+ u16 i;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
+ (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
+ (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ /* Use LAN VSI Id if not programmed by user */
+ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
+ ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= add ?
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT :
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
+ (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
+ (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+
+ if (fdata->cnt_index) {
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
+ ((u32)fdata->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ }
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->rsvd = cpu_to_le32(0);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+ fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
+}
+
+#define I40E_FD_CLEAN_DELAY 10
+/**
+ * i40e_program_fdir_filter - Program a Flow Director filter
+ * @fdir_data: Packet data that will be filter parameters
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @pf: The PF pointer
+ * @add: True for add/update, False for remove
+ **/
+static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
+ u8 *raw_packet, struct i40e_pf *pf,
+ bool add)
+{
+ struct i40e_tx_buffer *tx_buf, *first;
+ struct i40e_tx_desc *tx_desc;
+ struct i40e_ring *tx_ring;
+ struct i40e_vsi *vsi;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd = 0;
+ u16 i;
+
+ /* find existing FDIR VSI */
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (!vsi)
+ return -ENOENT;
+
+ tx_ring = vsi->tx_rings[0];
+ dev = tx_ring->dev;
+
+ /* we need two descriptors to add/del a filter and we can wait */
+ for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
+ if (!i)
+ return -EAGAIN;
+ msleep_interruptible(1);
+ }
+
+ dma = dma_map_single(dev, raw_packet,
+ I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_fail;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ first = &tx_ring->tx_bi[i];
+ i40e_fdir(tx_ring, fdir_data, add);
+
+ /* Now program a dummy descriptor */
+ i = tx_ring->next_to_use;
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
+
+ tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
+
+ memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
+
+ tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
+ tx_buf->raw_buf = (void *)raw_packet;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+
+ /* Mark the data descriptor to be watched */
+ first->next_to_watch = tx_desc;
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+ return 0;
+
+dma_fail:
+ return -1;
+}
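+
+/* Programming one filter thus consumes two descriptors from the FDIR
+ * Tx ring, matching the availability check above: the filter program
+ * descriptor written by i40e_fdir(), then a dummy data descriptor
+ * (I40E_TX_DESC_CMD_DUMMY) carrying the raw packet from which the
+ * hardware extracts the fields to match on.
+ */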
+
+/**
+ * i40e_create_dummy_packet - Constructs dummy packet for HW
+ * @dummy_packet: preallocated space for dummy packet
+ * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
+ * @l4proto: next level protocol used in data portion of l3
+ * @data: filter data
+ *
+ * Returns address of layer 4 protocol dummy packet.
+ **/
+static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ bool is_vlan = !!data->vlan_tag;
+ struct vlan_hdr vlan = {};
+ struct ipv6hdr ipv6 = {};
+ struct ethhdr eth = {};
+ struct iphdr ip = {};
+ u8 *tmp;
+
+ if (ipv4) {
+ eth.h_proto = cpu_to_be16(ETH_P_IP);
+ ip.protocol = l4proto;
+ ip.version = 0x4;
+ ip.ihl = 0x5;
+
+ ip.daddr = data->dst_ip;
+ ip.saddr = data->src_ip;
+ } else {
+ eth.h_proto = cpu_to_be16(ETH_P_IPV6);
+ ipv6.nexthdr = l4proto;
+ ipv6.version = 0x6;
+
+ memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
+ sizeof(__be32) * 4);
+ memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
+ sizeof(__be32) * 4);
+ }
+
+ if (is_vlan) {
+ vlan.h_vlan_TCI = data->vlan_tag;
+ vlan.h_vlan_encapsulated_proto = eth.h_proto;
+ eth.h_proto = data->vlan_etype;
+ }
+
+ tmp = dummy_packet;
+ memcpy(tmp, &eth, sizeof(eth));
+ tmp += sizeof(eth);
+
+ if (is_vlan) {
+ memcpy(tmp, &vlan, sizeof(vlan));
+ tmp += sizeof(vlan);
+ }
+
+ if (ipv4) {
+ memcpy(tmp, &ip, sizeof(ip));
+ tmp += sizeof(ip);
+ } else {
+ memcpy(tmp, &ipv6, sizeof(ipv6));
+ tmp += sizeof(ipv6);
+ }
+
+ return tmp;
+}
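+
+/* The pointer returned above lands right after the L3 header, which is
+ * how the *_DUMMY_PACKET_LEN offsets used below follow from header
+ * sizes: 14 bytes of Ethernet plus a 20 byte IPv4 header gives 34
+ * (I40E_IP_DUMMY_PACKET_LEN) and 14 plus a 40 byte IPv6 header gives 54
+ * (I40E_IP6_DUMMY_PACKET_LEN); adding an 8 byte UDP, 20 byte TCP or 12
+ * byte SCTP header yields 42/62, 54/74 and 46/66 respectively.
+ */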
+
+/**
+ * i40e_create_dummy_udp_packet - helper function to create UDP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
+ * @l4proto: next level protocol used in data portion of l3
+ * @data: filter data
+ *
+ * Helper function to populate udp fields.
+ **/
+static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ struct udphdr *udp;
+ u8 *tmp;
+
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
+ udp = (struct udphdr *)(tmp);
+ udp->dest = data->dst_port;
+ udp->source = data->src_port;
+}
+
+/**
+ * i40e_create_dummy_tcp_packet - helper function to create TCP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: is layer 3 packet of version 4 or 6
+ * @l4proto: next level protocol used in data portion of l3
+ * @data: filter data
+ *
+ * Helper function to populate tcp fields.
+ **/
+static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ struct tcphdr *tcp;
+ u8 *tmp;
+ /* Dummy tcp packet */
+ static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
+
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
+
+ tcp = (struct tcphdr *)tmp;
+ memcpy(tcp, tcp_packet, sizeof(tcp_packet));
+ tcp->dest = data->dst_port;
+ tcp->source = data->src_port;
+}
+
+/**
+ * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: is layer 3 packet of version 4 or 6
+ * @l4proto: next level protocol used in data portion of l3
+ * @data: filter data
+ *
+ * Helper function to populate sctp fields.
+ **/
+static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
+ u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ struct sctphdr *sctp;
+ u8 *tmp;
+
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
+
+ sctp = (struct sctphdr *)tmp;
+ sctp->dest = data->dst_port;
+ sctp->source = data->src_port;
+}
+
+/**
+ * i40e_prepare_fdir_filter - Prepare and program fdir filter
+ * @pf: physical function to attach filter to
+ * @fd_data: filter data
+ * @add: add or delete filter
+ * @packet_addr: address of dummy packet, used in filtering
+ * @payload_offset: offset from dummy packet address to user defined data
+ * @pctype: Packet type for which filter is used
+ *
+ * Helper function to offset data of dummy packet, program it and
+ * handle errors.
+ **/
+static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
+ struct i40e_fdir_filter *fd_data,
+ bool add, char *packet_addr,
+ int payload_offset, u8 pctype)
+{
+ int ret;
+
+ if (fd_data->flex_filter) {
+ u8 *payload;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ payload = packet_addr + payload_offset;
+
+ /* If user provided vlan, offset payload by vlan header length */
+ if (!!fd_data->vlan_tag)
+ payload += VLAN_HLEN;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
+ fd_data->pctype = pctype;
+ ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+ fd_data->pctype, fd_data->fd_id, ret);
+ /* Free the packet buffer since it wasn't added to the ring */
+ return -EOPNOTSUPP;
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
+ if (add)
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
+ else
+ dev_info(&pf->pdev->dev,
+ "Filter deleted for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
+ }
+
+ return ret;
+}
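+
+/* As an illustration, a hypothetical rule with flex_word 0x1234 and
+ * flex_offset 6 stores the big-endian pattern 0x1234 six bytes into
+ * the L4 payload of the dummy packet, so the hardware matches real
+ * packets carrying those two bytes at the same payload offset.
+ */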
+
+/**
+ * i40e_change_filter_num - Update the per-protocol filter counters
+ * @ipv4: true if the layer 3 packet is IPv4, false for IPv6
+ * @add: add or delete filter
+ * @ipv4_filter_num: field to update
+ * @ipv6_filter_num: field to update
+ *
+ * Update filter number field for pf.
+ **/
+static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
+ u16 *ipv6_filter_num)
+{
+ if (add) {
+ if (ipv4)
+ (*ipv4_filter_num)++;
+ else
+ (*ipv6_filter_num)++;
+ } else {
+ if (ipv4)
+ (*ipv4_filter_num)--;
+ else
+ (*ipv6_filter_num)--;
+ }
+}
+
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
+/**
+ * i40e_add_del_fdir_udp - Add/Remove UDP filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ * @ipv4: true for v4, false for v6
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
+
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_UDPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_UDPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+
+ if (ret) {
+ kfree(raw_packet);
+ return ret;
+ }
+
+ i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
+ &pf->fd_udp6_filter_cnt);
+
+ return 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+#define I40E_TCPIP6_DUMMY_PACKET_LEN 74
+/**
+ * i40e_add_del_fdir_tcp - Add/Remove TCP filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ * @ipv4: true for v4, false for v6
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_TCPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_TCPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+
+ if (ret) {
+ kfree(raw_packet);
+ return ret;
+ }
+
+ i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
+ &pf->fd_tcp6_filter_cnt);
+
+ if (add) {
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
+ }
+ return 0;
+}
+
+#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+#define I40E_SCTPIP6_DUMMY_PACKET_LEN 66
+/**
+ * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ * @ipv4: true for v4, false for v6
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
+
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_SCTPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_SCTPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+
+ if (ret) {
+ kfree(raw_packet);
+ return ret;
+ }
+
+ i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
+ &pf->fd_sctp6_filter_cnt);
+
+ return 0;
+}
+
+#define I40E_IP_DUMMY_PACKET_LEN 34
+#define I40E_IP6_DUMMY_PACKET_LEN 54
+/**
+ * i40e_add_del_fdir_ip - Add/Remove IP Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ * @ipv4: true for v4, false for v6
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
+{
+ struct i40e_pf *pf = vsi->back;
+ int payload_offset;
+ u8 *raw_packet;
+ int iter_start;
+ int iter_end;
+ int ret;
+ int i;
+
+ if (ipv4) {
+ iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ } else {
+ iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ }
+
+ for (i = iter_start; i <= iter_end; i++) {
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+		/* IPv6 "no next header" (IPPROTO_NONE) differs from IPv4 */
+ (void)i40e_create_dummy_packet
+ (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
+ fd_data);
+
+ payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
+ I40E_IP6_DUMMY_PACKET_LEN;
+ ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
+ payload_offset, i);
+ if (ret)
+ goto err;
+ }
+
+ i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
+ &pf->fd_ip6_filter_cnt);
+
+ return 0;
+err:
+ kfree(raw_packet);
+ return ret;
+}
+
+/**
+ * i40e_add_del_fdir - Build raw packets to add/del fdir filter
+ * @vsi: pointer to the targeted VSI
+ * @input: filter to add or delete
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 on success, or a negative errno on failure.
+ **/
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add)
+{
+ enum ip_ver { ipv6 = 0, ipv4 = 1 };
+ struct i40e_pf *pf = vsi->back;
+ int ret;
+
+ switch (input->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
+ break;
+ case UDP_V4_FLOW:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
+ break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
+ break;
+ case TCP_V6_FLOW:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
+ break;
+ case UDP_V6_FLOW:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
+ break;
+ case SCTP_V6_FLOW:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
+ break;
+ case IP_USER_FLOW:
+ switch (input->ipl4_proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
+ break;
+ case IPPROTO_IP:
+ ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
+ break;
+ default:
+ /* We cannot support masking based on protocol */
+ dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
+ input->ipl4_proto);
+ return -EINVAL;
+ }
+ break;
+ case IPV6_USER_FLOW:
+ switch (input->ipl4_proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_IP:
+ ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
+ break;
+ default:
+ /* We cannot support masking based on protocol */
+ dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
+ input->ipl4_proto);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
+ input->flow_type);
+ return -EINVAL;
+ }
+
+	/* The buffer allocated here will normally be freed by
+ * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
+ * completion. In the event of an error adding the buffer to the FDIR
+ * ring, it will immediately be freed. It may also be freed by
+ * i40e_clean_tx_ring() when closing the VSI.
+ */
+ return ret;
+}
+
+/**
+ * i40e_fd_handle_status - check the Programming Status for FD
+ * @rx_ring: the Rx ring for this descriptor
+ * @qword0_raw: qword0
+ * @qword1: qword1 after le_to_cpu
+ * @prog_id: the id originally used for programming
+ *
+ * This is used to verify if the FD programming or invalidation
+ * requested by SW to the HW is successful or not and take actions accordingly.
+ **/
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1, u8 prog_id)
+{
+ struct i40e_pf *pf = rx_ring->vsi->back;
+ struct pci_dev *pdev = pf->pdev;
+ struct i40e_16b_rx_wb_qw0 *qw0;
+ u32 fcnt_prog, fcnt_avail;
+ u32 error;
+
+ qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
+ error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+ if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
+ if (qw0->hi_dword.fd_id != 0 ||
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
+ dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
+ pf->fd_inv);
+
+ /* Check if the programming error is for ATR.
+ * If so, auto disable ATR and set a state for
+ * flush in progress. Next time we come here if flush is in
+ * progress do nothing, once flush is complete the state will
+ * be cleared.
+ */
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
+ return;
+
+ pf->fd_add_err++;
+ /* store the current atr filter count */
+ pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
+
+ if (qw0->hi_dword.fd_id == 0 &&
+ test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
+ /* These set_bit() calls aren't atomic with the
+			 * test_bit() here, but worst case we potentially
+ * disable ATR and queue a flush right after SB
+ * support is re-enabled. That shouldn't cause an
+ * issue in practice
+ */
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
+ set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
+ }
+
+ /* filter programming failed most likely due to table full */
+ fcnt_prog = i40e_get_global_fd_count(pf);
+ fcnt_avail = pf->fdir_pf_filter_count;
+ /* If ATR is running fcnt_prog can quickly change,
+ * if we are very close to full, it makes sense to disable
+ * FD ATR/SB and then re-enable it when there is room.
+ */
+ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
+ pf->state))
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ }
+ } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
+ qw0->hi_dword.fd_id);
+ }
+}
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else if (ring_is_xdp(ring))
+ xdp_return_frame(tx_buffer->xdpf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40e_clean_tx_ring - Free all Tx buffers in the ring
+ * @tx_ring: ring to be cleaned
+ **/
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ unsigned long bi_size;
+ u16 i;
+
+ if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
+ i40e_xsk_clean_tx_ring(tx_ring);
+ } else {
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring,
+ &tx_ring->tx_bi[i]);
+ }
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+}
+
+/**
+ * i40e_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40e_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40e_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many tx descriptors not processed
+ * @ring: the ring of descriptors
+ * @in_sw: use SW variables
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+{
+ u32 head, tail;
+
+ if (!in_sw) {
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+ } else {
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
+ }
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
+}
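+
+/* Example with a 512 descriptor ring: if head = 500 and tail = 10 the
+ * ring has wrapped, so the math above reports
+ * tail + count - head = 10 + 512 - 500 = 22 pending descriptors.
+ */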
+
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * VSI has netdev and netdev has Tx queues. This function checks each of
+ * those Tx queues and, if a queue looks hung, triggers recovery by
+ * issuing a SW interrupt.
+ **/
+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct net_device *netdev;
+ unsigned int i;
+ int packets;
+
+ if (!vsi)
+ return;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ tx_ring = vsi->tx_rings[i];
+ if (tx_ring && tx_ring->desc) {
+ /* If packet counter has not changed the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt_ctr would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.packets & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ i40e_force_wb(vsi, tx_ring->q_vector);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to i40e_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt_ctr =
+ i40e_get_tx_pending(tx_ring, true) ? packets : -1;
+ }
+ }
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
+{
+ int i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_head;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+
+ /* free the skb/XDP data */
+ if (ring_is_xdp(tx_ring))
+ xdp_return_frame(tx_buf->xdpf);
+ else
+ napi_consume_skb(tx_buf->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ i40e_trace(clean_tx_irq_unmap,
+ tx_ring, tx_desc, tx_buf);
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
+ i40e_arm_wb(tx_ring, vsi, budget);
+
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return !!budget;
+}
+
+/**
+ * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to enable writeback
+ *
+ **/
+static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ u16 flags = q_vector->tx.ring[0].flags;
+ u32 val;
+
+ if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+ return;
+
+ if (q_vector->arm_wb_state)
+ return;
+
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
+ val);
+ } else {
+ val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
+
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+ }
+ q_vector->arm_wb_state = true;
+}
+
+/**
+ * i40e_force_wb - Issue SW Interrupt so HW does a wb
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
+ } else {
+ u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+ }
+}
+
+static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
+ struct i40e_ring_container *rc)
+{
+ return &q_vector->rx == rc;
+}
+
+static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
+{
+ unsigned int divisor;
+
+ switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ case I40E_LINK_SPEED_20GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
+ break;
+ default:
+ case I40E_LINK_SPEED_10GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ case I40E_LINK_SPEED_100MB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
+ break;
+ }
+
+ return divisor;
+}
+
+/**
+ * i40e_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_update_itr(struct i40e_q_vector *q_vector,
+ struct i40e_ring_container *rc)
+{
+ unsigned int avg_wire_size, packets, bytes, itr;
+ unsigned long next_update = jiffies;
+
+ /* If we don't have any rings just leave ourselves set for maximum
+ * possible latency so we take ourselves out of the equation.
+ */
+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
+ return;
+
+ /* For Rx we want to push the delay up and default to low latency.
+ * for Tx we want to pull the delay down and default to high latency.
+ */
+ itr = i40e_container_is_rx(q_vector, rc) ?
+ I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
+ I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
+
+	/* If more than 1 - 2 jiffies have passed since the last update we
+	 * can assume that either packets are coming in so slowly there
+	 * hasn't been any work, or that there is so much work that NAPI
+	 * is handling interrupt moderation and we don't need to do anything.
+ */
+ if (time_after(next_update, rc->next_update))
+ goto clear_counts;
+
+ /* If itr_countdown is set it means we programmed an ITR within
+ * the last 4 interrupt cycles. This has a side effect of us
+ * potentially firing an early interrupt. In order to work around
+ * this we need to throw out any data received for a few
+ * interrupts following the update.
+ */
+ if (q_vector->itr_countdown) {
+ itr = rc->target_itr;
+ goto clear_counts;
+ }
+
+ packets = rc->total_packets;
+ bytes = rc->total_bytes;
+
+ if (i40e_container_is_rx(q_vector, rc)) {
+ /* If Rx there are 1 to 4 packets and bytes are less than
+ * 9000 assume insufficient data to use bulk rate limiting
+ * approach unless Tx is already in bulk rate limiting. We
+ * are likely latency driven.
+ */
+ if (packets && packets < 4 && bytes < 9000 &&
+ (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
+ itr = I40E_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size;
+ }
+ } else if (packets < 4) {
+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
+ * bulk mode and we are receiving 4 or fewer packets just
+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+ * that the Rx can relax.
+ */
+ if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & I40E_ITR_MASK) ==
+ I40E_ITR_ADAPTIVE_MAX_USECS)
+ goto clear_counts;
+ } else if (packets > 32) {
+ /* If we have processed over 32 packets in a single interrupt
+ * for Tx assume we need to switch over to "bulk" mode.
+ */
+ rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
+ }
+
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * Between 4 and 56 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
+ */
+ if (packets < 56) {
+ itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ }
+ goto clear_counts;
+ }
+
+ if (packets <= 256) {
+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+ itr &= I40E_ITR_MASK;
+
+ /* Between 56 and 112 is our "goldilocks" zone where we are
+ * working out "just right". Just report that our current
+ * ITR is good for us.
+ */
+ if (packets <= 112)
+ goto clear_counts;
+
+		/* If the packet count is above 112 we are likely looking
+ * at a slight overrun of the delay we want. Try halving
+ * our delay to see if that will cut the number of packets
+ * in half per interrupt.
+ */
+ itr /= 2;
+ itr &= I40E_ITR_MASK;
+ if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
+ itr = I40E_ITR_ADAPTIVE_MIN_USECS;
+
+ goto clear_counts;
+ }
+
+ /* The paths below assume we are dealing with a bulk ITR since
+ * number of packets is greater than 256. We are just going to have
+ * to compute a value and try to bring the count under control,
+ * though for smaller packet sizes there isn't much we can do as
+ * NAPI polling will likely be kicking in sooner rather than later.
+ */
+ itr = I40E_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+ /* If packet counts are 256 or greater we can assume we have a gross
+ * overestimation of what the rate should be. Instead of trying to fine
+ * tune it just use the formula below to try and dial in an exact value
+ * give the current packet size of the frame.
+ */
+ avg_wire_size = bytes / packets;
+
+ /* The following is a crude approximation of:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to
+ *
+ * (170 * (size + 24)) / (size + 640) = ITR
+ *
+ * We first do some math on the packet size and then finally bitshift
+ * by 8 after rounding up. We also have to account for PCIe link speed
+ * difference as ITR scales based on this.
+ */
+ if (avg_wire_size <= 60) {
+ /* Start at 250k ints/sec */
+ avg_wire_size = 4096;
+ } else if (avg_wire_size <= 380) {
+ /* 250K ints/sec to 60K ints/sec */
+ avg_wire_size *= 40;
+ avg_wire_size += 1696;
+ } else if (avg_wire_size <= 1084) {
+ /* 60K ints/sec to 36K ints/sec */
+ avg_wire_size *= 15;
+ avg_wire_size += 11452;
+ } else if (avg_wire_size <= 1980) {
+ /* 36K ints/sec to 30K ints/sec */
+ avg_wire_size *= 5;
+ avg_wire_size += 22420;
+ } else {
+ /* plateau at a limit of 30K ints/sec */
+ avg_wire_size = 32256;
+ }
+
+ /* If we are in low latency mode halve our delay which doubles the
+	 * rate to somewhere between 100K and 16K ints/sec
+ */
+ if (itr & I40E_ITR_ADAPTIVE_LATENCY)
+ avg_wire_size /= 2;
+
+ /* Resultant value is 256 times larger than it needs to be. This
+ * gives us room to adjust the value as needed to either increase
+ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+ *
+ * Use addition as we have already recorded the new latency flag
+ * for the ITR value.
+ */
+ itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
+ I40E_ITR_ADAPTIVE_MIN_INC;
+
+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ }
+
+clear_counts:
+ /* write back value */
+ rc->target_itr = itr;
+
+ /* next update should occur within next jiffy */
+ rc->next_update = next_update + 1;
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
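+
+/* Worked example for the size-based path above, assuming the
+ * I40E_ITR_ADAPTIVE_MIN_INC value of 0x0002 from i40e_txrx.h: at 40Gb
+ * link speed i40e_itr_divisor() returns 2 * 1024 = 2048, so small
+ * frames (avg_wire_size forced to 4096) add
+ * DIV_ROUND_UP(4096, 2048) * 2 = 4 usecs of delay, matching the
+ * "start at 250k ints/sec" noted above.
+ */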
+
+static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi[idx];
+}
+
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = i40e_rx_bi(rx_ring, nta);
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+
+ /* clear contents of buffer_info */
+ old_buff->page = NULL;
+}
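+
+/* Together with the half-page buffer scheme used on 4K page systems,
+ * this lets a page whose other half is still held by the stack be
+ * re-posted immediately at next_to_alloc, so the common case avoids a
+ * fresh page allocation entirely (counted as page_reuse_count in
+ * i40e_alloc_mapped_page()).
+ */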
+
+/**
+ * i40e_clean_programming_status - clean the programming status descriptor
+ * @rx_ring: the rx ring that has this descriptor
+ * @qword0_raw: qword0
+ * @qword1: qword1 representing status_error_len in CPU ordering
+ *
+ * Flow director should handle FD_FILTER_STATUS to check its filter programming
+ * status being successful or not and take actions accordingly. FCoE should
+ * handle its context/filter programming/invalidation status and take actions.
+ **/
+void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1)
+{
+ u8 id;
+
+ id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+ if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
+ i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
+}
+
+/**
+ * i40e_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(tx_ring->tx_bi);
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ u64_stats_init(&tx_ring->syncp);
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+	/* add a u32 for head writeback; the 4K alignment below then
+	 * guarantees the extra space is at least one cache line in size
+ */
+ tx_ring->size += sizeof(u32);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt_ctr = -1;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
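+
+/* The u32 appended past the last descriptor above is the head
+ * writeback location: hardware writes its current head index there and
+ * i40e_get_head() reads it back, which is how i40e_get_tx_pending() can
+ * observe the hardware head even though the XL710 exposes no readable
+ * ring head register.
+ */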
+
+static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
+{
+ memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
+}
+
+/**
+ * i40e_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
+ if (rx_ring->xsk_pool) {
+ i40e_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+ }
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
+
+ if (!rx_bi->page)
+ continue;
+
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+
+skip_free:
+ if (rx_ring->xsk_pool)
+ i40e_clear_rx_bi_zc(rx_ring);
+ else
+ i40e_clear_rx_bi(rx_ring);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40e_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40e_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == I40E_VSI_MAIN)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int err;
+
+ u64_stats_init(&rx_ring->syncp);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ return -ENOMEM;
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
+ if (err < 0)
+ return err;
+ }
+
+ rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+
+ rx_ring->rx_bi =
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new tail index
+ **/
+void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
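+
+/* With 4K pages and the default 2K buffer length, every buffer reports
+ * a fixed truesize of half a page (2048 bytes) regardless of frame
+ * size; on architectures with pages of 8K or more, truesize instead
+ * tracks the aligned frame size, plus skb_shared_info overhead when a
+ * headroom offset is in use.
+ */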
+
+/**
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
+ *
+ * Returns true if the page was successfully allocated or
+ * reused.
+ **/
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ /* alloc new page for storage */
+ page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ rx_ring->rx_stats.page_alloc_count++;
+
+ /* map page for use */
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, i40e_rx_pg_order(rx_ring));
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = rx_ring->rx_offset;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
+
+ return true;
+}
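+
+/* Taking USHRT_MAX - 1 page references up front and mirroring them in
+ * pagecnt_bias lets the hot path hand buffers to the stack by simply
+ * decrementing the local bias rather than issuing an atomic reference
+ * bump per frame; any unused bias is handed back via
+ * __page_frag_cache_drain() when the ring is cleaned.
+ */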
+
+/**
+ * i40e_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
+ **/
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 ntu = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return false;
+
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = i40e_rx_bi(rx_ring, ntu);
+
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = i40e_rx_bi(rx_ring, 0);
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ return false;
+
+no_buffers:
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ /* make sure to come back via polling to try again after
+ * allocation failure
+ */
+ return true;
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 &&
+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
+ /* likely incorrect csum if alternate IP extension headers found */
+ if (ipv6 &&
+ rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
+ return;
+
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ return;
+
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
+ */
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ fallthrough;
+ default:
+ break;
+ }
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * i40e_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline int i40e_ptype_to_htype(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ else
+ return PKT_HASH_TYPE_L2;
+}
+
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u8 rx_ptype)
+{
+ u32 hash;
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+ hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ }
+}
+
+/**
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ **/
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb)
+{
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
+ u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
+ u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+
+ if (unlikely(tsynvalid))
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
+
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(vlan_tag));
+ }
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ * @rx_desc: pointer to the EOP Rx descriptor
+ *
+ * If the hardware reported a receive error the skb is freed. In addition,
+ * if the skb is not at least 60 bytes we need to pad it so that it is
+ * large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
+ union i40e_rx_desc *rx_desc)
+{
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc,
+ BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
+ * @rx_buffer: buffer containing the page
+ * @rx_stats: rx stats structure for the rx ring
+ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
+ *
+ * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * page freed.
+ *
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
+ */
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct i40e_rx_queue_stats *rx_stats,
+ int rx_buffer_pgcnt)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* Is any reuse possible? */
+ if (!dev_page_is_reusable(page)) {
+ rx_stats->page_waive_count++;
+ return false;
+ }
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+ rx_stats->page_busy_count++;
+ return false;
+ }
+#else
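+/* the largest page offset at which another 2K receive buffer still fits */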
+#define I40E_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+ rx_stats->page_busy_count++;
+ return false;
+ }
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ *
+ * The function will then update the page offset.
+ **/
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ struct sk_buff *skb,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
+#endif
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ * @rx_buffer_pgcnt: buffer page refcount
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
+{
+ struct i40e_rx_buffer *rx_buffer;
+
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
+ prefetch_page_address(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+/**
+ * i40e_construct_skb - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ * @xdp: xdp_buff pointing to the data
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ struct xdp_buff *xdp)
+{
+ unsigned int size = xdp->data_end - xdp->data;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ net_prefetch(xdp->data);
+
+ /* Note, we get here by enabling legacy-rx via:
+ *
+ * ethtool --set-priv-flags <dev> legacy-rx on
+ *
+ * In this mode, we currently get 0 extra XDP headroom as
+ * opposed to having legacy-rx off, where we process XDP
+ * packets going to stack via i40e_build_skb(). The latter
+ * provides us currently with 192 bytes of headroom.
+ *
+ * For i40e_construct_skb() mode it means that the
+ * xdp->data_meta will always point to xdp->data, since
+ * the helper cannot expand the head. Should this ever
+	 * change in future for legacy-rx mode on, then let's also
+ * add xdp->data_meta handling here.
+ */
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > I40E_RX_HDR_SIZE)
+ headlen = eth_get_headlen(skb->dev, xdp->data,
+ I40E_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), xdp->data,
+ ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset + headlen,
+ size, truesize);
+
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ /* buffer is unused, reset bias back to rx_buffer */
+ rx_buffer->pagecnt_bias++;
+ }
+
+ return skb;
+}
+
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @xdp: xdp_buff pointing to the data
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
+#endif
+ struct sk_buff *skb;
+
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly as xdp->data, otherwise we
+ * likely have a consumer accessing first few bytes of meta
+ * data, and then actual data.
+ */
+ net_prefetch(xdp->data_meta);
+
+ /* build an skb around the page buffer */
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
+
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+ }
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise return true indicating that this is in fact a non-EOP buffer.
+ */
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
+ struct i40e_ring *xdp_ring);
+
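+/**
+ * i40e_xmit_xdp_tx_ring - transmit an XDP buffer on an XDP Tx ring
+ * @xdp: XDP buffer to transmit
+ * @xdp_ring: XDP Tx ring to transmit on
+ *
+ * Converts the buffer to an xdp_frame and queues it for transmit.
+ * Returns an I40E_XDP_* result; I40E_XDP_CONSUMED if conversion fails.
+ **/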
+int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (unlikely(!xdpf))
+ return I40E_XDP_CONSUMED;
+
+ return i40e_xmit_xdp_ring(xdpf, xdp_ring);
+}
+
+/**
+ * i40e_run_xdp - run an XDP program
+ * @rx_ring: Rx ring being processed
+ * @xdp: XDP buffer containing the frame
+ * @xdp_prog: XDP program to run
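+ *
+ * Returns an I40E_XDP_* verdict: PASS, TX, REDIR, or CONSUMED.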
+ **/
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
+{
+ int err, result = I40E_XDP_PASS;
+ struct i40e_ring *xdp_ring;
+ u32 act;
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ if (err)
+ goto out_failure;
+ result = I40E_XDP_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ return result;
+}
+
+/**
+ * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
+ * @rx_ring: Rx ring
+ * @rx_buffer: Rx buffer to adjust
+ * @size: Size of adjustment
+ **/
+static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
+
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
+/**
+ * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
+ * @xdp_ring: XDP Tx ring
+ *
+ * This function updates the XDP Tx ring tail register.
+ **/
+void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+{
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
+/**
+ * i40e_update_rx_stats - Update Rx ring statistics
+ * @rx_ring: rx descriptor ring
+ * @total_rx_bytes: number of bytes received
+ * @total_rx_packets: number of packets received
+ *
+ * This function updates the Rx ring statistics.
+ **/
+void i40e_update_rx_stats(struct i40e_ring *rx_ring,
+ unsigned int total_rx_bytes,
+ unsigned int total_rx_packets)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+}
+
+/**
+ * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @rx_ring: Rx ring
+ * @xdp_res: Result of the receive batch
+ *
+ * This function bumps XDP Tx tail and/or flush redirect map, and
+ * should be called when a batch of packets has been processed in the
+ * napi loop.
+ **/
+void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
+{
+ if (xdp_res & I40E_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_res & I40E_XDP_TX) {
+ struct i40e_ring *xdp_ring =
+ rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+
+ i40e_xdp_ring_update_tail(xdp_ring);
+ }
+}
+
+/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
+ bool failure = false;
+ struct xdp_buff xdp;
+ int xdp_res = 0;
+
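+	/* with pages smaller than 8K the buffer truesize is constant, so
+	 * frame_sz can be seeded once here; for larger pages it is
+	 * recomputed per frame below
+	 */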
+#if (PAGE_SIZE < 8192)
+ frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_rx_packets < (unsigned int)budget)) {
+ struct i40e_rx_buffer *rx_buffer;
+ union i40e_rx_desc *rx_desc;
+ int rx_buffer_pgcnt;
+ unsigned int size;
+ u64 qword;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ failure = failure ||
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup, and overlaps with hdr_addr
+ * which is always zero because packet split isn't used, if the
+ * hardware wrote DD then the length will be non-zero
+ */
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
+
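+		/* programming status descriptors (e.g. Flow Director filter
+		 * results) carry no packet data, so recycle the paired
+		 * buffer and advance to the next descriptor
+		 */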
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring,
+ rx_desc->raw.qword[0],
+ qword);
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ i40e_inc_ntc(rx_ring);
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ cleaned_count++;
+ continue;
+ }
+
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
+ break;
+
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
+
+ /* retrieve a buffer from the ring */
+ if (!skb) {
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depends on the frame size */
+ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+#endif
+ xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
+ }
+
+ if (xdp_res) {
+ if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_packets++;
+ } else if (skb) {
+ i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ } else if (ring_uses_build_skb(rx_ring)) {
+ skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
+ } else {
+ skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
+ }
+
+ /* exit if we failed to retrieve a buffer */
+ if (!xdp_res && !skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
+ cleaned_count++;
+
+ i40e_inc_ntc(rx_ring);
+ if (i40e_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ /* populate checksum, VLAN, and protocol */
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ skb = NULL;
+
+ /* update budget accounting */
+ total_rx_packets++;
+ }
+
+ i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
+ rx_ring->skb = skb;
+
+ i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+
+ /* guarantee a trip back through this routine if there was a failure */
+ return failure ? budget : (int)total_rx_packets;
+}
+
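+/**
+ * i40e_buildreg_itr - build a value for writing to the dynamic ITR register
+ * @type: ITR index to program (I40E_RX_ITR, I40E_TX_ITR, or I40E_ITR_NONE)
+ * @itr: interval in usecs to program
+ **/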
+static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+{
+ u32 val;
+
+ /* We don't bother with setting the CLEARPBA bit as the data sheet
+ * points out doing so is "meaningless since it was already
+ * auto-cleared". The auto-clearing happens when the interrupt is
+ * asserted.
+ *
+	 * Hardware errata 28 also indicates that writing to a
+	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
+	 * an event in the PBA anyway, so we need to rely on the automask
+	 * to hold pending events for us until the interrupt is re-enabled.
+ *
+ * The itr value is reported in microseconds, and the register
+ * value is recorded in 2 microsecond units. For this reason we
+ * only need to shift by the interval shift - 1 instead of the
+ * full value.
+ */
+ itr &= I40E_ITR_MASK;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
+
+ return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
+
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ u32 intval;
+
+ /* If we don't have MSIX, then we only need to re-enable icr0 */
+ if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
+ i40e_irq_dynamic_enable_icr0(vsi->back);
+ return;
+ }
+
+ /* These will do nothing if dynamic updates are not enabled */
+ i40e_update_itr(q_vector, &q_vector->tx);
+ i40e_update_itr(q_vector, &q_vector->rx);
+
+ /* This block of logic allows us to get away with only updating
+ * one ITR value with each interrupt. The idea is to perform a
+ * pseudo-lazy update with the following criteria.
+ *
+ * 1. Rx is given higher priority than Tx if both are in same state
+	 * 2. If we must reduce an ITR, that update is given highest priority.
+ * 3. We then give priority to increasing ITR based on amount.
+ */
+ if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+ /* Rx ITR needs to be reduced, this is highest priority */
+ intval = i40e_buildreg_itr(I40E_RX_ITR,
+ q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+ ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+ (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+ /* Tx ITR needs to be reduced, this is second priority
+ * Tx ITR needs to be increased more than Rx, fourth priority
+ */
+ intval = i40e_buildreg_itr(I40E_TX_ITR,
+ q_vector->tx.target_itr);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+ /* Rx ITR needs to be increased, third priority */
+ intval = i40e_buildreg_itr(I40E_RX_ITR,
+ q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else {
+ /* No ITR update, lowest priority */
+ intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ }
+
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
+ wr32(hw, INTREG(q_vector->reg_idx), intval);
+}
+
+/**
+ * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
+ bool clean_complete = true;
+ bool arm_wb = false;
+ int budget_per_ring;
+ int work_done = 0;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx) {
+ bool wd = ring->xsk_pool ?
+ i40e_clean_xdp_tx_irq(vsi, ring) :
+ i40e_clean_tx_irq(vsi, ring, budget);
+
+ if (!wd) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
+ ring->arm_wb = false;
+ }
+
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ goto tx_only;
+
+ /* normally we have 1 Rx ring per q_vector */
+ if (unlikely(q_vector->num_ringpairs > 1))
+ /* We attempt to distribute budget to each Rx queue fairly, but
+ * don't allow the budget to go below 1 because that would exit
+ * polling early.
+ */
+ budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
+ else
+ /* Max of 1 Rx ring in this q_vector so give it the budget */
+ budget_per_ring = budget;
+
+ i40e_for_each_ring(ring, q_vector->rx) {
+ int cleaned = ring->xsk_pool ?
+ i40e_clean_rx_irq_zc(ring, budget_per_ring) :
+ i40e_clean_rx_irq(ring, budget_per_ring);
+
+ work_done += cleaned;
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
+ }
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete) {
+ int cpu_id = smp_processor_id();
+
+ /* It is possible that the interrupt affinity has changed but,
+ * if the cpu is pegged at 100%, polling will never exit while
+ * traffic continues and the interrupt will be stuck on this
+ * cpu. We check to make sure affinity is correct before we
+ * continue to poll, otherwise we must stop polling so the
+ * interrupt can move to the correct cpu.
+ */
+ if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+ /* Tell napi that we are done polling */
+ napi_complete_done(napi, work_done);
+
+ /* Force an interrupt */
+ i40e_force_wb(vsi, q_vector);
+
+ /* Return budget-1 so that polling stops */
+ return budget - 1;
+ }
+tx_only:
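+		/* a Tx ring requested a forced write back; arm write back
+		 * on ITR so the remaining descriptors get flushed
+		 */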
+ if (arm_wb) {
+ q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+ i40e_enable_wb_on_itr(vsi, q_vector);
+ }
+ return budget;
+ }
+
+ if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ q_vector->arm_wb_state = false;
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ i40e_update_enable_itr(vsi, q_vector);
+
+ return min(work_done, budget - 1);
+}
+
+/**
+ * i40e_atr - Add a Flow Director ATR filter
+ * @tx_ring: ring to add programming descriptor to
+ * @skb: send buffer
+ * @tx_flags: send tx flags
+ **/
+static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ struct tcphdr *th;
+ unsigned int hlen;
+ u32 flex_ptype, dtype_cmd;
+ int l4_proto;
+ u16 i;
+
+ /* make sure ATR is enabled */
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+ return;
+
+ if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
+ return;
+
+ /* if sampling is disabled do nothing */
+ if (!tx_ring->atr_sample_rate)
+ return;
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+ return;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
+ skb_inner_network_header(skb) : skb_network_header(skb);
+
+ /* Note: tx_flags gets modified to reflect inner protocols in
+	 * the i40e_tx_enable_csum() function if encap is enabled.
+ */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ /* access ihl as u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ l4_proto = hdr.ipv4->protocol;
+ } else {
+ /* find the start of the innermost ipv6 header */
+ unsigned int inner_hlen = hdr.network - skb->data;
+ unsigned int h_offset = inner_hlen;
+
+ /* this function updates h_offset to the end of the header */
+ l4_proto =
+ ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
+ /* hlen will contain our best estimate of the tcp header */
+ hlen = h_offset - inner_hlen;
+ }
+
+ if (l4_proto != IPPROTO_TCP)
+ return;
+
+ th = (struct tcphdr *)(hdr.network + hlen);
+
+ /* Due to lack of space, no more new filters can be programmed */
+ if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
+ return;
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
+ /* HW ATR eviction will take care of removing filters on FIN
+ * and RST packets.
+ */
+ if (th->fin || th->rst)
+ return;
+ }
+
+ tx_ring->atr_count++;
+
+ /* sample on all syn/fin/rst packets or once every atr sample rate */
+ if (!th->fin &&
+ !th->syn &&
+ !th->rst &&
+ (tx_ring->atr_count < tx_ring->atr_sample_rate))
+ return;
+
+ tx_ring->atr_count = 0;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
+ flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
+ (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+ (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= (th->fin || th->rst) ?
+ (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+ (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ else
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
+ dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->rsvd = cpu_to_le32(0);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+ fdir_desc->fd_id = cpu_to_le32(0);
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up the generic transmit flags related to VLAN
+ * tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns a negative error code if the frame should be dropped, otherwise
+ * returns 0 to indicate the flags have been set properly.
+ **/
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ if (protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off by the user the
+ * stack sets the protocol to 8021q so that the driver
+ * can take any steps required to support the SW only
+ * VLAN handling. In our case the driver doesn't need
+ * to take any further steps so just set the protocol
+ * to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ goto out;
+ }
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ goto out;
+
+ /* Insert 802.1p priority into VLAN header */
+ if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
+ (skb->priority != TC_PRIO_CONTROL)) {
+ tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
+ tx_flags |= (skb->priority & 0x7) <<
+ I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
+ if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = skb_vlan_eth_hdr(skb);
+ vhdr->h_vlan_TCI = htons(tx_flags >>
+ I40E_TX_FLAGS_VLAN_SHIFT);
+ } else {
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ }
+ }
+
+out:
+ *flags = tx_flags;
+ return 0;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @first: pointer to first Tx buffer for xmit
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ *
+ * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error
+ **/
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss)
+{
+ struct sk_buff *skb = first->skb;
+ u64 cd_cmd, cd_tso_len, cd_mss;
+ __be16 protocol;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
+ u16 gso_size;
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
+ } else {
+ ip.v6->payload_len = 0;
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
+ }
+
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
+ /* determine offset of outer transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from outer checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* reset pointers to inner headers */
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+ }
+
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ }
+
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+
+ /* update GSO size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = gso_size;
+ *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tsyn - set up the tsyn context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ *
+ * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
+ **/
+static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u64 *cd_type_cmd_tso_mss)
+{
+ struct i40e_pf *pf;
+
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return 0;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (tx_flags & I40E_TX_FLAGS_TSO)
+ return 0;
+
+ /* only timestamp the outbound packet if the user has requested it and
+ * we are not already transmitting a packet to be timestamped
+ */
+ pf = i40e_netdev_to_pf(tx_ring->netdev);
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return 0;
+
+ if (pf->ptp_tx &&
+ !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ pf->ptp_tx_start = jiffies;
+ pf->ptp_tx_skb = skb_get(skb);
+ } else {
+ pf->tx_hwtstamp_skipped++;
+ return 0;
+ }
+
+ *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT;
+
+ return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: pointer to Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ unsigned char *exthdr;
+ u32 offset, cmd = 0;
+ __be16 frag_off;
+ __be16 protocol;
+ u8 l4_proto = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol)) {
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+	/* set the tx_flags to indicate the IP protocol type. This is
+	 * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ else
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ /* compute outer L2 header size */
+ offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ if (skb->encapsulation) {
+ u32 tunnel = 0;
+ /* define outer network header type */
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+ I40E_TX_CTX_EXT_IP_IPV4 :
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+ l4_proto = ip.v4->protocol;
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ int ret;
+
+ tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
+ }
+
+ /* define outer transport */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
+ default:
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* compute tunnel header size */
+ tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ /* indicate if we need to offload outer UDP header */
+ if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
+ /* record tunnel offload values */
+ *cd_tunneling |= tunnel;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
+
+ /* Enable IP checksum offloads */
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_proto = ip.v4->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+ I40E_TX_DESC_CMD_IIPT_IPV4;
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ }
+
+ /* compute inner L3 header size */
+ offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ *td_cmd |= cmd;
+ *td_offset |= offset;
+
+ return 1;
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
+
+ if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->rsvd = cpu_to_le16(0);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ ++tx_ring->tx_stats.tx_stopped;
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
+ * @skb: send buffer
+ *
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
+ **/
+bool __i40e_chk_linearize(struct sk_buff *skb)
+{
+ const skb_frag_t *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than 7 */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of 6 fragments totals at least gso_size.
+ */
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ /* Initialize size to the negative value of gso_size minus 1. We
+	 * use this as the worst case scenario in which the frag ahead
+ * of us only provides one byte which is why we are limited to 6
+ * descriptors for a single transmit as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - skb_shinfo(skb)->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
+ sum += skb_frag_size(frag++);
+
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= stale_size;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ *
+ * Returns 0 on success, -1 on failure to DMA
+ **/
+static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ skb_frag_t *frag;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u16 i = tx_ring->next_to_use;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 desc_count = 1;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ max_data, td_tag);
+
+ tx_desc++;
+ i++;
+ desc_count++;
+
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += max_data;
+ size -= max_data;
+
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ tx_desc++;
+ i++;
+ desc_count++;
+
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_bi = &tx_ring->tx_bi[i];
+ }
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ /* write last descriptor with EOP bit */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+ /* We OR these values together to check both against 4 (WB_STRIDE)
+ * below. This is safe since we don't re-use desc_count afterwards.
+ */
+ desc_count |= ++tx_ring->packet_stride;
+
+ if (desc_count >= WB_STRIDE) {
+ /* write last descriptor with RS bit set */
+ td_cmd |= I40E_TX_DESC_CMD_RS;
+ tx_ring->packet_stride = 0;
+ }
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ skb_tx_timestamp(skb);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ /* notify HW of packet */
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+		writel(i, tx_ring->tail);
+
+ return 0;
+
+dma_error:
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+
+ return -1;
+}
+
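+/**
+ * i40e_swdcb_skb_tx_hash - hash an skb onto a Tx queue within a TC
+ * @dev: network device being transmitted on
+ * @skb: send buffer
+ * @num_tx_queues: number of queues in the traffic class
+ *
+ * Uses the socket hash when available, otherwise a protocol/flow hash,
+ * salted and scaled into the [0, num_tx_queues) range.
+ **/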
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 num_tx_queues)
+{
+ u32 jhash_initval_salt = 0xd631614b;
+ u32 hash;
+
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16)skb->protocol ^ skb->hash;
+
+ hash = jhash_1word(hash, jhash_initval_salt);
+
+ return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
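+/**
+ * i40e_lan_select_queue - select a Tx queue based on DCB priority mapping
+ * @netdev: network interface device structure
+ * @skb: send buffer
+ * @sb_dev: subordinate device, unused here
+ *
+ * With DCB enabled, maps the skb priority to a traffic class and hashes
+ * the flow onto one of that class's queues; otherwise defers to the
+ * stack's default queue selection.
+ **/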
+u16 i40e_lan_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device __always_unused *sb_dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw;
+ u16 qoffset;
+ u16 qcount;
+ u8 tclass;
+ u16 hash;
+ u8 prio;
+
+ /* is DCB enabled at all? */
+ if (vsi->tc_config.numtc == 1 ||
+ i40e_is_tc_mqprio_enabled(vsi->back))
+ return netdev_pick_tx(netdev, skb, sb_dev);
+
+ prio = skb->priority;
+ hw = &vsi->back->hw;
+ tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+ /* sanity check */
+ if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+ tclass = 0;
+
+ /* select a queue assigned for the given TC */
+ qcount = vsi->tc_config.tc_info[tclass].qcount;
+ hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+ qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+ return qoffset + hash;
+}
+
+/**
+ * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
+ * @xdpf: data to transmit
+ * @xdp_ring: XDP Tx ring
+ **/
+static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
+ struct i40e_ring *xdp_ring)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
+ struct i40e_tx_buffer *tx_bi = tx_head;
+ struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
+ void *data = xdpf->data;
+ u32 size = xdpf->len;
+
+ if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
+ xdp_ring->tx_stats.tx_busy++;
+ return I40E_XDP_CONSUMED;
+ }
+
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
+
+ for (;;) {
+ dma_addr_t dma;
+
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ goto unmap;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
+
+ if (++index == xdp_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_bi = &xdp_ring->tx_bi[index];
+ tx_desc = I40E_TX_DESC(xdp_ring, index);
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ size = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ xdp_ring->xdp_tx_active++;
+
+ tx_head->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = index;
+
+ return I40E_XDP_TX;
+
+unmap:
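+	/* walk back from the failed descriptor to the head, unmapping
+	 * every buffer we had already mapped
+	 */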
+ for (;;) {
+ tx_bi = &xdp_ring->tx_bi[index];
+ if (dma_unmap_len(tx_bi, len))
+ dma_unmap_page(xdp_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+ if (tx_bi == tx_head)
+ break;
+
+ if (!index)
+ index += xdp_ring->count;
+ index--;
+ }
+
+ return I40E_XDP_CONSUMED;
+}
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+ int tso, count;
+ int tsyn;
+
+ /* prefetch the data, we'll need it later */
+ prefetch(skb->data);
+
+ i40e_trace(xmit_frame_ring, skb, tx_ring);
+
+ count = i40e_xmit_descriptor_count(skb);
+ if (i40e_chk_linearize(skb, count)) {
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ count = i40e_txd_use_count(skb->len);
+ tx_ring->tx_stats.tx_linearize++;
+ }
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ if (tso < 0)
+ goto out_drop;
+
+ tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
+
+ if (tsyn)
+ tx_flags |= I40E_TX_FLAGS_TSYN;
+
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ /* Add Flow Director ATR if it's enabled.
+ *
+ * NOTE: this must always be directly before the data descriptor.
+ */
+ i40e_atr(tx_ring, skb, tx_flags);
+
+ if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset))
+ goto cleanup_tx_tstamp;
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
+ struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
+
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
+
+	/* hardware can't handle really short frames; hardware padding only
+	 * works beyond this point
+	 */
+ if (skb_put_padto(skb, I40E_MIN_TX_LEN))
+ return NETDEV_TX_OK;
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
+
+/**
+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
+ *
+ * Returns the number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ **/
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int queue_index = smp_processor_id();
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_ring *xdp_ring;
+ int nxmit = 0;
+ int i;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdp_ring = vsi->xdp_rings[queue_index];
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
+ if (err != I40E_XDP_TX)
+ break;
+ nxmit++;
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ i40e_xdp_ring_update_tail(xdp_ring);
+
+ return nxmit;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
new file mode 100644
index 000000000..768290dc6
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+#include <net/xdp.h>
+
+/* Interrupt Throttling and Rate Limiting Goodies */
+#define I40E_DEFAULT_IRQ_WORK 256
+
+/* The datasheets for the X710 and XL710 indicate that the maximum value for
+ * the ITR is 8160 usec, which is then called out as 0xFF0 with a 2 usec
+ * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
+ * the register value, which is divided by 2, let's use the actual values and
+ * avoid an excessive amount of translation.
+ */
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */
+#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */
+#define I40E_ITR_20K 50
+#define I40E_ITR_8K 122
+#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */
+#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
+
+#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
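+
+/* Worked example (illustrative only): I40E_ITR_RX_DEF expands to
+ * 50 | 0x8000 = 0x8032, so ITR_IS_DYNAMIC() reads true and ITR_TO_REG()
+ * strips the flag back down to 50 usec (a 20K interrupts/sec target).
+ */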
+
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA BIT(6)
+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+
+/**
+ * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
+ * @intrl: interrupt rate limit to convert
+ *
+ * This function converts a decimal interrupt rate limit to the appropriate
+ * register format expected by the firmware when setting interrupt rate limit.
+ */
+static inline u16 i40e_intrl_usec_to_reg(int intrl)
+{
+ if (intrl >> 2)
+ return ((intrl >> 2) | INTRL_ENA);
+ else
+ return 0;
+}
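+
+/* Worked example (illustrative only): a 40 usec rate limit encodes as
+ * (40 >> 2) | INTRL_ENA = 0x0A | 0x40 = 0x4A, while anything below
+ * 4 usec is under the 4 usec register resolution and encodes as 0,
+ * i.e. rate limiting disabled.
+ */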
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* This enum matches the hardware bits and is meant to be used with the
+ * DYN_CTLN and QINT registers, or more generally anywhere the manual
+ * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+ (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
+#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * e.g. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * e.g. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+#define i40e_rx_desc i40e_16byte_rx_desc
+
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (I40E_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = I40E_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
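+
+/* Usage sketch (illustrative only): the descriptor-done bit can be
+ * tested with
+ *	i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT));
+ */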
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
+
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+
+#define I40E_MAX_BUFFER_TXD 8
+#define I40E_MIN_TX_LEN 17
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/**
+ * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment. For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since the multiply commutes with the shifts, we can reorder the
+ * operations into:
+ * return ((size * 85) >> 20) + 1;
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ return ((size * 85) >> 20) + 1;
+}
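+
+/* Worked example (illustrative only): a 20000 byte request yields
+ * ((20000 * 85) >> 20) + 1 = 1 + 1 = 2 descriptors, which matches
+ * ceil(20000 / 12288) = 2 for the 12K-aligned limit described above.
+ */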
+
+/* Tx Descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
+
+#define I40E_TX_FLAGS_HW_VLAN BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN BIT(2)
+#define I40E_TX_FLAGS_TSO BIT(3)
+#define I40E_TX_FLAGS_IPV4 BIT(4)
+#define I40E_TX_FLAGS_IPV6 BIT(5)
+#define I40E_TX_FLAGS_TSYN BIT(8)
+#define I40E_TX_FLAGS_FD_SB BIT(9)
+#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
+
+struct i40e_tx_buffer {
+ struct i40e_tx_desc *next_to_watch;
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ void *raw_buf;
+ };
+ unsigned int bytecount;
+ unsigned short gso_segs;
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+ dma_addr_t dma;
+ struct page *page;
+ __u32 page_offset;
+ __u16 pagecnt_bias;
+};
+
+struct i40e_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+ u64 tx_linearize;
+ u64 tx_force_wb;
+ u64 tx_stopped;
+ int prev_pkt_ctr;
+};
+
+struct i40e_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
+ u64 page_reuse_count;
+ u64 page_alloc_count;
+ u64 page_waive_count;
+ u64 page_busy_count;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_RING_STATE_NBITS /* must be last */
+};
+
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ struct bpf_prog *xdp_prog;
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ struct xdp_buff **rx_bi_zc;
+ };
+ DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 itr_setting;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_buf_len;
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 xdp_tx_active;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
+ u8 packet_stride;
+
+ u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define I40E_TXR_FLAGS_XDP BIT(2)
+
+ /* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
+ struct sk_buff *skb; /* When i40e_clean_rx_ring_irq() must
+ * return before it sees the EOP for
+ * the current packet, we save that skb
+ * here and resume receiving this
+ * packet the next time
+ * i40e_clean_rx_ring_irq() is called
+ * for this ring.
+ */
+
+ struct i40e_channel *ch;
+ u16 rx_offset;
+ struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
+} ____cacheline_internodealigned_in_smp;
+
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline bool ring_is_xdp(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_TXR_FLAGS_XDP);
+}
+
+static inline void set_ring_xdp(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_TXR_FLAGS_XDP;
+}
+
+#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
+#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
+#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
+#define I40E_ITR_ADAPTIVE_BULK 0x0000
+
+struct i40e_ring_container {
+ struct i40e_ring *ring; /* pointer to linked list of ring(s) */
+ unsigned long next_update; /* jiffies value of next update */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ u16 target_itr; /* target ITR setting for ring(s) */
+ u16 current_itr; /* current ITR setting for ring(s) */
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40e_free_tx_resources(struct i40e_ring *tx_ring);
+void i40e_free_rx_resources(struct i40e_ring *rx_ring);
+int i40e_napi_poll(struct napi_struct *napi, int budget);
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+bool __i40e_chk_linearize(struct sk_buff *skb);
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * @skb: send buffer
+ *
+ * Returns the number of data descriptors needed for this skb. The result
+ * is always at least one, since even a zero-length linear area counts as
+ * one descriptor.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ int count = 0, size = skb_headlen(skb);
+
+ for (;;) {
+ count += i40e_txd_use_count(size);
+
+ if (!nr_frags--)
+ break;
+
+ size = skb_frag_size(frag++);
+ }
+
+ return count;
+}
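+
+/* Worked example (illustrative only): an skb with a 256 byte linear
+ * area and two 4096 byte frags needs 1 + 1 + 1 = 3 data descriptors,
+ * since each piece fits within a single 12K-aligned chunk.
+ */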
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to assure are available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @count: number of buffers used
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
+{
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
+ return false;
+
+ if (skb_is_gso(skb))
+ return __i40e_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
+}
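+
+/* Example (illustrative only): a non-GSO skb that needs exactly
+ * I40E_MAX_BUFFER_TXD (8) buffers is sent as-is, while one needing
+ * more must be linearized first; GSO skbs get the per-segment check
+ * in __i40e_chk_linearize().
+ */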
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
new file mode 100644
index 000000000..8c5118c8b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef I40E_TXRX_COMMON_
+#define I40E_TXRX_COMMON_
+
+int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
+void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1);
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb);
+void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
+void i40e_update_rx_stats(struct i40e_ring *rx_ring,
+ unsigned int total_rx_bytes,
+ unsigned int total_rx_packets);
+void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
+void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
+
+#define I40E_XDP_PASS 0
+#define I40E_XDP_CONSUMED BIT(0)
+#define I40E_XDP_TX BIT(1)
+#define I40E_XDP_REDIR BIT(2)
+#define I40E_XDP_EXIT BIT(3)
+
+/**
+ * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
+ * @td_cmd: Tx descriptor command bits
+ * @td_offset: Tx descriptor header offsets
+ * @size: size of the data buffer in bytes
+ * @td_tag: L2 tag 1 to insert
+ **/
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
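+
+/* Usage sketch (illustrative only): the XDP transmit path builds its
+ * data descriptors with
+ *	build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
+ * i.e. CRC insertion only, with no header offsets and no L2 tag.
+ */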
+
+/**
+ * i40e_update_tx_stats - Update the egress statistics for the Tx ring
+ * @tx_ring: Tx ring to update
+ * @total_packets: total packets sent
+ * @total_bytes: total bytes sent
+ **/
+static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
+ unsigned int total_packets,
+ unsigned int total_bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+}
+
+#define WB_STRIDE 4
+
+/**
+ * i40e_arm_wb - (Possibly) arms Tx write-back
+ * @tx_ring: Tx ring to update
+ * @vsi: the VSI
+ * @budget: the NAPI budget left
+ **/
+static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
+ struct i40e_vsi *vsi,
+ int budget)
+{
+ if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		/* If fewer than WB_STRIDE (but at least one) descriptors are
+		 * waiting to be written back, kick the hardware to force the
+		 * write-back in case we stay in NAPI.
+		 * In this mode on X722 we do not enable the interrupt.
+		 */
+ unsigned int j = i40e_get_tx_pending(tx_ring, false);
+
+ if (budget &&
+ ((j / WB_STRIDE) == 0) && j > 0 &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ }
+}
+
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qword1: qword1 representing status_error_len in CPU ordering
+ *
+ * The value in the descriptor length field indicates whether this is a
+ * programming status descriptor for flow director or FCoE: if it equals
+ * I40E_RX_PROG_STATUS_DESC_LENGTH it is, otherwise it is a packet
+ * descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qword1)
+{
+ /* The Rx filter programming status and SPH bit occupy the same
+ * spot in the descriptor. Since we don't support packet split we
+ * can just reuse the bit as an indication that this is a
+ * programming status descriptor.
+ */
+ return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
+}
+
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
+
+#endif /* I40E_TXRX_COMMON_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
new file mode 100644
index 000000000..388c3d36d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -0,0 +1,1539 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
+
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
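+
+/* Worked example (illustrative only): I40E_MASK(0x3, 5) expands to
+ * 0x3 << 5 = 0x60, a two-bit field at bit offset 5.
+ */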
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 4
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
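+
+/* Worked example (illustrative only): with count = 512, next_to_use = 10
+ * and next_to_clean = 4, I40E_DESC_UNUSED() yields 512 + 4 - 10 - 1 = 505;
+ * one slot is always left unused to distinguish a full ring from an
+ * empty one.
+ */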
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
+ I40E_DEBUG_IWARP = 0x00F00000,
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_PHY_COM_REG_PAGE 0x1E
+#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
+#define I40E_PHY_LED_MANUAL_ON 0x100
+#define I40E_PHY_LED_PROV_REG_1 0xC430
+#define I40E_PHY_LED_MODE_MASK 0xFFFF
+#define I40E_PHY_LED_MODE_ORIG 0x80000000
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+ I40E_SET_FC_AQ_FAIL_NONE = 0,
+ I40E_SET_FC_AQ_FAIL_GET = 1,
+ I40E_SET_FC_AQ_FAIL_SET = 2,
+ I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+ I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
+ I40E_VSI_IWARP = 8,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 req_fec_info;
+ u8 fec_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+ u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/* Defining the macro I40E_PHY_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+ I40E_PHY_TYPE_OFFSET)
+/* Offset for 2.5G/5G PHY Types value to bit number conversion */
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define I40E_CLOUD_FILTER_MODE1 0x6
+#define I40E_CLOUD_FILTER_MODE2 0x7
+#define I40E_SWITCH_MODE_MASK 0xF
+
+ u32 management_mode;
+ u32 mng_protocols_over_mctp;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+
+ u32 flex10_status;
+
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+ u64 wr_csr_prot;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+	bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+ I40E_NVMUPD_INVALID,
+ I40E_NVMUPD_READ_CON,
+ I40E_NVMUPD_READ_SNT,
+ I40E_NVMUPD_READ_LCB,
+ I40E_NVMUPD_READ_SA,
+ I40E_NVMUPD_WRITE_ERA,
+ I40E_NVMUPD_WRITE_CON,
+ I40E_NVMUPD_WRITE_SNT,
+ I40E_NVMUPD_WRITE_LCB,
+ I40E_NVMUPD_WRITE_SA,
+ I40E_NVMUPD_CSUM_CON,
+ I40E_NVMUPD_CSUM_SA,
+ I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
+};
+
+enum i40e_nvmupd_state {
+ I40E_NVMUPD_STATE_INIT,
+ I40E_NVMUPD_STATE_READING,
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code. Where is the right file?
+ */
+#define I40E_NVM_READ 0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
+
+
+#define I40E_NVMUPD_MAX_DATA 4096
+
+struct i40e_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
+
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
+#define I40E_MODULE_REVISION_ADDR 0x01
+#define I40E_MODULE_SFF_8472_COMP 0x5E
+#define I40E_MODULE_SFF_8472_SWAP 0x5C
+#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40
+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
+#define I40E_MODULE_TYPE_QSFP28 0x11
+#define I40E_MODULE_QSFP_MAX_LEN 640
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+#define I40E_TLV_STATUS_OPER 0x1
+#define I40E_TLV_STATUS_SYNC 0x2
+#define I40E_TLV_STATUS_ERR 0x4
+#define I40E_CEE_OPER_MAX_APPS 3
+#define I40E_APP_PROTOID_FCOE 0x8906
+#define I40E_APP_PROTOID_ISCSI 0x0cbc
+#define I40E_APP_PROTOID_FIP 0x8914
+#define I40E_APP_SEL_ETHTYPE 0x1
+#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u8 dcbx_mode;
+#define I40E_DCBX_MODE_CEE 0x1
+#define I40E_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct i40e_dcb_ets_config etscfg;
+ struct i40e_dcb_ets_config etsrec;
+ struct i40e_dcb_pfc_config pfc;
+ struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* state of nvm update process */
+ enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
+ struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
+#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
+#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
+ u64 flags;
+
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+static inline bool i40e_is_vf(struct i40e_hw *hw)
+{
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
+}
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct i40e_16b_rx_wb_qw0 {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+ struct {
+ u64 qword[2];
+ } raw;
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct i40e_32b_rx_wb_qw0 {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+ struct {
+ u64 qword[4];
+ } raw;
+};
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ /* Note: For non-tunnel packets INT_UDP_0 is the right status for
+ * UDP header
+ */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
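+
+/* Illustrative sketch (not part of the driver, compiled out): building
+ * the context-descriptor QW0 tunneling field for an IPv4 outer header
+ * over a UDP tunnel, using the fields above. The outer IP length is in
+ * 32-bit words and the tunnel header length in 16-bit words; the
+ * helper name is hypothetical.
+ */
+#if 0
+static inline u32 i40e_example_tunnel_qw0(u8 ext_iplen_dwords,
+ u8 natlen_words)
+{
+ return I40E_TX_CTX_EXT_IP_IPV4 |
+ ((u32)ext_iplen_dwords << I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((u32)natlen_words << I40E_TXD_CTX_QW0_NATLEN_SHIFT);
+}
+#endif
+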
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
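+/* Illustrative sketch (not part of the driver, compiled out): composing
+ * the filter program descriptor's dtype_cmd_cntindex word for a Flow
+ * Director add with a statistics counter, much like the driver's FD
+ * programming path does. The helper name is hypothetical.
+ */
+#if 0
+static inline u32 i40e_example_fdir_dcc(u16 cnt_index)
+{
+ u32 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+ dcc |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT;
+ dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dcc |= ((u32)cnt_index << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ return dcc;
+}
+#endif
+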
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+ u64 rx_discards_other; /* rxerr1 */
+};
+
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
+#define I40E_SR_PBA_FLAGS 0x15
+#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_NVM_OEM_VER_OFF 0x83
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define I40E_PTR_TYPE BIT(15)
+#define I40E_SR_OCP_CFG_WORD0 0x2B
+#define I40E_SR_OCP_ENABLED BIT(15)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
+
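+/* Illustrative sketch (not part of the driver, compiled out): deriving
+ * the software checksum word so that the sum of all Shadow RAM words,
+ * including the checksum word itself, equals 0xBABA. Names here are
+ * hypothetical.
+ */
+#if 0
+static inline u16 i40e_example_sr_checksum(const u16 *words, u32 nwords)
+{
+ u16 sum = 0;
+ u32 i;
+
+ for (i = 0; i < nwords; i++)
+ if (i != I40E_SR_SW_CHECKSUM_WORD)
+ sum += words[i];
+ return (u16)I40E_SR_SW_CHECKSUM_BASE - sum;
+}
+#endif
+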
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
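+/* Illustrative note (compiled out): the enum values above scale the 1K
+ * base by powers of two, so the bucket count for a given setting could
+ * be computed as below; the helper name is hypothetical.
+ */
+#if 0
+static inline u32 i40e_example_hash_filter_buckets(enum i40e_hash_filter_size sz)
+{
+ /* e.g. I40E_HASH_FILTER_SIZE_8K yields 1024 << 3 = 8192 buckets */
+ return I40E_HASH_FILTER_BASE_SIZE << (u32)sz;
+}
+#endif
+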
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold per-PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate Ram */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT 49
+#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT 41
+#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+#define I40E_VLAN_SRC_SHIFT 55
+#define I40E_VLAN_SRC_MASK (0x1ULL << I40E_VLAN_SRC_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_DDP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ddp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_I40E 0x00000011
+ u32 type;
+ struct i40e_ddp_version version;
+ u32 size;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
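+/* Illustrative sketch (not part of this header, compiled out): walking
+ * the package header above to locate a segment of a given type, in the
+ * spirit of the DDP loading code. The helper name is hypothetical.
+ */
+#if 0
+static struct i40e_generic_seg_header *
+i40e_example_find_segment(u32 seg_type, struct i40e_package_header *pkg)
+{
+ u32 i;
+
+ for (i = 0; i < pkg->segment_count; i++) {
+ struct i40e_generic_seg_header *seg =
+ (struct i40e_generic_seg_header *)
+ ((u8 *)pkg + pkg->segment_offset[i]);
+
+ if (seg->type == seg_type)
+ return seg;
+ }
+ return NULL;
+}
+#endif
+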
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
+ u32 track_id;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
+#define SECTION_TYPE_NOTE 0x80000000
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ddp_version version;
+ u8 op;
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_DDP_NAME_SIZE];
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 000000000..c7d761426
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,4877 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+
+/*********************notification routines***********************/
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ enum virtchnl_ops v_opcode,
+ int v_retval, u8 *msg,
+ u16 msglen)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
+		/* Not all VFs are enabled, so skip the ones that are not */
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ }
+}
+
+/**
+ * i40e_vc_link_speed2mbps
+ * converts i40e_aq_link_speed to an integer value of Mbps
+ * @link_speed: the speed to convert
+ *
+ * Return the speed as a direct value of Mbps.
+ **/
+static u32
+i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return SPEED_100;
+ case I40E_LINK_SPEED_1GB:
+ return SPEED_1000;
+ case I40E_LINK_SPEED_2_5GB:
+ return SPEED_2500;
+ case I40E_LINK_SPEED_5GB:
+ return SPEED_5000;
+ case I40E_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case I40E_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case I40E_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case I40E_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case I40E_LINK_SPEED_UNKNOWN:
+ return SPEED_UNKNOWN;
+ }
+ return SPEED_UNKNOWN;
+}
+
+/**
+ * i40e_set_vf_link_state
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to PF event structure
+ * @ls: pointer to link status structure
+ *
+ * set a link state on a single VF
+ **/
+static void i40e_set_vf_link_state(struct i40e_vf *vf,
+ struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
+{
+ u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
+
+ if (vf->link_forced)
+ link_status = vf->link_up;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_speed = link_status ?
+ i40e_vc_link_speed2mbps(ls->link_speed) : 0;
+ pfe->event_data.link_event_adv.link_status = link_status;
+ } else {
+ pfe->event_data.link_event.link_speed = link_status ?
+ i40e_virtchnl_link_speed(ls->link_speed) : 0;
+ pfe->event_data.link_event.link_status = link_status;
+ }
+}
+
+/**
+ * i40e_vc_notify_vf_link_state
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ **/
+static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+{
+ struct virtchnl_pf_event pfe;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ i40e_set_vf_link_state(vf, &pfe, ls);
+
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the PF structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ i40e_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ u16 pos;
+
+ /* Continue only if this is a PF */
+ if (!pdev->is_physfn)
+ return;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vf_dev = NULL;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+ while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
+ if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
+ pci_restore_msi_state(vf_dev);
+ }
+ }
+}
+#endif /* CONFIG_PCI_IOV */
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+ struct virtchnl_pf_event pfe;
+ int abs_vf_id;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify if the VF is in either init or active before proceeding */
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ return;
+
+ abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe,
+ sizeof(struct virtchnl_pf_event), NULL);
+}
+
+/***********************misc routines*****************************/
+
+/**
+ * i40e_vc_reset_vf
+ * @vf: pointer to the VF info
+ * @notify_vf: whether to notify the VF about the reset
+ *
+ * Reset VF handler.
+ **/
+static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int i;
+
+ if (notify_vf)
+ i40e_vc_notify_vf_reset(vf);
+
+	/* We want to ensure that an actual reset is initiated after this
+	 * function is called. However, we do not want to wait forever, so
+	 * we'll give a reasonable amount of time and print a message if we
+	 * fail to initiate a reset.
+ */
+ for (i = 0; i < 20; i++) {
+		/* If the PF is in the VF releasing state, resetting the VF
+		 * is impossible, so bail out.
+ */
+ if (test_bit(__I40E_VFS_RELEASING, pf->state))
+ return;
+ if (i40e_reset_vf(vf, false))
+ return;
+ usleep_range(10000, 20000);
+ }
+
+ if (notify_vf)
+ dev_warn(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
+ else
+ dev_dbg(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
+}
+
+/**
+ * i40e_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
+ *
+ * check for the valid VSI id
+ **/
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
+
+ return (vsi && (vsi->vf_id == vf->vf_id));
+}
+
+/**
+ * i40e_vc_isvalid_queue_id
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @qid: vsi relative queue id
+ *
+ * check for the valid queue id
+ **/
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
+ u16 qid)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
+
+ return (vsi && (qid < vsi->alloc_queue_pairs));
+}
+
+/**
+ * i40e_vc_isvalid_vector_id
+ * @vf: pointer to the VF info
+ * @vector_id: VF relative vector id
+ *
+ * check for the valid vector id
+ **/
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+}
+
+/***********************vf resource mgmt routines*****************/
+
+/**
+ * i40e_vc_get_pf_queue_id
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
+ * @vsi_queue_id: vsi relative queue id
+ *
+ * return PF relative queue id
+ **/
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
+ u8 vsi_queue_id)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+
+ if (!vsi)
+ return pf_queue_id;
+
+ if (le16_to_cpu(vsi->info.mapping_flags) &
+ I40E_AQ_VSI_QUE_MAP_NONCONTIG)
+ pf_queue_id =
+ le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
+ else
+ pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
+ vsi_queue_id;
+
+ return pf_queue_id;
+}
+
+/**
+ * i40e_get_real_pf_qid
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @queue_id: queue number
+ *
+ * wrapper function to get pf_queue_id handling ADq code as well
+ **/
+static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
+{
+ int i;
+
+ if (vf->adq_enabled) {
+		/* Although the VF considers all of its queues (1 to 16) as
+		 * its own, they may actually belong to different VSIs (up
+		 * to 4). We need to find out which queues belong to which VSI.
+ */
+ for (i = 0; i < vf->num_tc; i++) {
+ if (queue_id < vf->ch[i].num_qps) {
+ vsi_id = vf->ch[i].vsi_id;
+ break;
+ }
+			/* find the right queue id relative to the
+			 * given VSI.
+ */
+ queue_id -= vf->ch[i].num_qps;
+ }
+ }
+
+ return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
+}
+
+/**
+ * i40e_config_irq_link_list
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as given by the FW
+ * @vecmap: irq map info
+ *
+ * configure irq link list from the map
+ **/
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
+ struct virtchnl_vector_map *vecmap)
+{
+ unsigned long linklistmap = 0, tempmap;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vsi_queue_id, pf_queue_id;
+ enum i40e_queue_type qtype;
+ u16 next_q, vector_id, size;
+ u32 reg, reg_idx;
+ u16 itr_idx = 0;
+
+ vector_id = vecmap->vector_id;
+ /* setup the head */
+ if (0 == vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
+ (vector_id - 1));
+
+ if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
+ /* Special case - No queues mapped on this vector */
+ wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto irq_list_done;
+ }
+ tempmap = vecmap->rxq_map;
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id));
+ }
+
+ tempmap = vecmap->txq_map;
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id + 1));
+ }
+
+ size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ next_q = find_first_bit(&linklistmap, size);
+ if (unlikely(next_q == size))
+ goto irq_list_done;
+
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
+ reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
+
+ wr32(hw, reg_idx, reg);
+
+ while (next_q < size) {
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(pf_queue_id);
+ itr_idx = vecmap->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(pf_queue_id);
+ itr_idx = vecmap->txitr_idx;
+ break;
+ default:
+ break;
+ }
+
+ next_q = find_next_bit(&linklistmap, size, next_q + 1);
+ if (next_q < size) {
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_get_real_pf_qid(vf,
+ vsi_id,
+ vsi_queue_id);
+ } else {
+ pf_queue_id = I40E_QUEUE_END_OF_LIST;
+ qtype = 0;
+ }
+
+ /* format for the RQCTL & TQCTL regs is same */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ wr32(hw, reg_idx, reg);
+ }
+
+	/* If the VF is running in polling mode and using interrupt zero,
+	 * we need to disable auto-mask when enabling interrupt zero for VFs.
+ */
+ if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+ (vector_id == 0)) {
+ reg = rd32(hw, I40E_GLINT_CTL);
+ if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
+ reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+ wr32(hw, I40E_GLINT_CTL, reg);
+ }
+ }
+
+irq_list_done:
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_release_iwarp_qvlist
+ * @vf: pointer to the VF.
+ *
+ **/
+static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ u32 msix_vf;
+ u32 i;
+
+ if (!vf->qvlist_info)
+ return;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ struct virtchnl_iwarp_qv_info *qv_info;
+ u32 next_q_index, next_q_type;
+ struct i40e_hw *hw = &pf->hw;
+ u32 v_idx, reg_idx, reg;
+
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+ if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+ /* Figure out the queue after CEQ and make that the
+ * first queue.
+ */
+ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+ reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
+ next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
+ >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
+ next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
+ >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ reg = (next_q_index &
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (next_q_type <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+ }
+ }
+ kfree(vf->qvlist_info);
+ vf->qvlist_info = NULL;
+}
+
+/**
+ * i40e_config_iwarp_qvlist
+ * @vf: pointer to the VF info
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
+ struct virtchnl_iwarp_qvlist_info *qvlist_info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct virtchnl_iwarp_qv_info *qv_info;
+ u32 v_idx, i, reg_idx, reg;
+ u32 next_q_idx, next_q_type;
+ u32 msix_vf;
+ int ret = 0;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+ if (qvlist_info->num_vectors > msix_vf) {
+ dev_warn(&pf->pdev->dev,
+ "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
+ qvlist_info->num_vectors,
+ msix_vf);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ kfree(vf->qvlist_info);
+ vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
+ qvlist_info->num_vectors - 1),
+ GFP_KERNEL);
+ if (!vf->qvlist_info) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+
+ /* Validate vector id belongs to this vf */
+ if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ v_idx = qv_info->v_idx;
+
+ vf->qvlist_info->qv_info[i] = *qv_info;
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ /* We might be sharing the interrupt, so get the first queue
+ * index and type, push it down the list by adding the new
+ * queue on top. Also link it with the new queue in CEQCTL.
+ */
+ reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
+ next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
+ next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+ if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+ reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
+ (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
+ (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
+ wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ reg = (qv_info->ceq_idx &
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (I40E_QUEUE_TYPE_PE_CEQ <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+ }
+
+ if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
+
+ wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
+ }
+ }
+
+ return 0;
+err_free:
+ kfree(vf->qvlist_info);
+ vf->qvlist_info = NULL;
+err_out:
+ return ret;
+}
+
+/**
+ * i40e_config_vsi_tx_queue
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure tx queue
+ **/
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
+ u16 vsi_queue_id,
+ struct virtchnl_txq_info *info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ struct i40e_vsi *vsi;
+ u16 pf_queue_id;
+ u32 qtx_ctl;
+ int ret = 0;
+
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ ret = -ENOENT;
+ goto error_context;
+ }
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!vsi) {
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
+
+ /* only set the required fields */
+ tx_ctx.base = info->dma_ring_addr / 128;
+ tx_ctx.qlen = info->ring_len;
+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
+ tx_ctx.rdylist_act = 0;
+ tx_ctx.head_wb_ena = info->headwb_enabled;
+ tx_ctx.head_wb_addr = info->dma_headwb_addr;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Tx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Tx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* associate this queue with the PCI VF function */
+ qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ & I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
+ << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+ & I40E_QTX_CTL_VFVM_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
+ i40e_flush(hw);
+
+error_context:
+ return ret;
+}
+
+/**
+ * i40e_config_vsi_rx_queue
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure rx queue
+ **/
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
+ u16 vsi_queue_id,
+ struct virtchnl_rxq_info *info)
+{
+ u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int ret = 0;
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+
+ /* only set the required fields */
+ rx_ctx.base = info->dma_ring_addr / 128;
+ rx_ctx.qlen = info->ring_len;
+
+ if (info->splithdr_enabled) {
+ rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
+ I40E_RX_SPLIT_IP |
+ I40E_RX_SPLIT_TCP_UDP |
+ I40E_RX_SPLIT_SCTP;
+ /* header length validation */
+ if (info->hdr_size > ((2 * 1024) - 64)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ /* set split mode 10b */
+ rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
+ }
+
+ /* databuffer length validation */
+ if (info->databuffer_size > ((16 * 1024) - 128)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+ /* max pkt. length validation */
+ if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.rxmax = info->max_pkt_size;
+
+ /* if port VLAN is configured increase the max packet size */
+ if (vsi->info.pvid)
+ rx_ctx.rxmax += VLAN_HLEN;
+
+	/* always enable 32-byte descriptors */
+ rx_ctx.dsize = 1;
+
+ /* default values */
+ rx_ctx.lrxqthresh = 1;
+ rx_ctx.crcstrip = 1;
+ rx_ctx.prefena = 1;
+ rx_ctx.l2tsel = 1;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Rx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Rx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+error_param:
+ return ret;
+}
+
+/**
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the VF info
+ * @idx: VSI index, applies only for ADq mode, zero otherwise
+ *
+ * alloc VF vsi context & resources
+ **/
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
+{
+ struct i40e_mac_filter *f = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+ u64 max_tx_rate = 0;
+ int ret = 0;
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
+ vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev,
+ "add vsi failed for VF %d, aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto error_alloc_vsi_res;
+ }
+
+ if (!idx) {
+ u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u8 broadcast[ETH_ALEN];
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_id = vsi->id;
+		/* If a port VLAN was configured and the VF driver
+		 * was then removed, the VSI port VLAN configuration
+		 * was destroyed. Check if there is a port VLAN and
+		 * restore the VSI configuration if needed.
+ */
+ if (vf->port_vlan_id)
+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+ f = i40e_add_mac_filter(vsi,
+ vf->default_lan_addr.addr);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not add MAC filter %pM for VF %d\n",
+ vf->default_lan_addr.addr, vf->vf_id);
+ }
+ eth_broadcast_addr(broadcast);
+ f = i40e_add_mac_filter(vsi, broadcast);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF broadcast filter\n");
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ /* program mac filter only for VF VSI */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ }
+
+	/* For ADq, store the VSI index and id but don't apply the MAC filter */
+ if (vf->adq_enabled) {
+ vf->ch[idx].vsi_idx = vsi->idx;
+ vf->ch[idx].vsi_id = vsi->id;
+ }
+
+ /* Set VF bandwidth if specified */
+ if (vf->tx_rate) {
+ max_tx_rate = vf->tx_rate;
+ } else if (vf->ch[idx].max_tx_rate) {
+ max_tx_rate = vf->ch[idx].max_tx_rate;
+ }
+
+ if (max_tx_rate) {
+ max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ max_tx_rate, 0, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+ vf->vf_id, ret);
+ }
+
+error_alloc_vsi_res:
+ return ret;
+}
+
+/**
+ * i40e_map_pf_queues_to_vsi
+ * @vf: pointer to the VF info
+ *
+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
+ * to the VSI.
+ **/
+static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, num_tc = 1; /* VF has at least one traffic class */
+ u16 vsi_id, qps;
+ int i, j;
+
+ if (vf->adq_enabled)
+ num_tc = vf->num_tc;
+
+ for (i = 0; i < num_tc; i++) {
+ if (vf->adq_enabled) {
+ qps = vf->ch[i].num_qps;
+ vsi_id = vf->ch[i].vsi_id;
+ } else {
+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ vsi_id = vf->lan_vsi_id;
+ }
+
+ for (j = 0; j < 7; j++) {
+ if (j * 2 >= qps) {
+ /* end of list */
+ reg = 0x07FF07FF;
+ } else {
+ u16 qid = i40e_vc_get_pf_queue_id(vf,
+ vsi_id,
+ j * 2);
+ reg = qid;
+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
+ (j * 2) + 1);
+ reg |= qid << 16;
+ }
+ i40e_write_rx_ctl(hw,
+ I40E_VSILAN_QTABLE(j, vsi_id),
+ reg);
+ }
+ }
+}
+
+/**
+ * i40e_map_pf_to_vf_queues
+ * @vf: pointer to the VF info
+ *
+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function takes care of the second part, VPLAN_QTABLE, and completes the
+ * VF mappings.
+ **/
+static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, total_qps = 0;
+ u32 qps, num_tc = 1; /* VF has at least one traffic class */
+ u16 vsi_id, qid;
+ int i, j;
+
+ if (vf->adq_enabled)
+ num_tc = vf->num_tc;
+
+ for (i = 0; i < num_tc; i++) {
+ if (vf->adq_enabled) {
+ qps = vf->ch[i].num_qps;
+ vsi_id = vf->ch[i].vsi_id;
+ } else {
+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ vsi_id = vf->lan_vsi_id;
+ }
+
+ for (j = 0; j < qps; j++) {
+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
+
+ reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
+ reg);
+ total_qps++;
+ }
+ }
+}
+
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the VF info
+ *
+ * enable VF mappings
+ **/
+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ /* Tell the hardware we're using noncontiguous mapping. HW requires
+ * that VF queues be mapped using this method, even when they are
+ * contiguous in real life
+ */
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* enable VF vplan_qtable mappings */
+ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+
+ i40e_map_pf_to_vf_queues(vf);
+ i40e_map_pf_queues_to_vsi(vf);
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the VF info
+ *
+ * disable VF mappings
+ **/
+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* disable qp mappings */
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+ for (i = 0; i < I40E_MAX_VSI_QP; i++)
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+ I40E_QUEUE_END_OF_LIST);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the VF info
+ *
+ * free VF resources
+ **/
+static void i40e_free_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_idx, reg;
+ int i, j, msix_vf;
+
+ /* Start by disabling VF's configuration API to prevent the OS from
+ * accessing the VF's VSI after it's freed / invalidated.
+ */
+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+
+	/* It's possible the VF had requested more queues than the default,
+	 * so do the accounting here when we're about to free them.
+	 */
+ if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
+ pf->queues_left += vf->num_queue_pairs -
+ I40E_DEFAULT_QUEUES_PER_VF;
+ }
+
+ /* free vsi & disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx) {
+ i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+ vf->lan_vsi_idx = 0;
+ vf->lan_vsi_id = 0;
+ }
+
+	/* do the accounting and remove the additional ADq VSIs */
+ if (vf->adq_enabled && vf->ch[0].vsi_idx) {
+ for (j = 0; j < vf->num_tc; j++) {
+			/* At this point VSI0 is already released, so don't
+			 * release it again; only clear the stored values in
+			 * the structure variables
+ */
+ if (j)
+ i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
+ vf->ch[j].vsi_idx = 0;
+ vf->ch[j].vsi_id = 0;
+ }
+ }
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* clear the irq settings */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ vf->num_queue_pairs = 0;
+ clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
+}
+
+/**
+ * i40e_alloc_vf_res
+ * @vf: pointer to the VF info
+ *
+ * allocate VF resources
+ **/
+static int i40e_alloc_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int total_queue_pairs = 0;
+ int ret, idx;
+
+ if (vf->num_req_queues &&
+ vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
+ pf->num_vf_qps = vf->num_req_queues;
+ else
+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+
+ /* allocate hw vsi context & associated resources */
+ ret = i40e_alloc_vsi_res(vf, 0);
+ if (ret)
+ goto error_alloc;
+ total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+
+ /* allocate additional VSIs based on tc information for ADq */
+ if (vf->adq_enabled) {
+ if (pf->queues_left >=
+ (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
+ /* TC 0 always belongs to VF VSI */
+ for (idx = 1; idx < vf->num_tc; idx++) {
+ ret = i40e_alloc_vsi_res(vf, idx);
+ if (ret)
+ goto error_alloc;
+ }
+ /* send correct number of queues */
+ total_queue_pairs = I40E_MAX_VF_QUEUES;
+ } else {
+ dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
+ vf->vf_id);
+ vf->adq_enabled = false;
+ }
+ }
+
+ /* We account for each VF to get a default number of queue pairs. If
+ * the VF has now requested more, we need to account for that to make
+ * certain we never request more queues than we actually have left in
+ * HW.
+ */
+ if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
+ pf->queues_left -=
+ total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
+
+ if (vf->trusted)
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* store the total qps number for the runtime
+ * VF req validation
+ */
+ vf->num_queue_pairs = total_queue_pairs;
+
+ /* VF is now completely initialized */
+ set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+
+error_alloc:
+ if (ret)
+ i40e_free_vf_res(vf);
+
+ return ret;
+}
+
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the VF structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int vf_abs_id, i;
+ u32 reg;
+
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ wr32(hw, I40E_PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_MASK) == 0)
+ return 0;
+ udelay(1);
+ }
+ return -EIO;
+}
+
+/**
+ * __i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ u16 num_vlans = 0, bkt;
+
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
+
+/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
+ **/
+static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ int num_vlans;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return num_vlans;
+}
+
+/**
+ * i40e_get_vlan_list_sync
+ * @vsi: pointer to the VSI
+ * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
+ * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
+ * This array is allocated here, but has to be freed by the caller.
+ *
+ * Called to get number of VLANs and VLAN list present in mac_filter_hash.
+ **/
+static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
+ s16 **vlan_list)
+{
+ struct i40e_mac_filter *f;
+ int i = 0;
+ int bkt;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
+ if (!(*vlan_list))
+ goto err;
+
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ (*vlan_list)[i++] = f->vlan;
+ }
+err:
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+}
+
+/**
+ * i40e_set_vsi_promisc
+ * @vf: pointer to the VF struct
+ * @seid: VSI number
+ * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
+ * for a given VLAN
+ * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
+ * for a given VLAN
+ * @vl: List of VLANs - apply filter for given VLANs
+ * @num_vlans: Number of elements in @vl
+ **/
+static int
+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ bool unicast_enable, s16 *vl, u16 num_vlans)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int aq_ret, aq_tmp = 0;
+ int i;
+
+ /* No VLAN to set promisc on, set on VSI */
+ if (!num_vlans || !vl) {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
+ multi_enable,
+ NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ return aq_ret;
+ }
+
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
+ unicast_enable,
+ NULL, true);
+
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+ return aq_ret;
+ }
+
+ for (i = 0; i < num_vlans; i++) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
+ multi_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
+ }
+
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
+ unicast_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
+ vf->vf_id,
+ ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
+ }
+ }
+
+ if (aq_tmp)
+ aq_ret = aq_tmp;
+
+ return aq_ret;
+}
+
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF VSIs and from the VF reset path to reset promiscuous mode.
+ **/
+static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
+{
+ struct i40e_pf *pf = vf->pf;
+ int aq_ret = I40E_SUCCESS;
+ struct i40e_vsi *vsi;
+ u16 num_vlans;
+ s16 *vl;
+
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+ return I40E_ERR_PARAM;
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
+ alluni, &vf->port_vlan_id, 1);
+ return aq_ret;
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
+
+ if (!vl)
+ return I40E_ERR_NO_MEMORY;
+
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ vl, num_vlans);
+ kfree(vl);
+ return aq_ret;
+ }
+
+ /* no VLANs to set on, set on VSI */
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ NULL, 0);
+ return aq_ret;
+}
+
+/**
+ * i40e_sync_vfr_reset
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before triggering a hardware reset, we need to know that no other
+ * process has reserved the hardware for any reset operations. This check
+ * is done by examining the status of the RSTAT1 register used to signal
+ * the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
+
+/**
+ * i40e_trigger_vf_reset
+ * @vf: pointer to the VF structure
+ * @flr: VFLR was issued or not
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ **/
+static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
+
+ /* warn the VF */
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+ * It's normally disabled in i40e_free_vf_res(), but it's safer
+ * to do it earlier to give some time to finish to any VF config
+ * functions that may still be running at this point.
+ */
+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!flr) {
+		/* Sync VFR reset before triggering the next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+			/* wait for the VF driver to finish the reset */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+		/* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
+		 * the reset-in-progress state in the RSTAT1 register.
+ */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ i40e_flush(hw);
+
+ if (i40e_quiesce_vf_pci(vf))
+ dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+ vf->vf_id);
+}
+
+/**
+ * i40e_cleanup_reset_vf
+ * @vf: pointer to the VF structure
+ *
+ * Cleanup a VF after the hardware reset is finished. Expects the caller to
+ * have verified whether the reset is finished properly, and ensure the
+ * minimum amount of wait time has passed.
+ **/
+static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ /* disable promisc modes in case they were enabled */
+ i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
+
+ /* free VF resources to begin resetting the VSI state */
+ i40e_free_vf_res(vf);
+
+ /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
+ * By doing this we allow HW to access VF memory at any point. If we
+ * did it any sooner, HW could access memory while it was being freed
+ * in i40e_free_vf_res(), causing an IOMMU fault.
+ *
+ * On the other hand, this needs to be done ASAP, because the VF driver
+ * is waiting for this to happen and may report a timeout. It's
+ * harmless, but it gets logged into Guest OS kernel log, so best avoid
+ * it.
+ */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* reallocate VF resources to finish resetting the VSI state */
+ if (!i40e_alloc_vf_res(vf)) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ /* Do not notify the client during VF init */
+ if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
+ &vf->vf_states))
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ vf->num_vlan = 0;
+ }
+
+ /* Tell the VF driver the reset is done. This needs to be done only
+ * after VF has been fully initialized, because the VF driver may
+ * request resources immediately after setting this flag.
+ */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
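+
+/* The VFGEN_RSTAT1 write in i40e_cleanup_reset_vf() is the PF half of
+ * the reset handshake: a VF driver typically polls its own VFGEN_RSTAT
+ * register until it reads VIRTCHNL_VFR_VFACTIVE before resuming
+ * virtchnl traffic. A sketch of the VF-side loop (not code from this
+ * file) might look like:
+ *
+ *   do {
+ *       rstat = rd32(hw, VFGEN_RSTAT) & VFR_STATE_MASK;
+ *   } while (rstat != VIRTCHNL_VFR_VFACTIVE);
+ */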
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the VF structure
+ * @flr: VFLR was issued or not
+ *
+ * Returns true if the VF is in reset, resets successfully, or resets
+ * are disabled; false otherwise.
+ **/
+bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ u32 reg;
+ int i;
+
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
+ return true;
+
+ /* Bail out if VFs are disabled. */
+ if (test_bit(__I40E_VF_DISABLE, pf->state))
+ return true;
+
+ /* If VF is being reset already we don't need to continue. */
+ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ return true;
+
+ i40e_trigger_vf_reset(vf, flr);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully. Due to internal HW FIFO flushes,
+ * we must wait 10ms before the register will be valid.
+ */
+ usleep_range(10000, 20000);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (flr)
+ usleep_range(10000, 20000);
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+ usleep_range(10000, 20000);
+
+ /* On initial reset, we don't have any queues to disable */
+ if (vf->lan_vsi_idx != 0)
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
+
+ i40e_cleanup_reset_vf(vf);
+
+ i40e_flush(hw);
+ usleep_range(20000, 40000);
+ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
+
+ return true;
+}
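+
+/* Worst-case wait budget of i40e_reset_vf() above: up to 10 polls of
+ * 10-20 ms each on VPGEN_VFRSTAT (100-200 ms), plus a 10-20 ms sleep
+ * for FLR, the fixed 10-20 ms post-check sleep and the final 20-40 ms
+ * settle, so a single VF reset can take on the order of a quarter of a
+ * second.
+ */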
+
+/**
+ * i40e_reset_all_vfs
+ * @pf: pointer to the PF structure
+ * @flr: VFLR was issued or not
+ *
+ * Reset all allocated VFs in one go. First, tell the hardware to reset each
+ * VF, then do all the waiting in one chunk, and finally finish restoring each
+ * VF after the wait. This is useful during PF routines which need to reset
+ * all VFs, which would otherwise have to perform these resets serially.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
+ **/
+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int i, v;
+ u32 reg;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+ return false;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ return false;
+
+ /* Begin reset on all VFs at once */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+ /* If VF is being reset no need to trigger reset again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+ }
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+ * sequence to make sure that it has completed. We'll keep track of
+ * the VFs using a simple iterator that increments once that VF has
+ * finished resetting.
+ */
+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ usleep_range(10000, 20000);
+
+ /* Check each VF in sequence, beginning with the VF that failed
+ * the previous check.
+ */
+ while (v < pf->num_alloc_vfs) {
+ vf = &pf->vf[v];
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+ }
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+ v++;
+ }
+ }
+
+ if (flr)
+ usleep_range(10000, 20000);
+
+ /* Display a warning if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+ if (v < pf->num_alloc_vfs)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ pf->vf[v].vf_id);
+ usleep_range(10000, 20000);
+
+ /* Begin disabling all the rings associated with VFs, but do not wait
+ * between each VF.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* On initial reset, we don't have any queues to disable */
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+ /* Now that we've notified HW to disable all of the VF rings, wait
+ * until they finish.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* On initial reset, we don't have any queues to disable */
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+ /* HW may need up to 50 ms to finish disabling the RX queues. We
+ * minimize the wait by delaying only once for all VFs.
+ */
+ mdelay(50);
+
+ /* Finish the reset on each VF */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
+ continue;
+
+ i40e_cleanup_reset_vf(&pf->vf[v]);
+ }
+
+ i40e_flush(hw);
+ usleep_range(20000, 40000);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
+
+ return true;
+}
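+
+/* The batching above amortizes the waits across all VFs: with, say, 64
+ * VFs, serial i40e_reset_vf() calls would pay the poll window and the
+ * 50 ms RX-queue delay once per VF, while this routine pays each wait
+ * roughly once for the whole set.
+ */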
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the PF structure
+ *
+ * free VF resources
+ **/
+void i40e_free_vfs(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_idx, bit_idx;
+ int i, tmp, vf_id;
+
+ if (!pf->vf)
+ return;
+
+ set_bit(__I40E_VFS_RELEASING, pf->state);
+ while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ usleep_range(1000, 2000);
+
+ i40e_notify_client_of_vf_enable(pf, 0);
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ /* Amortize wait time by stopping all VFs at the same time */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+ continue;
+
+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ }
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+ continue;
+
+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ }
+
+ /* free up VF resources */
+ tmp = pf->num_alloc_vfs;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+ i40e_free_vf_res(&pf->vf[i]);
+ /* disable qp mappings */
+ i40e_disable_vf_mappings(&pf->vf[i]);
+ }
+
+ kfree(pf->vf);
+ pf->vf = NULL;
+
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
+ if (!pci_vfs_assigned(pf->pdev)) {
+ /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+ }
+ clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(__I40E_VFS_RELEASING, pf->state);
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ *
+ * allocate VF resources
+ **/
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+ struct i40e_vf *vfs;
+ int i, ret = 0;
+
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ i40e_irq_dynamic_disable_icr0(pf);
+
+ /* Check to see if we're just allocating resources for extant VFs */
+ if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
+ }
+ /* allocate memory */
+ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ pf->vf = vfs;
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
+
+ set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
+ }
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+ /* VF resources get allocated during reset */
+ i40e_reset_all_vfs(pf, false);
+
+ i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
+
+err_alloc:
+ if (ret)
+ i40e_free_vfs(pf);
+err_iov:
+ /* Re-enable interrupt 0. */
+ i40e_irq_dynamic_enable_icr0(pf);
+ return ret;
+}
+
+#endif
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int pre_existing_vfs = pci_num_vf(pdev);
+ int err = 0;
+
+ if (test_bit(__I40E_TESTING, pf->state)) {
+ dev_warn(&pdev->dev,
+ "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+ err = -EPERM;
+ goto err_out;
+ }
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ i40e_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ goto out;
+
+ if (num_vfs > pf->num_req_vfs) {
+ dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+ num_vfs, pf->num_req_vfs);
+ err = -EPERM;
+ goto err_out;
+ }
+
+ dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
+ err = i40e_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+ goto err_out;
+ }
+
+out:
+ return num_vfs;
+
+err_out:
+ return err;
+#endif
+ return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ if (num_vfs) {
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
+ }
+ ret = i40e_pci_sriov_enable(pdev, num_vfs);
+ goto sriov_configure_out;
+ }
+
+ if (!pci_vfs_assigned(pf->pdev)) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
+ } else {
+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+ ret = -EINVAL;
+ goto sriov_configure_out;
+ }
+sriov_configure_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
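+
+/* This callback backs the standard PCI sysfs knob, so from userspace a
+ * VF count change looks like the following (the device address is
+ * illustrative only):
+ *
+ *   # echo 8 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
+ *   # echo 0 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
+ *
+ * A nonzero write takes the num_vfs branch above; zero frees the VFs.
+ */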
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int abs_vf_id;
+ int aq_ret;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return -EINVAL;
+
+ pf = vf->pf;
+ hw = &pf->hw;
+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the VF info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to VF
+ **/
+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+ enum virtchnl_ops opcode,
+ int retval)
+{
+ return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called while servicing a VF message to synchronize with a potential
+ * VF reset state
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+ int i;
+
+ /* Some messages can only be handled while a given VF state bit is set.
+ * It is possible that the bit is cleared during a VF reset, so wait
+ * until the end of the reset to handle the request message correctly.
+ */
+ for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+ if (test_bit(state, &vf->vf_states))
+ return true;
+ usleep_range(10000, 20000);
+ }
+
+ return test_bit(state, &vf->vf_states);
+}
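+
+/* Assuming I40E_VF_STATE_WAIT_COUNT is on the order of 20, the loop
+ * above retries for roughly 20 * 10-20 ms = 200-400 ms before the final
+ * check, which bounds how long a VF message can stall behind an
+ * in-flight VF reset.
+ */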
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ **/
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ };
+
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS, (u8 *)&info,
+ sizeof(struct virtchnl_version_info));
+}
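+
+/* Example exchange: a VF advertising API 1.0 has the minor version
+ * forced to VIRTCHNL_VERSION_MINOR_NO_VF_CAPS, so it sees exactly 1.0
+ * back; a VF advertising 1.1 receives the PF's compiled-in
+ * VIRTCHNL_VERSION_MAJOR/MINOR pair unchanged.
+ */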
+
+/**
+ * i40e_del_qch - delete all the additional VSIs created as a part of ADq
+ * @vf: pointer to VF structure
+ **/
+static void i40e_del_qch(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int i;
+
+ /* The first element in the array belongs to the primary VF VSI and we
+ * shouldn't delete it. We should, however, delete the rest of the VSIs
+ * created.
+ */
+ for (i = 1; i < vf->num_tc; i++) {
+ if (vf->ch[i].vsi_idx) {
+ i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
+ vf->ch[i].vsi_idx = 0;
+ vf->ch[i].vsi_id = 0;
+ }
+ }
+}
+
+/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this both in max frame
+ * size checks and when sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
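+
+/* Worked example: with a port max frame size of 9728 bytes (the usual
+ * maximum for this hardware) and a port VLAN configured, the VF is told
+ * 9728 - VLAN_HLEN = 9728 - 4 = 9724, leaving room for the tag the PF
+ * inserts on the VF's behalf.
+ */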
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ **/
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_resource *vfres = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+ int num_vsis = 1;
+ int aq_ret = 0;
+ size_t len = 0;
+ int ret;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
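+ /* struct_size() below expands, conceptually, to
+ * sizeof(*vfres) + num_vsis * sizeof(vfres->vsi_res[0]),
+ * with overflow checking, sizing the flexible vsi_res[] array tail.
+ */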
+ len = struct_size(vfres, vsi_res, num_vsis);
+ vfres = kzalloc(len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ if (VF_IS_V11(&vf->vf_ver))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi->info.pvid)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (i40e_vf_client_capable(pf, vf->vf_id) &&
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
+ set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ } else {
+ clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ } else {
+ if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_cap_flags |=
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ }
+
+ if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_cap_flags |=
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
+
+ vfres->num_vsis = num_vsis;
+ vfres->num_queue_pairs = vf->num_queue_pairs;
+ vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
+
+ if (vf->lan_vsi_idx) {
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
+ /* VFs only use TC 0 */
+ vfres->vsi_res[0].qset_handle
+ = le16_to_cpu(vsi->info.qs_handle[0]);
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+ eth_zero_addr(vf->default_lan_addr.addr);
+ }
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->default_lan_addr.addr);
+ }
+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+
+err:
+ /* send the response back to the VF */
+ ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
+ aq_ret, (u8 *)vfres, len);
+
+ kfree(vfres);
+ return ret;
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the promiscuous mode of
+ * VF vsis
+ **/
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
+ struct i40e_pf *pf = vf->pf;
+ bool allmulti = false;
+ bool alluni = false;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+
+ /* Lie to the VF on purpose, because this is an error we can
+ * ignore. Unprivileged VF is not a virtual channel error.
+ */
+ aq_ret = 0;
+ goto err_out;
+ }
+
+ if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ /* Multicast promiscuous handling */
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+ aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
+ alluni);
+ if (aq_ret)
+ goto err_out;
+
+ if (allmulti) {
+ if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+
+ if (alluni) {
+ if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+
+err_out:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the rx/tx
+ * queues
+ **/
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ u16 vsi_id, vsi_queue_id = 0;
+ struct i40e_pf *pf = vf->pf;
+ int i, j = 0, idx = 0;
+ struct i40e_vsi *vsi;
+ u16 num_qps_all = 0;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vf->adq_enabled) {
+ for (i = 0; i < vf->num_tc; i++)
+ num_qps_all += vf->ch[i].num_qps;
+ if (num_qps_all != qci->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi_id = qci->vsi_id;
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+
+ if (!vf->adq_enabled) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ qpi->txq.queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi_queue_id = qpi->txq.queue_id;
+
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != vsi_queue_id) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ if (vf->adq_enabled) {
+ if (idx >= ARRAY_SIZE(vf->ch)) {
+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ goto error_param;
+ }
+ vsi_id = vf->ch[idx].vsi_id;
+ }
+
+ if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->rxq) ||
+ i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->txq)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* For ADq there can be up to 4 VSIs with a max of 4 queues each.
+ * The VF does not know about these additional VSIs; all it cares
+ * about is its own queues. The PF configures these queues on the
+ * appropriate VSIs based on the TC mapping.
+ */
+ if (vf->adq_enabled) {
+ if (idx >= ARRAY_SIZE(vf->ch)) {
+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ goto error_param;
+ }
+ if (j == (vf->ch[idx].num_qps - 1)) {
+ idx++;
+ j = 0; /* resetting the queue count */
+ vsi_queue_id = 0;
+ } else {
+ j++;
+ vsi_queue_id++;
+ }
+ }
+ }
+ /* set vsi num_queue_pairs in use to num configured by VF */
+ if (!vf->adq_enabled) {
+ pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
+ qci->num_queue_pairs;
+ } else {
+ for (i = 0; i < vf->num_tc; i++) {
+ vsi = pf->vsi[vf->ch[i].vsi_idx];
+ vsi->num_queue_pairs = vf->ch[i].num_qps;
+
+ if (i40e_update_adq_vsi_queues(vsi, i)) {
+ aq_ret = I40E_ERR_CONFIG;
+ goto error_param;
+ }
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_validate_queue_map - check queue map is valid
+ * @vf: the VF structure pointer
+ * @vsi_id: vsi id
+ * @queuemap: Tx or Rx queue map
+ *
+ * check if Tx or Rx queue map is valid
+ **/
+static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
+ unsigned long queuemap)
+{
+ u16 vsi_queue_id, queue_id;
+
+ for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
+ if (vf->adq_enabled) {
+ vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+ queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
+ } else {
+ queue_id = vsi_queue_id;
+ }
+
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
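+
+/* Worked ADq example, assuming I40E_MAX_VF_VSI and
+ * I40E_DEFAULT_QUEUES_PER_VF are both 4: vsi_queue_id 9 in the VF's
+ * flat map selects channel 9 / 4 = 2 and queue 9 % 4 = 1 within that
+ * channel's VSI; without ADq the queue id is used as-is.
+ */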
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the irq to
+ * queue map
+ **/
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_irq_map_info *irqmap_info =
+ (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_vector_map *map;
+ int aq_ret = 0;
+ u16 vsi_id;
+ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (irqmap_info->num_vectors >
+ vf->pf->hw.func_caps.num_msix_vectors_vf) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+ /* validate msg params */
+ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi_id = map->vsi_id;
+
+ if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ i40e_config_irq_link_list(vf, vsi_id, map);
+ }
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ aq_ret);
+}
+
+/**
+ * i40e_ctrl_vf_tx_rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to be enabled
+ * @enable: start or stop the queue
+ **/
+static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ u16 q_id;
+
+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ vsi->base_queue + q_id,
+ false /*is xdp*/, enable);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40e_ctrl_vf_rx_rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to be enabled
+ * @enable: start or stop the queue
+ **/
+static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ u16 q_id;
+
+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+ ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
+ enable);
+ if (ret)
+ break;
+ }
+ return ret;
+}
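+
+/* Both ring-control helpers above walk the VF-supplied bitmap with
+ * for_each_set_bit(): e.g. q_map = 0xA (binary 1010) visits q_id 1 and
+ * then q_id 3, each addressed relative to the VSI as
+ * vsi->base_queue + q_id.
+ */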
+
+/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
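+
+/* Example, assuming I40E_MAX_VF_QUEUES is 16: a bitmap is accepted only
+ * if at least one of rx_queues/tx_queues is nonzero and both are below
+ * BIT(16) = 0x10000, so rx_queues = 0x3 (queues 0 and 1) passes while
+ * rx_queues = 0x10000 or an all-zero rx/tx pair is rejected.
+ */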
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ **/
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ int aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Use the queue bit map sent by the VF */
+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+ true)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+ true)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+
+ /* need to start the rings for the additional ADq VSIs as well */
+ if (vf->adq_enabled) {
+ /* zero belongs to LAN VSI */
+ for (i = 1; i < vf->num_tc; i++) {
+ if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
+ aq_ret = I40E_ERR_TIMEOUT;
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ **/
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Use the queue bit map sent by the VF */
+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+ false)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+ false)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_check_enough_queue - check whether enough queues are available
+ * @vf: pointer to the VF info
+ * @needed: the number of queue pairs needed
+ *
+ * Returns a non-negative queue index when enough queues are available,
+ * or negative on error
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+ unsigned int i, cur_queues, more, pool_size;
+ struct i40e_lump_tracking *pile;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ cur_queues = vsi->alloc_queue_pairs;
+
+ /* if the currently allocated queues already satisfy the need */
+ if (cur_queues >= needed)
+ return vsi->base_queue;
+
+ pile = pf->qp_pile;
+ if (cur_queues > 0) {
+ /* if some queues are already allocated, just check whether
+ * enough free queues sit directly behind the allocated block.
+ */
+ more = needed - cur_queues;
+ for (i = vsi->base_queue + cur_queues;
+ i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT)
+ break;
+
+ if (more-- == 1)
+ /* there is enough */
+ return vsi->base_queue;
+ }
+ }
+
+ pool_size = 0;
+ for (i = 0; i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ pool_size = 0;
+ continue;
+ }
+ if (needed <= ++pool_size)
+ /* there is enough */
+ return i;
+ }
+
+ return -ENOMEM;
+}
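+
+/* Worked example of the final scan above: with pile entries
+ * [used, used, free, free, free, used, ...] and needed = 3, pool_size
+ * resets at each in-use entry and reaches 3 at index 4, which is what
+ * gets returned. The caller in i40e_vc_request_queues_msg() only cares
+ * whether the result is negative (not enough room).
+ */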
+
+/**
+ * i40e_vc_request_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, PF will reset the VF and
+ * return 0. If unsuccessful, PF will send message informing VF of number of
+ * available queues and return result of sending VF a message.
+ **/
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_pairs = vfres->num_queue_pairs;
+ u8 cur_pairs = vf->num_queue_pairs;
+ struct i40e_pf *pf = vf->pf;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
+ return -EINVAL;
+
+ if (req_pairs > I40E_MAX_VF_QUEUES) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_id,
+ I40E_MAX_VF_QUEUES);
+ vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
+ } else if (req_pairs - cur_pairs > pf->queues_left) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but only %d left.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs,
+ pf->queues_left);
+ vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but there is not enough for it.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs);
+ vfres->num_queue_pairs = cur_pairs;
+ } else {
+ /* successful request */
+ vf->num_req_queues = req_pairs;
+ i40e_vc_reset_vf(vf, true);
+ return 0;
+ }
+
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+ (u8 *)vfres, sizeof(*vfres));
+}
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get vsi stats
+ **/
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_eth_stats stats;
+ int aq_ret = 0;
+ struct i40e_vsi *vsi;
+
+ memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_update_eth_stats(vsi);
+ stats = vsi->eth_stats;
+
+error_param:
+ /* send the response back to the VF */
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+#define I40E_MAX_MACVLAN_PER_HW 3072
+#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
+ (num_ports))
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
+ * program: 16 for multicast, 1 for MAC, 1 for broadcast
+ */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
+#define I40E_VC_MAX_VLAN_PER_VF 16
+
+#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
+({ typeof(vf_num) vf_num_ = (vf_num); \
+ typeof(num_ports) num_ports_ = (num_ports); \
+ ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
+ I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
+ I40E_VC_MAX_MAC_ADDR_PER_VF; })
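+
+/* Worked example: with 2 ports, I40E_MAX_MACVLAN_PER_PF(2) = 3072 / 2 =
+ * 1536 filters per PF. With 32 allocated VFs, a trusted VF may then use
+ * (1536 - 32 * 18) / 32 + 18 = 960 / 32 + 18 = 48 MAC/VLAN filters,
+ * where 18 is I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1).
+ */
+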
+/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the VF info
+ * @al: MAC address list from virtchnl
+ *
+ * Check that the given list of MAC addresses is allowed. Will return -EPERM
+ * if any address in the list is not valid. Checks the following conditions:
+ *
+ * 1) broadcast and zero addresses are never valid
+ * 2) unicast addresses are not allowed if the VMM has administratively set
+ * the VF MAC address, unless the VF is marked as privileged.
+ * 3) There is enough space to add all the addresses.
+ *
+ * Note that to guarantee consistency, it is expected this function be called
+ * while holding the mac_filter_hash_lock, as otherwise the current number of
+ * addresses might not be accurate.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+ struct virtchnl_ether_addr_list *al)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ struct i40e_hw *hw = &pf->hw;
+ int mac2add_cnt = 0;
+ int i;
+
+ for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
+ u8 *addr = al->list[i].addr;
+
+ if (is_broadcast_ether_addr(addr) ||
+ is_zero_ether_addr(addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+ addr);
+ return I40E_ERR_INVALID_MAC_ADDR;
+ }
+
+ /* If the host VMM administrator has set the VF MAC address
+ * administratively via the ndo_set_vf_mac command then deny
+ * permission to the VF to add or delete unicast MAC addresses.
+ * Unless the VF is privileged, in which case it can do whatever.
+ * The VF may request to set the MAC address filter already
+ * assigned to it so do not return an error in that case.
+ */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
+ !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ dev_err(&pf->pdev->dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return -EPERM;
+ }
+
+ /* count filters that will really be added */
+ f = i40e_find_mac(vsi, addr);
+ if (!f)
+ ++mac2add_cnt;
+ }
+
+ /* If this VF is not privileged, then we can't add more than a limited
+ * number of addresses. Check to make sure that the additions do not
+ * push us over the limit.
+ */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MAC_ADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
+ /* If this VF is trusted, it can use more resources than an untrusted
+ * one. However, to ensure that every trusted VF gets an appropriate
+ * number of resources, divide the whole pool of resources per port and
+ * then across all VFs.
+ */
+ } else {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
+ hw->num_ports)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+ return -EPERM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest mac address filter
+ **/
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int ret = 0;
+ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ /* Lock once, because every function inside the for loop accesses the
+ * VSI's MAC filter list, which needs to be protected by the same lock.
+ */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ ret = i40e_check_vf_permission(vf, al);
+ if (ret) {
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ }
+
+ /* add new addresses to the list */
+ for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
+
+ f = i40e_find_mac(vsi, al->list[i].addr);
+ if (!f) {
+ f = i40e_add_mac_filter(vsi, al->list[i].addr);
+
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add MAC filter %pM for VF %d\n",
+ al->list[i].addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ }
+ if (is_valid_ether_addr(al->list[i].addr) &&
+ is_zero_ether_addr(vf->default_lan_addr.addr))
+ ether_addr_copy(vf->default_lan_addr.addr,
+ al->list[i].addr);
+ }
+ }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* program the updated filter list */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+ vf->vf_id, ret);
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0);
+}
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest mac address filter
+ **/
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ bool was_unimac_deleted = false;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int ret = 0;
+ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
+ al->list[i].addr, vf->vf_id);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto error_param;
+ }
+ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+ was_unimac_deleted = true;
+ }
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ /* delete addresses from the list */
+ for (i = 0; i < al->num_elements; i++)
+ if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ }
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* program the updated filter list */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+ vf->vf_id, ret);
+
+ if (vf->trusted && was_unimac_deleted) {
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ u8 *macaddr = NULL;
+ int bkt;
+
+ /* set last unicast mac address as default */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (is_valid_ether_addr(f->macaddr))
+ macaddr = f->macaddr;
+ }
+ if (macaddr)
+ ether_addr_copy(vf->default_lan_addr.addr, macaddr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * program guest vlan id
+ **/
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+ int i;
+
+ if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+ goto error_param;
+ }
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (vsi->info.pvid) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ i40e_vlan_stripping_enable(vsi);
+ for (i = 0; i < vfl->num_elements; i++) {
+ /* add new VLAN filter */
+ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
+ if (!ret)
+ vf->num_vlan++;
+
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
+
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Unable to add VLAN filter %d for VF %d, error %d\n",
+ vfl->vlan_id[i], vf->vf_id, ret);
+ }
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest vlan id
+ **/
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (vsi->info.pvid) {
+ if (vfl->num_elements > 1 || vfl->vlan_id[0])
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ vf->num_vlan--;
+
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
+ }
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_iwarp_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF for the iwarp msgs
+ **/
+static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_pf *pf = vf->pf;
+ int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ int aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
+ msg, msglen);
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_iwarp_qvmap_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @config: config qvmap or release it
+ *
+ * called from the VF for the iwarp msgs
+ **/
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
+{
+ struct virtchnl_iwarp_qvlist_info *qvlist_info =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ int aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (config) {
+ if (i40e_config_iwarp_qvlist(vf, qvlist_info))
+ aq_ret = I40E_ERR_PARAM;
+ } else {
+ i40e_release_iwarp_qvlist(vf);
+ }
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf,
+ config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
+ vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+ u16 i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
+ vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ for (i = 0; i < vrl->lut_entries; i++)
+ if (vrl->lut[i] >= vf->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hena *vrh = NULL;
+ struct i40e_pf *pf = vf->pf;
+ int aq_ret = 0;
+ int len = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ len = sizeof(struct virtchnl_rss_hena);
+
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+ /* send the response back to the VF */
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret, (u8 *)vrh, len);
+ kfree(vrh);
+ return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hena *vrh =
+ (struct virtchnl_rss_hena *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(vrh->hena >> 32));
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+}
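+
+/* The 64-bit HENA value is split across two 32-bit registers above: e.g.
+ * hena = 0x0000000500000003ULL writes 0x00000003 to VFQF_HENA1(0, vf_id)
+ * and 0x00000005 to VFQF_HENA1(1, vf_id).
+ */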
+
+/**
+ * i40e_vc_enable_vlan_stripping
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Enable vlan header stripping for the VF
+ **/
+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+{
+ struct i40e_vsi *vsi;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ i40e_vlan_stripping_enable(vsi);
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_disable_vlan_stripping
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Disable vlan header stripping for the VF
+ **/
+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+{
+ struct i40e_vsi *vsi;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ i40e_vlan_stripping_disable(vsi);
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ aq_ret);
+}
+
+/**
+ * i40e_validate_cloud_filter
+ * @vf: pointer to VF structure
+ * @tc_filter: pointer to filter requested
+ *
+ * This function validates cloud filter programmed as TC filter for ADq
+ **/
+static int i40e_validate_cloud_filter(struct i40e_vf *vf,
+ struct virtchnl_filter *tc_filter)
+{
+ struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
+ struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ bool found = false;
+ int bkt;
+
+ if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: ADQ doesn't support this action (%d)\n",
+ vf->vf_id, tc_filter->action);
+ goto err;
+ }
+
+ /* action_meta is TC number here to which the filter is applied */
+ if (!tc_filter->action_meta ||
+ tc_filter->action_meta > vf->num_tc) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
+ vf->vf_id, tc_filter->action_meta);
+ goto err;
+ }
+
+ /* Check filter if it's programmed for advanced mode or basic mode.
+ * There are two ADq modes (for VF only),
+ * 1. Basic mode: intended to allow as many filter options as possible
+ * to be added to a VF in Non-trusted mode. Main goal is
+ * to add filters to its own MAC and VLAN id.
+ * 2. Advanced mode: is for allowing filters to be applied other than
+ * its own MAC or VLAN. This mode requires the VF to be
+ * Trusted.
+ */
+ if (mask.dst_mac[0] && !mask.dst_ip[0]) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ f = i40e_find_mac(vsi, data.dst_mac);
+
+ if (!f) {
+ dev_info(&pf->pdev->dev,
+ "Destination MAC %pM doesn't belong to VF %d\n",
+ data.dst_mac, vf->vf_id);
+ goto err;
+ }
+
+ if (mask.vlan_id) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
+ hlist) {
+ if (f->vlan == ntohs(data.vlan_id)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_info(&pf->pdev->dev,
+ "VF %d doesn't have any VLAN id %u\n",
+ vf->vf_id, ntohs(data.vlan_id));
+ goto err;
+ }
+ }
+ } else {
+ /* Check if VF is trusted */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
+ vf->vf_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask.dst_mac[0] & data.dst_mac[0]) {
+ if (is_broadcast_ether_addr(data.dst_mac) ||
+ is_zero_ether_addr(data.dst_mac)) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
+ vf->vf_id, data.dst_mac);
+ goto err;
+ }
+ }
+
+ if (mask.src_mac[0] & data.src_mac[0]) {
+ if (is_broadcast_ether_addr(data.src_mac) ||
+ is_zero_ether_addr(data.src_mac)) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
+ vf->vf_id, data.src_mac);
+ goto err;
+ }
+ }
+
+ if (mask.dst_port & data.dst_port) {
+ if (!data.dst_port) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ if (mask.src_port & data.src_port) {
+ if (!data.src_port) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
+ tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
+ vf->vf_id);
+ goto err;
+ }
+
+ if (mask.vlan_id & data.vlan_id) {
+ if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
+ dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ return I40E_SUCCESS;
+err:
+ return I40E_ERR_CONFIG;
+}
+
+/**
+ * i40e_find_vsi_from_seid - searches for the vsi with the given seid
+ * @vf: pointer to the VF info
+ * @seid: seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int i;
+
+ for (i = 0; i < vf->num_tc; i++) {
+ vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
+ if (vsi && vsi->seid == seid)
+ return vsi;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_del_all_cloud_filters
+ * @vf: pointer to the VF info
+ *
+ * This function deletes all cloud filters
+ **/
+static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+{
+ struct i40e_cloud_filter *cfilter = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct hlist_node *node;
+ int ret;
+
+ hlist_for_each_entry_safe(cfilter, node,
+ &vf->cloud_filter_list, cloud_node) {
+ vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
+ vf->vf_id, cfilter->seid);
+ continue;
+ }
+
+ if (cfilter->dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
+ false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+
+ hlist_del(&cfilter->cloud_node);
+ kfree(cfilter);
+ vf->num_cloud_filters--;
+ }
+}
+
+/**
+ * i40e_vc_del_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function deletes a cloud filter programmed as TC filter for ADq
+ **/
+static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+ struct i40e_cloud_filter cfilter, *cf = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct hlist_node *node;
+ int aq_ret = 0;
+ int i, ret;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!vf->adq_enabled) {
+ dev_info(&pf->pdev->dev,
+			 "VF %d: ADq not enabled, can't delete cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (i40e_validate_cloud_filter(vf, vcf)) {
+ dev_info(&pf->pdev->dev,
+			 "VF %d: Invalid input, can't delete cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ memset(&cfilter, 0, sizeof(cfilter));
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+ /* parse source mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+ cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
+ cfilter.dst_port = mask.dst_port & tcf.dst_port;
+ cfilter.src_port = mask.src_port & tcf.src_port;
+
+ switch (vcf->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ cfilter.n_proto = ETH_P_IP;
+ if (mask.dst_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+		else if (mask.src_ip[0] & tcf.src_ip[0])
+ memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ cfilter.n_proto = ETH_P_IPV6;
+ if (mask.dst_ip[3] & tcf.dst_ip[3])
+ memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
+ sizeof(cfilter.ip.v6.dst_ip6));
+ if (mask.src_ip[3] & tcf.src_ip[3])
+ memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
+ sizeof(cfilter.ip.v6.src_ip6));
+ break;
+ default:
+		/* TC filters can be configured with different field
+		 * combinations, and in this case IP is not a part of
+		 * the filter config
+		 */
+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+ vf->vf_id);
+ }
+
+	/* get the VSI to which the TC belongs */
+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+ cfilter.seid = vsi->seid;
+ cfilter.flags = vcf->field_flags;
+
+ /* Deleting TC filter */
+ if (tcf.dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err;
+ }
+
+ hlist_for_each_entry_safe(cf, node,
+ &vf->cloud_filter_list, cloud_node) {
+ if (cf->seid != cfilter.seid)
+ continue;
+ if (mask.dst_port)
+ if (cfilter.dst_port != cf->dst_port)
+ continue;
+ if (mask.dst_mac[0])
+			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
+ continue;
+		/* for ipv4 data to be valid, only the first mask word is set */
+ if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
+ if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip)))
+ continue;
+ /* for ipv6, mask is set for all sixteen bytes (4 words) */
+ if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
+ if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
+				   sizeof(cfilter.ip.v6.dst_ip6)))
+ continue;
+ if (mask.vlan_id)
+ if (cfilter.vlan_id != cf->vlan_id)
+ continue;
+
+ hlist_del(&cf->cloud_node);
+ kfree(cf);
+ vf->num_cloud_filters--;
+ }
+
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function adds a cloud filter programmed as TC filter for ADq
+ **/
+static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+ struct i40e_cloud_filter *cfilter = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ if (!vf->adq_enabled) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: ADq is not enabled, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ if (i40e_validate_cloud_filter(vf, vcf)) {
+ dev_info(&pf->pdev->dev,
+			 "VF %d: Invalid input(s), can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+ if (!cfilter) {
+ aq_ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+ /* parse source mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+ cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
+ cfilter->dst_port = mask.dst_port & tcf.dst_port;
+ cfilter->src_port = mask.src_port & tcf.src_port;
+
+ switch (vcf->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ cfilter->n_proto = ETH_P_IP;
+ if (mask.dst_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+		else if (mask.src_ip[0] & tcf.src_ip[0])
+ memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ cfilter->n_proto = ETH_P_IPV6;
+ if (mask.dst_ip[3] & tcf.dst_ip[3])
+ memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
+ sizeof(cfilter->ip.v6.dst_ip6));
+ if (mask.src_ip[3] & tcf.src_ip[3])
+ memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
+ sizeof(cfilter->ip.v6.src_ip6));
+ break;
+ default:
+		/* TC filters can be configured with different field
+		 * combinations, and in this case IP is not a part of
+		 * the filter config
+		 */
+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+ vf->vf_id);
+ }
+
+	/* get the VSI to which the TC belongs */
+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+ cfilter->seid = vsi->seid;
+ cfilter->flags = vcf->field_flags;
+
+ /* Adding cloud filter programmed as TC filter */
+ if (tcf.dst_port)
+ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ else
+ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err_free;
+ }
+
+ INIT_HLIST_NODE(&cfilter->cloud_node);
+ hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+	/* the filter is now owned by the list; clear the local pointer so
+	 * the error path below does not free it
+	 */
+ cfilter = NULL;
+ vf->num_cloud_filters++;
+err_free:
+ kfree(cfilter);
+err_out:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_qch_msg - Add queue channel and enable ADq
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_tc_info *tci =
+ (struct virtchnl_tc_info *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int i, adq_request_qps = 0;
+ int aq_ret = 0;
+ u64 speed = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* ADq cannot be applied if spoof check is ON */
+ if (vf->spoofchk) {
+ dev_err(&pf->pdev->dev,
+ "Spoof check is ON, turn it OFF to enable ADq\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+ dev_err(&pf->pdev->dev,
+ "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* max number of traffic classes for VF currently capped at 4 */
+ if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
+ dev_err(&pf->pdev->dev,
+ "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
+ vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* validate queues for each TC */
+ for (i = 0; i < tci->num_tc; i++)
+ if (!tci->list[i].count ||
+ tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
+ vf->vf_id, i, tci->list[i].count,
+ I40E_DEFAULT_QUEUES_PER_VF);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+	/* ADq needs max VF queues; so far the VF has only the default number */
+ adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
+
+ if (pf->queues_left < adq_request_qps) {
+ dev_err(&pf->pdev->dev,
+ "No queues left to allocate to VF %d\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ } else {
+		/* we need to allocate max VF queues to enable ADq so as to
+		 * make sure an ADq-enabled VF always gets its queues back
+		 * when it goes through a reset.
+ */
+ vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
+ }
+
+	/* get link speed in Mbps to validate the rate limit */
+ speed = i40e_vc_link_speed2mbps(ls->link_speed);
+ if (speed == SPEED_UNKNOWN) {
+ dev_err(&pf->pdev->dev,
+ "Cannot detect link speed\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* parse data from the queue channel info */
+ vf->num_tc = tci->num_tc;
+ for (i = 0; i < vf->num_tc; i++) {
+ if (tci->list[i].max_tx_rate) {
+ if (tci->list[i].max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev,
+					"Invalid max tx rate %llu specified for VF %d.\n",
+ tci->list[i].max_tx_rate,
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ } else {
+ vf->ch[i].max_tx_rate =
+ tci->list[i].max_tx_rate;
+ }
+ }
+ vf->ch[i].num_qps = tci->list[i].count;
+ }
+
+ /* set this flag only after making sure all inputs are sane */
+ vf->adq_enabled = true;
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_reset_vf(vf, true);
+
+ return I40E_SUCCESS;
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_del_qch_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct i40e_pf *pf = vf->pf;
+ int aq_ret = 0;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (vf->adq_enabled) {
+ i40e_del_all_cloud_filters(vf);
+ i40e_del_qch(vf);
+ vf->adq_enabled = false;
+ vf->num_tc = 0;
+ dev_info(&pf->pdev->dev,
+ "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
+ vf->vf_id);
+ } else {
+ dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ }
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_reset_vf(vf, true);
+
+ return I40E_SUCCESS;
+
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the PF structure
+ * @vf_id: source VF id
+ * @v_opcode: operation code
+ * @v_retval: unused return value code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Called from the common AEQ/ARQ handler to
+ * process a request from a VF.
+ **/
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
+ u32 __always_unused v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
+ struct i40e_vf *vf;
+ int ret;
+
+ pf->vf_aq_requests++;
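+	/* vf_id from the AQ event is absolute; local_vf_id indexes this
+	 * PF's vf array
+	 */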
+ if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
+ return -EINVAL;
+	vf = &pf->vf[local_vf_id];
+
+ /* Check if VF is disabled. */
+ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
+ return I40E_ERR_PARAM;
+
+ /* perform basic checks on the msg */
+ ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+
+ if (ret) {
+ i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
+ local_vf_id, v_opcode, msglen);
+ switch (ret) {
+ case VIRTCHNL_STATUS_ERR_PARAM:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ ret = i40e_vc_get_version_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ ret = i40e_vc_get_vf_resources_msg(vf, msg);
+ i40e_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ i40e_vc_reset_vf(vf, false);
+ ret = 0;
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ret = i40e_vc_config_queues_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ret = i40e_vc_config_irq_map_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ ret = i40e_vc_enable_queues_msg(vf, msg);
+ i40e_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ ret = i40e_vc_disable_queues_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ ret = i40e_vc_add_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ ret = i40e_vc_del_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ ret = i40e_vc_add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ ret = i40e_vc_remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ ret = i40e_vc_get_stats_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_IWARP:
+ ret = i40e_vc_iwarp_msg(vf, msg, msglen);
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ret = i40e_vc_config_rss_key(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ret = i40e_vc_config_rss_lut(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ ret = i40e_vc_get_rss_hena(vf, msg);
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ ret = i40e_vc_set_rss_hena(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ ret = i40e_vc_enable_vlan_stripping(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ ret = i40e_vc_disable_vlan_stripping(vf, msg);
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ ret = i40e_vc_request_queues_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ ret = i40e_vc_add_qch_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ ret = i40e_vc_del_qch_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ ret = i40e_vc_add_cloud_filter(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ ret = i40e_vc_del_cloud_filter(vf, msg);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+ v_opcode, local_vf_id);
+ ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+ I40E_ERR_NOT_IMPLEMENTED);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the PF structure
+ *
+ * Called from the VFLR IRQ handler to
+ * free up VF resources and state variables.
+ **/
+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
+ struct i40e_vf *vf;
+ int vf_id;
+
+ if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
+ return 0;
+
+ /* Re-enable the VFLR interrupt cause here, before looking for which
+ * VF got reset. Otherwise, if another VF gets a reset while the
+ * first one is being processed, that interrupt will be lost, and
+ * that VF will be stuck in reset forever.
+ */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+
+ clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
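+		/* each GLGEN_VFLRSTAT register tracks 32 VFs; locate this
+		 * VF's register index and bit from its absolute VF id
+		 */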
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ vf = &pf->vf[vf_id];
+ reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+ i40e_reset_vf(vf, true);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_validate_vf
+ * @pf: the physical function
+ * @vf_id: VF identifier
+ *
+ * Check that the VF is enabled and the VSI exists.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto err_out;
+ }
+ vf = &pf->vf[vf_id];
+ vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
+ if (!vsi)
+ ret = -EINVAL;
+err_out:
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: mac address
+ *
+ * program VF mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_mac_filter *f;
+ struct i40e_vf *vf;
+ int ret = 0;
+ struct hlist_node *h;
+ int bkt;
+ u8 i;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error_param;
+
+ vf = &pf->vf[vf_id];
+
+	/* When the VF is resetting, wait until it is done.
+ * It can take up to 200 milliseconds,
+ * but wait for up to 300 milliseconds to be safe.
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
+ */
+ for (i = 0; i < 15; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+ break;
+ msleep(20);
+ }
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+ vf_id);
+ ret = -EAGAIN;
+ goto error_param;
+ }
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (is_multicast_ether_addr(mac)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+	/* Lock once, because the add/del filter functions invoked below
+	 * require mac_filter_hash_lock to be held
+ */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ /* delete the temporary mac address */
+ if (!is_zero_ether_addr(vf->default_lan_addr.addr))
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+
+ /* Delete all the filters for this VSI - we're going to kill it
+ * anyway.
+ */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ __i40e_del_filter(vsi, f);
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* program mac filter */
+ if (i40e_sync_vsi_filters(vsi)) {
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ ret = -EIO;
+ goto error_param;
+ }
+ ether_addr_copy(vf->default_lan_addr.addr, mac);
+
+ if (is_zero_ether_addr(mac)) {
+ vf->pf_set_mac = false;
+ dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
+ } else {
+ vf->pf_set_mac = true;
+ dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
+ mac, vf_id);
+ }
+
+	/* Force the VF interface down so it comes back up with the new MAC
+	 * address
+ */
+ i40e_vc_reset_vf(vf, true);
+ dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
+
+error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN identifier
+ * @qos: priority setting
+ * @vlan_proto: vlan protocol
+ *
+ * program VF vlan id and/or qos
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto)
+{
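+	/* fold the VLAN id and 802.1p priority into the 16-bit TCI layout
+	 * used for the VSI PVID: priority in bits 15:13, VLAN id in bits 11:0
+	 */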
+ u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool allmulti = false, alluni = false;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error_pvid;
+
+ if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ ret = -EPROTONOSUPPORT;
+ goto error_pvid;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+ vf_id);
+ ret = -EAGAIN;
+ goto error_pvid;
+ }
+
+ if (le16_to_cpu(vsi->info.pvid) == vlanprio)
+ /* duplicate request, so just return success */
+ goto error_pvid;
+
+ i40e_vlan_stripping_enable(vsi);
+
+ /* Locked once because multiple functions below iterate list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ /* Check for condition where there was already a port VLAN ID
+ * filter set and now it is being deleted by setting it to zero.
+ * Additionally check for the condition where there was a port
+ * VLAN but now there is a new and different port VLAN being set.
+ * Before deleting all the old VLAN filters we must add new ones
+ * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
+ * MAC addresses deleted.
+ */
+ if ((!(vlan_id || qos) ||
+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
+ vsi->info.pvid) {
+ ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_pvid;
+ }
+ }
+
+ if (vsi->info.pvid) {
+ /* remove all filters on the old VLAN */
+ i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+ }
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* disable promisc modes in case they were enabled */
+ ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
+ allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
+ goto error_pvid;
+ }
+
+ if (vlan_id || qos)
+ ret = i40e_vsi_add_pvid(vsi, vlanprio);
+ else
+ i40e_vsi_remove_pvid(vsi);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter for each MAC */
+ ret = i40e_add_vlan_all_mac(vsi, vlan_id);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_pvid;
+ }
+
+ /* remove the previously added non-VLAN MAC filters */
+ i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
+ }
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+ alluni = true;
+
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+ allmulti = true;
+
+ /* Schedule the worker thread to take care of applying changes */
+ i40e_service_event_schedule(vsi->back);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
+ goto error_pvid;
+ }
+
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+ i40e_vc_reset_vf(vf, true);
+	/* During reset the VF got a new VSI, so refresh the pointer. */
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+ goto error_pvid;
+ }
+
+ ret = 0;
+
+error_pvid:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate
+ * @max_tx_rate: Maximum Tx rate
+ *
+ * configure VF Tx rate
+ **/
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error;
+
+ if (min_tx_rate) {
+		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) specified for VF %d; a minimum tx rate is not supported\n",
+ min_tx_rate, vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+ vf_id);
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (ret)
+ goto error;
+
+ vf->tx_rate = max_tx_rate;
+error:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ /* validate the request */
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
+ goto error_param;
+
+ vf = &pf->vf[vf_id];
+	/* the first VSI is always the LAN VSI */
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+ ivi->vf = vf_id;
+
+ ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
+
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+ I40E_VLAN_PRIORITY_SHIFT;
+	if (!vf->link_forced)
+		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->spoofchk = vf->spoofchk;
+ ivi->trusted = vf->trusted;
+ ret = 0;
+
+error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ struct virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int abs_vf_id;
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ vf = &pf->vf[vf_id];
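+	/* the firmware mailbox addresses VFs by absolute id, so offset the
+	 * per-PF VF id by the function's VF base id
+	 */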
+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ i40e_set_vf_link_state(vf, &pfe, ls);
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ i40e_set_vf_link_state(vf, &pfe, ls);
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ i40e_set_vf_link_state(vf, &pfe, ls);
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_out;
+ }
+ /* Notify the VF of its new link state */
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+	vf = &pf->vf[vf_id];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+ vf_id);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (enable == vf->spoofchk)
+ goto out;
+
+ vf->spoofchk = enable;
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (enable)
+ ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+ ret);
+ ret = -EIO;
+ }
+out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ if (setting == vf->trusted)
+ goto out;
+
+ vf->trusted = setting;
+
+ /* request PF to sync mac/vlan filters for the VF */
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
+ i40e_vc_reset_vf(vf, true);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, setting ? "" : "un");
+
+ if (vf->adq_enabled) {
+ if (!vf->trusted) {
+ dev_info(&pf->pdev->dev,
+				 "VF %u no longer trusted, deleting all cloud filters\n",
+ vf_id);
+ i40e_del_all_cloud_filters(vf);
+ }
+ }
+
+out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
+}
+
+/**
+ * i40e_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-127)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_eth_stats *stats;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+
+ /* validate the request */
+ if (i40e_validate_vf(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ i40e_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 000000000..bd497cc53
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_VLAN_PRIORITY_SHIFT 13
+#define I40E_VLAN_MASK 0xFFF
+#define I40E_PRIORITY_MASK 0xE000
+
+#define I40E_MAX_VF_PROMISC_FLAGS 3
+
+#define I40E_VF_STATE_WAIT_COUNT 20
+#define I40E_VFR_WAIT_COUNT 100
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+ I40E_QUEUE_CTRL_UNKNOWN = 0,
+ I40E_QUEUE_CTRL_ENABLE,
+ I40E_QUEUE_CTRL_ENABLECHECK,
+ I40E_QUEUE_CTRL_DISABLE,
+ I40E_QUEUE_CTRL_DISABLECHECK,
+ I40E_QUEUE_CTRL_FASTDISABLE,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+ I40E_VF_STATE_INIT = 0,
+ I40E_VF_STATE_ACTIVE,
+ I40E_VF_STATE_IWARPENA,
+ I40E_VF_STATE_DISABLED,
+ I40E_VF_STATE_MC_PROMISC,
+ I40E_VF_STATE_UC_PROMISC,
+ I40E_VF_STATE_PRE_ENABLE,
+ I40E_VF_STATE_RESETTING
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+ I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+ I40E_VIRTCHNL_VF_CAP_L2,
+ I40E_VIRTCHNL_VF_CAP_IWARP,
+};
+
+/* In ADq, a max of 4 VSIs can be allocated per VF, including the primary
+ * VF VSI. These variables are used to store indices, IDs and the number
+ * of queues for each VSI, including the primary VF VSI. Each traffic
+ * class is termed a channel, and each channel can in turn have 4 queues,
+ * which means a max of 16 queues overall per VF.
+ */
+struct i40evf_channel {
+ u16 vsi_idx; /* index in PF struct for all channel VSIs */
+ u16 vsi_id; /* VSI ID used by firmware */
+ u16 num_qps; /* number of queue pairs requested by user */
+ u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
+};
+
+/* VF information structure */
+struct i40e_vf {
+ struct i40e_pf *pf;
+
+ /* VF id in the PF space */
+ s16 vf_id;
+ /* all VF vsis connect to the same parent */
+ enum i40e_switch_element_types parent_type;
+ struct virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
+
+ /* VF Port Extender (PE) stag if used */
+ u16 stag;
+
+ struct virtchnl_ether_addr default_lan_addr;
+ u16 port_vlan_id;
+ bool pf_set_mac; /* The VMM admin set the VF MAC address */
+ bool trusted;
+
+	/* VSI indices - actual VSI pointers are maintained in the PF structure.
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 lan_vsi_id; /* ID as used by firmware */
+
+ u8 num_queue_pairs; /* num of qps assigned to VF vsis */
+ u8 num_req_queues; /* num of requested qps */
+ u64 num_mdd_events; /* num of mdd events detected */
+
+ unsigned long vf_caps; /* vf's adv. capabilities */
+ unsigned long vf_states; /* vf's runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+ bool spoofchk;
+ u16 num_vlan;
+
+ /* ADq related variables */
+ bool adq_enabled; /* flag to enable adq */
+ u8 num_tc;
+ struct i40evf_channel ch[I40E_MAX_VF_VSI];
+ struct hlist_head cloud_filter_list;
+ u16 num_cloud_filters;
+
+ /* RDMA Client */
+ struct virtchnl_iwarp_qvlist_info *qvlist_info;
+};
+
+void i40e_free_vfs(struct i40e_pf *pf);
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen);
+int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* VF configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi);
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
+
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
new file mode 100644
index 000000000..cd7b52fb6
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "i40e.h"
+#include "i40e_txrx_common.h"
+#include "i40e_xsk.h"
+
+void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
+{
+ memset(rx_ring->rx_bi_zc, 0,
+ sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
+}
+
+static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi_zc[idx];
+}
+
+/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Tries to allocate a new software ring and returns -ENOMEM if the
+ * allocation fails. On success, the new ring replaces the old one.
+ * Returns 0 on success, negative on failure.
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+ sizeof(*rx_ring->rx_bi);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ rx_ring->rx_bi_zc = sw_ring;
+ } else {
+ kfree(rx_ring->rx_bi_zc);
+ rx_ring->rx_bi_zc = NULL;
+ rx_ring->rx_bi = sw_ring;
+ }
+ return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory, than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+ struct i40e_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+ rx_ring = vsi->rx_rings[q];
+ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
+ * @vsi: Current VSI
+ * @pool: buffer pool
+ * @qid: Rx ring to associate buffer pool with
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = vsi->netdev;
+ bool if_running;
+ int err;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ if (qid >= vsi->num_queue_pairs)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
+ if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
+
+ if (if_running) {
+ err = i40e_queue_pair_disable(vsi, qid);
+ if (err)
+ return err;
+
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+ if (err)
+ return err;
+
+ err = i40e_queue_pair_enable(vsi, qid);
+ if (err)
+ return err;
+
+ /* Kick start the NAPI context so that receiving will start */
+ err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
+ * certain ring/qid
+ * @vsi: Current VSI
+ * @qid: Rx ring to associate buffer pool with
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct xsk_buff_pool *pool;
+ bool if_running;
+ int err;
+
+ pool = xsk_get_pool_from_qid(netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
+
+ if (if_running) {
+ err = i40e_queue_pair_disable(vsi, qid);
+ if (err)
+ return err;
+ }
+
+ clear_bit(qid, vsi->af_xdp_zc_qps);
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
+
+ if (if_running) {
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+ if (err)
+ return err;
+ err = i40e_queue_pair_enable(vsi, qid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
+ * a ring/qid
+ * @vsi: Current VSI
+ * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to associate the buffer pool with or disassociate it from
+ *
+ * This function enables or disables a buffer pool to a certain ring.
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
+ i40e_xsk_pool_disable(vsi, qid);
+}
+
+/**
+ * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ *
+ * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
+ **/
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = I40E_XDP_PASS;
+ struct i40e_ring *xdp_ring;
+ u32 act;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
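+	/* XDP_REDIRECT is the common verdict for AF_XDP, so handle it first.
+	 * -ENOBUFS with need_wakeup enabled means the socket Rx queue is
+	 * full; return I40E_XDP_EXIT so the NAPI poll can back off.
+	 */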
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ if (!err)
+ return I40E_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = I40E_XDP_EXIT;
+ else
+ result = I40E_XDP_CONSUMED;
+ goto out_failure;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ result = I40E_XDP_CONSUMED;
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ }
+ return result;
+}
+
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
+{
+ u16 ntu = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
+ dma_addr_t dma;
+
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ xdp = i40e_rx_bi(rx_ring, ntu);
+
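+	/* allocate at most up to the end of the ring so descriptors are
+	 * filled contiguously; any remainder is requested on a later call
+	 */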
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
+
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->read.hdr_addr = 0;
+
+ rx_desc++;
+ xdp++;
+ }
+
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
+}
+
+/**
+ * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb, or NULL on failure.
+ **/
+static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ goto out;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+out:
+ xsk_buff_free(xdp);
+ return skb;
+}
+
+static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp_buff,
+ union i40e_rx_desc *rx_desc,
+ unsigned int *rx_packets,
+ unsigned int *rx_bytes,
+ unsigned int size,
+ unsigned int xdp_res,
+ bool *failure)
+{
+ struct sk_buff *skb;
+
+ *rx_packets = 1;
+ *rx_bytes = size;
+
+ if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
+ return;
+
+ if (xdp_res == I40E_XDP_EXIT) {
+ *failure = true;
+ return;
+ }
+
+ if (xdp_res == I40E_XDP_CONSUMED) {
+ xsk_buff_free(xdp_buff);
+ return;
+ }
+ if (xdp_res == I40E_XDP_PASS) {
+ /* NB! We are not checking for errors using
+ * i40e_test_staterr with
+		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
+		 * SBP is *not* set in PRT_SBPVSI (not set by default).
+ */
+ skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ if (eth_skb_pad(skb)) {
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ *rx_bytes = skb->len;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ return;
+ }
+
+ /* Should never get here, as all valid cases have been handled already.
+ */
+ WARN_ON_ONCE(1);
+}
+
+/**
+ * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
+ * @rx_ring: Rx ring
+ * @budget: NAPI budget
+ *
+ * Returns amount of work completed
+ **/
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 next_to_clean = rx_ring->next_to_clean;
+ u16 count_mask = rx_ring->count - 1;
+ unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
+ bool failure = false;
+ u16 cleaned_count;
+
+	/* NB! xdp_prog will always be !NULL, because this path is
+	 * only enabled by setting an XDP program.
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_rx_packets < (unsigned int)budget)) {
+ union i40e_rx_desc *rx_desc;
+ unsigned int rx_packets;
+ unsigned int rx_bytes;
+ struct xdp_buff *bi;
+ unsigned int size;
+ u64 qword;
+
+ rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
+
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring,
+ rx_desc->raw.qword[0],
+ qword);
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ xsk_buff_free(bi);
+ next_to_clean = (next_to_clean + 1) & count_mask;
+ continue;
+ }
+
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
+ break;
+
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ xsk_buff_set_size(bi, size);
+ xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
+ i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
+ &rx_bytes, size, xdp_res, &failure);
+ if (failure)
+ break;
+ total_rx_packets += rx_packets;
+ total_rx_bytes += rx_bytes;
+ xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+ next_to_clean = (next_to_clean + 1) & count_mask;
+ }
+
+ rx_ring->next_to_clean = next_to_clean;
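+	/* the ring size is a power of two, so masking with count - 1 yields
+	 * the number of descriptors available to be refilled
+	 */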
+ cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
+
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE)
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+
+ i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
+ i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
+ if (failure || next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
+
+ return (int)total_rx_packets;
+ }
+ return failure ? budget : (int)total_rx_packets;
+}
+
+static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
+ unsigned int *total_bytes)
+{
+ struct i40e_tx_desc *tx_desc;
+ dma_addr_t dma;
+
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+
+ tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
+ 0, desc->len, 0);
+
+ *total_bytes += desc->len;
+}
+
+static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
+ unsigned int *total_bytes)
+{
+ u16 ntu = xdp_ring->next_to_use;
+ struct i40e_tx_desc *tx_desc;
+ dma_addr_t dma;
+ u32 i;
+
+ loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
+
+ tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
+ I40E_TX_DESC_CMD_EOP,
+ 0, desc[i].len, 0);
+
+ *total_bytes += desc[i].len;
+ }
+
+ xdp_ring->next_to_use = ntu;
+}
+
+static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
+ unsigned int *total_bytes)
+{
+ u32 batched, leftover, i;
+
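+	/* split the work into a multiple of PKTS_PER_BATCH handled by the
+	 * unrolled batch routine, plus a remainder sent one packet at a time
+	 */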
+ batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
+ leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+ for (i = 0; i < batched; i += PKTS_PER_BATCH)
+ i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ for (i = batched; i < batched + leftover; i++)
+ i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
+
+static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
+{
+ u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct i40e_tx_desc *tx_desc;
+
+ tx_desc = I40E_TX_DESC(xdp_ring, ntu);
+ tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
+}
+
+/**
+ * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
+ * @xdp_ring: XDP Tx ring
+ * @budget: NAPI budget
+ *
+ * Returns true if the work is finished.
+ **/
+static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
+{
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ u32 nb_pkts, nb_processed = 0;
+ unsigned int total_bytes = 0;
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
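+	/* if the batch would run past the end of the ring, fill descriptors
+	 * up to the wrap point first, then continue from the ring base
+	 */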
+ if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+ nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+ i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ xdp_ring->next_to_use = 0;
+ }
+
+ i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+ &total_bytes);
+
+ /* Request an interrupt for the last frame and bump tail ptr. */
+ i40e_set_rs_bit(xdp_ring);
+ i40e_xdp_ring_update_tail(xdp_ring);
+
+ i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
+
+ return nb_pkts < budget;
+}
+
+/**
+ * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
+ * @tx_ring: XDP Tx ring
+ * @tx_bi: Tx buffer info to clean
+ **/
+static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
+ struct i40e_tx_buffer *tx_bi)
+{
+ xdp_return_frame(tx_bi->xdpf);
+ tx_ring->xdp_tx_active--;
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+}
+
+/**
+ * i40e_clean_xdp_tx_irq - Completes AF_XDP entries and cleans XDP entries
+ * @vsi: Current VSI
+ * @tx_ring: XDP Tx ring
+ *
+ * Returns true if cleanup/transmission is done.
+ **/
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
+{
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
+ u32 i, completed_frames, xsk_frames = 0;
+ u32 head_idx = i40e_get_head(tx_ring);
+ struct i40e_tx_buffer *tx_bi;
+ unsigned int ntc;
+
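+	/* the head writeback index may lag behind next_to_clean after a
+	 * ring wrap; normalize it so completed_frames is non-negative
+	 */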
+ if (head_idx < tx_ring->next_to_clean)
+ head_idx += tx_ring->count;
+ completed_frames = head_idx - tx_ring->next_to_clean;
+
+ if (completed_frames == 0)
+ goto out_xmit;
+
+ if (likely(!tx_ring->xdp_tx_active)) {
+ xsk_frames = completed_frames;
+ goto skip;
+ }
+
+ ntc = tx_ring->next_to_clean;
+
+ for (i = 0; i < completed_frames; i++) {
+ tx_bi = &tx_ring->tx_bi[ntc];
+
+ if (tx_bi->xdpf) {
+ i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ tx_bi->xdpf = NULL;
+ } else {
+ xsk_frames++;
+ }
+
+ if (++ntc >= tx_ring->count)
+ ntc = 0;
+ }
+
+skip:
+ tx_ring->next_to_clean += completed_frames;
+ if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
+ tx_ring->next_to_clean -= tx_ring->count;
+
+ if (xsk_frames)
+ xsk_tx_completed(bp, xsk_frames);
+
+ i40e_arm_wb(tx_ring, vsi, completed_frames);
+
+out_xmit:
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
+
+ return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
+}
+
+/**
+ * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
+ * @dev: the netdevice
+ * @queue_id: queue id to wake up
+ * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
+ *
+ * Returns <0 for errors, 0 otherwise.
+ **/
+int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_ring *ring;
+
+ if (test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return -EAGAIN;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return -EINVAL;
+
+ if (queue_id >= vsi->num_queue_pairs)
+ return -EINVAL;
+
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
+ return -EINVAL;
+
+ ring = vsi->xdp_rings[queue_id];
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+ * it will run again. If not, trigger an interrupt and
+ * schedule the NAPI from interrupt context. If NAPI would be
+ * scheduled here, the interrupt affinity would not be
+ * honored.
+ */
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
+ i40e_force_wb(vsi, ring->q_vector);
+
+ return 0;
+}
+
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
+
+ xsk_buff_free(rx_bi);
+ }
+}
+
+/**
+ * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
+ * @tx_ring: XDP Tx ring
+ **/
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
+ struct i40e_tx_buffer *tx_bi;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ tx_bi = &tx_ring->tx_bi[ntc];
+
+ if (tx_bi->xdpf)
+ i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ ntc++;
+ if (ntc >= tx_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_tx_completed(bp, xsk_frames);
+}
+
+/**
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
+ * buffer pool attached
+ * @vsi: vsi
+ *
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
+ **/
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
+{
+ struct net_device *netdev = vsi->netdev;
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (xsk_get_pool_from_qid(netdev, i))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
new file mode 100644
index 000000000..821df248f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef _I40E_XSK_H_
+#define _I40E_XSK_H_
+
+/* This value should match the pragma in the loop_unrolled_for
+ * macro. Why 4? It is strictly empirical. It seems to be a good
+ * compromise between the advantage of having simultaneous outstanding
+ * reads to the DMA array that can hide each other's latency and the
+ * disadvantage of having a larger code path.
+ */
+#define PKTS_PER_BATCH 4
+
+#ifdef __clang__
+#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
+#elif __GNUC__ >= 8
+#define loop_unrolled_for _Pragma("GCC unroll 4") for
+#else
+#define loop_unrolled_for for
+#endif
+
+struct i40e_vsi;
+struct xsk_buff_pool;
+
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid);
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
+
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
+int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
+void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
+
+#endif /* _I40E_XSK_H_ */