author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
commit	ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree	b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/vmxnet3
parent	Initial commit. (diff)
download	linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
	linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--	drivers/net/vmxnet3/Makefile	35
-rw-r--r--	drivers/net/vmxnet3/upt1_defs.h	99
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_defs.h	864
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c	4442
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_ethtool.c	1377
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_int.h	538
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_xdp.c	419
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_xdp.h	47
8 files changed, 7821 insertions, 0 deletions
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
new file mode 100644
index 000000000..f82870c10
--- /dev/null
+++ b/drivers/net/vmxnet3/Makefile
@@ -0,0 +1,35 @@
+################################################################################
+#
+# Linux driver for VMware's vmxnet3 ethernet NIC.
+#
+# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 2 of the License and no later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+# NON INFRINGEMENT. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Maintained by: pv-drivers@vmware.com
+#
+#
+################################################################################
+
+#
+# Makefile for the VMware vmxnet3 ethernet NIC driver
+#
+
+obj-$(CONFIG_VMXNET3) += vmxnet3.o
+
+vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o vmxnet3_xdp.o
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
new file mode 100644
index 000000000..41c0660a0
--- /dev/null
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -0,0 +1,99 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: pv-drivers@vmware.com
+ *
+ */
+
+#ifndef _UPT1_DEFS_H
+#define _UPT1_DEFS_H
+
+struct UPT1_TxStats {
+ u64 TSOPktsTxOK; /* TSO pkts post-segmentation */
+ u64 TSOBytesTxOK;
+ u64 ucastPktsTxOK;
+ u64 ucastBytesTxOK;
+ u64 mcastPktsTxOK;
+ u64 mcastBytesTxOK;
+ u64 bcastPktsTxOK;
+ u64 bcastBytesTxOK;
+ u64 pktsTxError;
+ u64 pktsTxDiscard;
+};
+
+struct UPT1_RxStats {
+ u64 LROPktsRxOK; /* LRO pkts */
+ u64 LROBytesRxOK; /* bytes from LRO pkts */
+ /* the following counters are for pkts from the wire, i.e., pre-LRO */
+ u64 ucastPktsRxOK;
+ u64 ucastBytesRxOK;
+ u64 mcastPktsRxOK;
+ u64 mcastBytesRxOK;
+ u64 bcastPktsRxOK;
+ u64 bcastBytesRxOK;
+ u64 pktsRxOutOfBuf;
+ u64 pktsRxError;
+};
+
+/* interrupt moderation level */
+enum {
+ UPT1_IML_NONE = 0, /* no interrupt moderation */
+ UPT1_IML_HIGHEST = 7, /* least intr generated */
+ UPT1_IML_ADAPTIVE = 8, /* adaptive intr moderation */
+};
+/* values for UPT1_RSSConf.hashFunc */
+enum {
+ UPT1_RSS_HASH_TYPE_NONE = 0x0,
+ UPT1_RSS_HASH_TYPE_IPV4 = 0x01,
+ UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02,
+ UPT1_RSS_HASH_TYPE_IPV6 = 0x04,
+ UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08,
+};
+
+enum {
+ UPT1_RSS_HASH_FUNC_NONE = 0x0,
+ UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01,
+};
+
+#define UPT1_RSS_MAX_KEY_SIZE 40
+#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
+
+struct UPT1_RSSConf {
+ u16 hashType;
+ u16 hashFunc;
+ u16 hashKeySize;
+ u16 indTableSize;
+ u8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
+ u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+};
+
+/* features */
+enum {
+ UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */
+ UPT1_F_RSS = cpu_to_le64(0x0002),
+ UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
+ UPT1_F_LRO = cpu_to_le64(0x0008),
+ UPT1_F_RXINNEROFLD = cpu_to_le64(0x00010), /* Geneve/Vxlan rx csum
+ * offloading
+ */
+};
+#endif
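
Editorial aside, not part of the patch itself: the RSS configuration above is a plain structure that the driver fills in and hands to the device. The sketch below shows one plausible way to populate it using the enums and size limits defined in this header; it assumes the standard kernel helpers netdev_rss_key_fill() (linux/netdevice.h) and ethtool_rxfh_indir_default() (linux/ethtool.h), and example_fill_rss_conf() with its parameters are hypothetical names used only for illustration.

static void example_fill_rss_conf(struct UPT1_RSSConf *rssConf, u32 num_rx_queues)
{
	unsigned int i;

	/* hash TCP and plain IP flows for both address families */
	rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | UPT1_RSS_HASH_TYPE_IPV4 |
			    UPT1_RSS_HASH_TYPE_TCP_IPV6 | UPT1_RSS_HASH_TYPE_IPV6;
	rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
	rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
	rssConf->indTableSize = UPT1_RSS_MAX_IND_TABLE_SIZE;

	/* random Toeplitz key and a round-robin indirection table */
	netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
	for (i = 0; i < rssConf->indTableSize; i++)
		rssConf->indTable[i] = ethtool_rxfh_indir_default(i, num_rx_queues);
}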
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
new file mode 100644
index 000000000..41d676728
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -0,0 +1,864 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: pv-drivers@vmware.com
+ *
+ */
+
+#ifndef _VMXNET3_DEFS_H_
+#define _VMXNET3_DEFS_H_
+
+#include "upt1_defs.h"
+
+/* all registers are 32 bit wide */
+/* BAR 1 */
+enum {
+ VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */
+ VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */
+ VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */
+ VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */
+ VMXNET3_REG_CMD = 0x20, /* Command */
+ VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
+ VMXNET3_REG_MACH = 0x30, /* MAC Address High */
+ VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
+ VMXNET3_REG_ECR = 0x40, /* Event Cause Register */
+ VMXNET3_REG_DCR = 0x48, /* Device capability register,
+ * from 0x48 to 0x80
+ */
+ VMXNET3_REG_PTCR = 0x88, /* Passthru capability register
+ * from 0x88 to 0xb0
+ */
+};
+
+/* BAR 0 */
+enum {
+ VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */
+ VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */
+ VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */
+ VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
+};
+
+/* For Large PT BAR, the following offset to DB register */
+enum {
+ VMXNET3_REG_LB_TXPROD = 0x1000, /* Tx Producer Index */
+ VMXNET3_REG_LB_RXPROD = 0x1400, /* Rx Producer Index for ring 1 */
+ VMXNET3_REG_LB_RXPROD2 = 0x1800, /* Rx Producer Index for ring 2 */
+};
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_LARGE_PT_REG_SIZE 8192 /* large PT pages */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+#define VMXNET3_LARGE_BAR0_REG_SIZE (4096 * 4096) /* LARGE BAR 0 */
+#define VMXNET3_OOB_REG_SIZE (4094 * 4096) /* OOB pages */
+
+#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
+#define VMXNET3_REG_ALIGN_MASK 0x7
+
+/* I/O Mapped access to registers */
+#define VMXNET3_IO_TYPE_PT 0
+#define VMXNET3_IO_TYPE_VD 1
+#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
+#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
+#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
+
+enum {
+ VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
+ VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
+ VMXNET3_CMD_QUIESCE_DEV,
+ VMXNET3_CMD_RESET_DEV,
+ VMXNET3_CMD_UPDATE_RX_MODE,
+ VMXNET3_CMD_UPDATE_MAC_FILTERS,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS,
+ VMXNET3_CMD_UPDATE_RSSIDT,
+ VMXNET3_CMD_UPDATE_IML,
+ VMXNET3_CMD_UPDATE_PMCFG,
+ VMXNET3_CMD_UPDATE_FEATURE,
+ VMXNET3_CMD_RESERVED1,
+ VMXNET3_CMD_LOAD_PLUGIN,
+ VMXNET3_CMD_RESERVED2,
+ VMXNET3_CMD_RESERVED3,
+ VMXNET3_CMD_SET_COALESCE,
+ VMXNET3_CMD_REGISTER_MEMREGS,
+ VMXNET3_CMD_SET_RSS_FIELDS,
+ VMXNET3_CMD_RESERVED4,
+ VMXNET3_CMD_RESERVED5,
+ VMXNET3_CMD_SET_RING_BUFFER_SIZE,
+
+ VMXNET3_CMD_FIRST_GET = 0xF00D0000,
+ VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
+ VMXNET3_CMD_GET_STATS,
+ VMXNET3_CMD_GET_LINK,
+ VMXNET3_CMD_GET_PERM_MAC_LO,
+ VMXNET3_CMD_GET_PERM_MAC_HI,
+ VMXNET3_CMD_GET_DID_LO,
+ VMXNET3_CMD_GET_DID_HI,
+ VMXNET3_CMD_GET_DEV_EXTRA_INFO,
+ VMXNET3_CMD_GET_CONF_INTR,
+ VMXNET3_CMD_GET_RESERVED1,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
+ VMXNET3_CMD_GET_COALESCE,
+ VMXNET3_CMD_GET_RSS_FIELDS,
+ VMXNET3_CMD_GET_RESERVED2,
+ VMXNET3_CMD_GET_RESERVED3,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF,
+ VMXNET3_CMD_GET_RESERVED4,
+ VMXNET3_CMD_GET_MAX_CAPABILITIES,
+ VMXNET3_CMD_GET_DCR0_REG,
+};
+
+/*
+ * Little Endian layout of bitfields -
+ * Byte 0 : 7.....len.....0
+ * Byte 1 : oco gen 13.len.8
+ * Byte 2 : 5.msscof.0 ext1 dtype
+ * Byte 3 : 13...msscof...6
+ *
+ * Big Endian layout of bitfields -
+ * Byte 0: 13...msscof...6
+ * Byte 1 : 5.msscof.0 ext1 dtype
+ * Byte 2 : oco gen 13.len.8
+ * Byte 3 : 7.....len.....0
+ *
+ * Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ * the bit fields correctly, and cpu_to_le32 will convert the bit fields
+ * written by the big endian driver to the format required by the device.
+ */
+
+struct Vmxnet3_TxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 msscof:14; /* MSS, checksum offset, flags */
+ u32 ext1:1; /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
+ u32 dtype:1; /* descriptor type */
+ u32 oco:1; /* Outer csum offload */
+ u32 gen:1; /* generation bit */
+ u32 len:14;
+#else
+ u32 len:14;
+ u32 gen:1; /* generation bit */
+ u32 oco:1; /* Outer csum offload */
+ u32 dtype:1; /* descriptor type */
+ u32 ext1:1; /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
+ u32 msscof:14; /* MSS, checksum offset, flags */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 tci:16; /* Tag to Insert */
+ u32 ti:1; /* VLAN Tag Insertion */
+ u32 ext2:1;
+ u32 cq:1; /* completion request */
+ u32 eop:1; /* End Of Packet */
+ u32 om:2; /* offload mode */
+ u32 hlen:10; /* header len */
+#else
+ u32 hlen:10; /* header len */
+ u32 om:2; /* offload mode */
+ u32 eop:1; /* End Of Packet */
+ u32 cq:1; /* completion request */
+ u32 ext2:1;
+ u32 ti:1; /* VLAN Tag Insertion */
+ u32 tci:16; /* Tag to Insert */
+#endif /* __BIG_ENDIAN_BITFIELD */
+};
+
+/* TxDesc.OM values */
+#define VMXNET3_OM_NONE 0
+#define VMXNET3_OM_ENCAP 1
+#define VMXNET3_OM_CSUM 2
+#define VMXNET3_OM_TSO 3
+
+/* fields in TxDesc we access w/o using bit fields */
+#define VMXNET3_TXD_EOP_SHIFT 12
+#define VMXNET3_TXD_CQ_SHIFT 13
+#define VMXNET3_TXD_GEN_SHIFT 14
+#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
+#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
+
+#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
+#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
+#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
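/*
 * Editor's illustration, not part of this patch: a minimal sketch of the
 * endianness rule documented above the TxDesc definition. The whole 32-bit
 * word is read with le32_to_cpu() before the bit is extracted, so the same
 * code works on little and big endian hosts; the driver does this
 * generically in get_bitfield32() in vmxnet3_drv.c later in this patch.
 * txd_get_gen() is a hypothetical name used only for this example.
 */
static inline u32 txd_get_gen(const struct Vmxnet3_TxDesc *txd)
{
	/* gen lives in the third dword; addr occupies the first two */
	const __le32 *dw = (const __le32 *)txd + VMXNET3_TXD_GEN_DWORD_SHIFT;

	return (le32_to_cpu(*dw) >> VMXNET3_TXD_GEN_SHIFT) & 1;
}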
+
+#define VMXNET3_HDR_COPY_SIZE 128
+
+
+struct Vmxnet3_TxDataDesc {
+ u8 data[VMXNET3_HDR_COPY_SIZE];
+};
+
+typedef u8 Vmxnet3_RxDataDesc;
+
+#define VMXNET3_TCD_GEN_SHIFT 31
+#define VMXNET3_TCD_GEN_SIZE 1
+#define VMXNET3_TCD_TXIDX_SHIFT 0
+#define VMXNET3_TCD_TXIDX_SIZE 12
+#define VMXNET3_TCD_GEN_DWORD_SHIFT 3
+
+struct Vmxnet3_TxCompDesc {
+ u32 txdIdx:12; /* Index of the EOP TxDesc */
+ u32 ext1:20;
+
+ __le32 ext2;
+ __le32 ext3;
+
+ u32 rsvd:24;
+ u32 type:7; /* completion type */
+ u32 gen:1; /* generation bit */
+};
+
+struct Vmxnet3_RxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 gen:1; /* Generation bit */
+ u32 rsvd:15;
+ u32 dtype:1; /* Descriptor type */
+ u32 btype:1; /* Buffer Type */
+ u32 len:14;
+#else
+ u32 len:14;
+ u32 btype:1; /* Buffer Type */
+ u32 dtype:1; /* Descriptor type */
+ u32 rsvd:15;
+ u32 gen:1; /* Generation bit */
+#endif
+ u32 ext1;
+};
+
+/* values of RXD.BTYPE */
+#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
+#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
+
+/* fields in RxDesc we access w/o using bit fields */
+#define VMXNET3_RXD_BTYPE_SHIFT 14
+#define VMXNET3_RXD_GEN_SHIFT 31
+
+#define VMXNET3_RCD_HDR_INNER_SHIFT 13
+
+struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 ext2:1;
+ u32 cnc:1; /* Checksum Not Calculated */
+ u32 rssType:4; /* RSS hash type used */
+ u32 rqID:10; /* rx queue/ring ID */
+ u32 sop:1; /* Start of Packet */
+ u32 eop:1; /* End of Packet */
+ u32 ext1:2; /* bit 0: indicating v4/v6/.. is for inner header */
+ /* bit 1: indicating rssType is based on inner header */
+ u32 rxdIdx:12; /* Index of the RxDesc */
+#else
+ u32 rxdIdx:12; /* Index of the RxDesc */
+ u32 ext1:2; /* bit 0: indicating v4/v6/.. is for inner header */
+ /* bit 1: indicating rssType is based on inner header */
+ u32 eop:1; /* End of Packet */
+ u32 sop:1; /* Start of Packet */
+ u32 rqID:10; /* rx queue/ring ID */
+ u32 rssType:4; /* RSS hash type used */
+ u32 cnc:1; /* Checksum Not Calculated */
+ u32 ext2:1;
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+ __le32 rssHash; /* RSS hash value */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 tci:16; /* Tag stripped */
+ u32 ts:1; /* Tag is stripped */
+ u32 err:1; /* Error */
+ u32 len:14; /* data length */
+#else
+ u32 len:14; /* data length */
+ u32 err:1; /* Error */
+ u32 ts:1; /* Tag is stripped */
+ u32 tci:16; /* Tag stripped */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 gen:1; /* generation bit */
+ u32 type:7; /* completion type */
+ u32 fcs:1; /* Frame CRC correct */
+ u32 frg:1; /* IP Fragment */
+ u32 v4:1; /* IPv4 */
+ u32 v6:1; /* IPv6 */
+ u32 ipc:1; /* IP Checksum Correct */
+ u32 tcp:1; /* TCP packet */
+ u32 udp:1; /* UDP packet */
+ u32 tuc:1; /* TCP/UDP Checksum Correct */
+ u32 csum:16;
+#else
+ u32 csum:16;
+ u32 tuc:1; /* TCP/UDP Checksum Correct */
+ u32 udp:1; /* UDP packet */
+ u32 tcp:1; /* TCP packet */
+ u32 ipc:1; /* IP Checksum Correct */
+ u32 v6:1; /* IPv6 */
+ u32 v4:1; /* IPv4 */
+ u32 frg:1; /* IP Fragment */
+ u32 fcs:1; /* Frame CRC correct */
+ u32 type:7; /* completion type */
+ u32 gen:1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
+};
+
+struct Vmxnet3_RxCompDescExt {
+ __le32 dword1;
+ u8 segCnt; /* Number of aggregated packets */
+ u8 dupAckCnt; /* Number of duplicate Acks */
+ __le16 tsDelta; /* TCP timestamp difference */
+ __le32 dword2;
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 gen:1; /* generation bit */
+ u32 type:7; /* completion type */
+ u32 fcs:1; /* Frame CRC correct */
+ u32 frg:1; /* IP Fragment */
+ u32 v4:1; /* IPv4 */
+ u32 v6:1; /* IPv6 */
+ u32 ipc:1; /* IP Checksum Correct */
+ u32 tcp:1; /* TCP packet */
+ u32 udp:1; /* UDP packet */
+ u32 tuc:1; /* TCP/UDP Checksum Correct */
+ u32 mss:16;
+#else
+ u32 mss:16;
+ u32 tuc:1; /* TCP/UDP Checksum Correct */
+ u32 udp:1; /* UDP packet */
+ u32 tcp:1; /* TCP packet */
+ u32 ipc:1; /* IP Checksum Correct */
+ u32 v6:1; /* IPv6 */
+ u32 v4:1; /* IPv4 */
+ u32 frg:1; /* IP Fragment */
+ u32 fcs:1; /* Frame CRC correct */
+ u32 type:7; /* completion type */
+ u32 gen:1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
+};
+
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
+#define VMXNET3_RCD_TUC_SHIFT 16
+#define VMXNET3_RCD_IPC_SHIFT 19
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
+#define VMXNET3_RCD_TYPE_SHIFT 56
+#define VMXNET3_RCD_GEN_SHIFT 63
+
+/* csum OK for TCP/UDP pkts over IP */
+#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
+ 1 << VMXNET3_RCD_IPC_SHIFT)
+#define VMXNET3_TXD_GEN_SIZE 1
+#define VMXNET3_TXD_EOP_SIZE 1
+
+/* value of RxCompDesc.rssType */
+#define VMXNET3_RCD_RSS_TYPE_NONE 0
+#define VMXNET3_RCD_RSS_TYPE_IPV4 1
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2
+#define VMXNET3_RCD_RSS_TYPE_IPV6 3
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4
+#define VMXNET3_RCD_RSS_TYPE_UDPIPV4 5
+#define VMXNET3_RCD_RSS_TYPE_UDPIPV6 6
+#define VMXNET3_RCD_RSS_TYPE_ESPIPV4 7
+#define VMXNET3_RCD_RSS_TYPE_ESPIPV6 8
+
+
+/* a union for accessing all cmd/completion descriptors */
+union Vmxnet3_GenericDesc {
+ __le64 qword[2];
+ __le32 dword[4];
+ __le16 word[8];
+ struct Vmxnet3_TxDesc txd;
+ struct Vmxnet3_RxDesc rxd;
+ struct Vmxnet3_TxCompDesc tcd;
+ struct Vmxnet3_RxCompDesc rcd;
+ struct Vmxnet3_RxCompDescExt rcdExt;
+};
+
+#define VMXNET3_INIT_GEN 1
+
+/* Max size of a single tx buffer */
+#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
+
+/* # of tx desc needed for a tx buffer size */
+#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
+ VMXNET3_MAX_TX_BUF_SIZE)
+
+/* max # of tx descs for a non-tso pkt */
+#define VMXNET3_MAX_TXD_PER_PKT 16
+/* max # of tx descs for a tso pkt */
+#define VMXNET3_MAX_TSO_TXD_PER_PKT 24
+
+/* Max size of a single rx buffer */
+#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
+/* Minimum size of a type 0 buffer */
+#define VMXNET3_MIN_T0_BUF_SIZE 128
+#define VMXNET3_MAX_CSUM_OFFSET 1024
+
+/* Ring base address alignment */
+#define VMXNET3_RING_BA_ALIGN 512
+#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
+
+/* Ring size must be a multiple of 32 */
+#define VMXNET3_RING_SIZE_ALIGN 32
+#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Max ring size */
+#define VMXNET3_TX_RING_MAX_SIZE 4096
+#define VMXNET3_TC_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING2_MAX_SIZE 4096
+#define VMXNET3_RC_RING_MAX_SIZE 8192
+
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
+#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+
+/* a list of reasons for queue stop */
+
+enum {
+ VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */
+ VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */
+ VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */
+ VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */
+ VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */
+ VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */
+ VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */
+ VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */
+};
+
+/* completion descriptor types */
+#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP_LRO 4 /* Rx Completion Descriptor for LRO */
+
+enum {
+ VMXNET3_GOS_BITS_UNK = 0, /* unknown */
+ VMXNET3_GOS_BITS_32 = 1,
+ VMXNET3_GOS_BITS_64 = 2,
+};
+
+#define VMXNET3_GOS_TYPE_LINUX 1
+
+
+struct Vmxnet3_GOSInfo {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 gosMisc:10; /* other info about gos */
+ u32 gosVer:16; /* gos version */
+ u32 gosType:4; /* which guest */
+ u32 gosBits:2; /* 32-bit or 64-bit? */
+#else
+ u32 gosBits:2; /* 32-bit or 64-bit? */
+ u32 gosType:4; /* which guest */
+ u32 gosVer:16; /* gos version */
+ u32 gosMisc:10; /* other info about gos */
+#endif /* __BIG_ENDIAN_BITFIELD */
+};
+
+struct Vmxnet3_DriverInfo {
+ __le32 version;
+ struct Vmxnet3_GOSInfo gos;
+ __le32 vmxnet3RevSpt;
+ __le32 uptVerSpt;
+};
+
+
+#define VMXNET3_REV1_MAGIC 3133079265u
+
+/*
+ * QueueDescPA must be 128 bytes aligned. It points to an array of
+ * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
+ * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
+ * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
+ */
+#define VMXNET3_QUEUE_DESC_ALIGN 128
+
+
+struct Vmxnet3_MiscConf {
+ struct Vmxnet3_DriverInfo driverInfo;
+ __le64 uptFeatures;
+ __le64 ddPA; /* driver data PA */
+ __le64 queueDescPA; /* queue descriptor table PA */
+ __le32 ddLen; /* driver data len */
+ __le32 queueDescLen; /* queue desc. table len in bytes */
+ __le32 mtu;
+ __le16 maxNumRxSG;
+ u8 numTxQueues;
+ u8 numRxQueues;
+ __le32 reserved[4];
+};
+
+
+struct Vmxnet3_TxQueueConf {
+ __le64 txRingBasePA;
+ __le64 dataRingBasePA;
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 reserved;
+ __le32 txRingSize; /* # of tx desc */
+ __le32 dataRingSize; /* # of data desc */
+ __le32 compRingSize; /* # of comp desc */
+ __le32 ddLen; /* size of driver data */
+ u8 intrIdx;
+ u8 _pad1[1];
+ __le16 txDataRingDescSize;
+ u8 _pad2[4];
+};
+
+
+struct Vmxnet3_RxQueueConf {
+ __le64 rxRingBasePA[2];
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 rxDataRingBasePA;
+ __le32 rxRingSize[2]; /* # of rx desc */
+ __le32 compRingSize; /* # of rx comp desc */
+ __le32 ddLen; /* size of driver data */
+ u8 intrIdx;
+ u8 _pad1[1];
+ __le16 rxDataRingDescSize; /* size of rx data ring buffer */
+ u8 _pad2[4];
+};
+
+
+enum vmxnet3_intr_mask_mode {
+ VMXNET3_IMM_AUTO = 0,
+ VMXNET3_IMM_ACTIVE = 1,
+ VMXNET3_IMM_LAZY = 2
+};
+
+enum vmxnet3_intr_type {
+ VMXNET3_IT_AUTO = 0,
+ VMXNET3_IT_INTX = 1,
+ VMXNET3_IT_MSI = 2,
+ VMXNET3_IT_MSIX = 3
+};
+
+#define VMXNET3_MAX_TX_QUEUES 8
+#define VMXNET3_MAX_RX_QUEUES 16
+/* additional 1 for events */
+#define VMXNET3_MAX_INTRS 25
+
+/* Version 6 and later will use below macros */
+#define VMXNET3_EXT_MAX_TX_QUEUES 32
+#define VMXNET3_EXT_MAX_RX_QUEUES 32
+/* additional 1 for events */
+#define VMXNET3_EXT_MAX_INTRS 65
+#define VMXNET3_FIRST_SET_INTRS 64
+
+/* value of intrCtrl */
+#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
+
+
+struct Vmxnet3_IntrConf {
+ bool autoMask;
+ u8 numIntrs; /* # of interrupts */
+ u8 eventIntrIdx;
+ u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
+ * each intr */
+ __le32 intrCtrl;
+ __le32 reserved[2];
+};
+
+struct Vmxnet3_IntrConfExt {
+ u8 autoMask;
+ u8 numIntrs; /* # of interrupts */
+ u8 eventIntrIdx;
+ u8 reserved;
+ __le32 intrCtrl;
+ __le32 reserved1;
+ u8 modLevels[VMXNET3_EXT_MAX_INTRS]; /* moderation level for
+ * each intr
+ */
+ u8 reserved2[3];
+};
+
+/* one bit per VLAN ID, the size is in the units of u32 */
+#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8))
+
+
+struct Vmxnet3_QueueStatus {
+ bool stopped;
+ u8 _pad[3];
+ __le32 error;
+};
+
+
+struct Vmxnet3_TxQueueCtrl {
+ __le32 txNumDeferred;
+ __le32 txThreshold;
+ __le64 reserved;
+};
+
+
+struct Vmxnet3_RxQueueCtrl {
+ bool updateRxProd;
+ u8 _pad[7];
+ __le64 reserved;
+};
+
+enum {
+ VMXNET3_RXM_UCAST = 0x01, /* unicast only */
+ VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */
+ VMXNET3_RXM_BCAST = 0x04, /* broadcast only */
+ VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */
+ VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */
+};
+
+struct Vmxnet3_RxFilterConf {
+ __le32 rxMode; /* VMXNET3_RXM_xxx */
+ __le16 mfTableLen; /* size of the multicast filter table */
+ __le16 _pad1;
+ __le64 mfTablePA; /* PA of the multicast filters table */
+ __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+};
+
+
+#define VMXNET3_PM_MAX_FILTERS 6
+#define VMXNET3_PM_MAX_PATTERN_SIZE 128
+#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
+
+#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
+ * filters */
+
+
+struct Vmxnet3_PM_PktFilter {
+ u8 maskSize;
+ u8 patternSize;
+ u8 mask[VMXNET3_PM_MAX_MASK_SIZE];
+ u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
+ u8 pad[6];
+};
+
+
+struct Vmxnet3_PMConf {
+ __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
+ u8 numFilters;
+ u8 pad[5];
+ struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
+};
+
+
+struct Vmxnet3_VariableLenConfDesc {
+ __le32 confVer;
+ __le32 confLen;
+ __le64 confPA;
+};
+
+
+struct Vmxnet3_TxQueueDesc {
+ struct Vmxnet3_TxQueueCtrl ctrl;
+ struct Vmxnet3_TxQueueConf conf;
+
+ /* Driver read after a GET command */
+ struct Vmxnet3_QueueStatus status;
+ struct UPT1_TxStats stats;
+ u8 _pad[88]; /* 128 aligned */
+};
+
+
+struct Vmxnet3_RxQueueDesc {
+ struct Vmxnet3_RxQueueCtrl ctrl;
+ struct Vmxnet3_RxQueueConf conf;
+ /* Driver read after a GET command */
+ struct Vmxnet3_QueueStatus status;
+ struct UPT1_RxStats stats;
+ u8 __pad[88]; /* 128 aligned */
+};
+
+struct Vmxnet3_SetPolling {
+ u8 enablePolling;
+};
+
+#define VMXNET3_COAL_STATIC_MAX_DEPTH 128
+#define VMXNET3_COAL_RBC_MIN_RATE 100
+#define VMXNET3_COAL_RBC_MAX_RATE 100000
+
+enum Vmxnet3_CoalesceMode {
+ VMXNET3_COALESCE_DISABLED = 0,
+ VMXNET3_COALESCE_ADAPT = 1,
+ VMXNET3_COALESCE_STATIC = 2,
+ VMXNET3_COALESCE_RBC = 3
+};
+
+struct Vmxnet3_CoalesceRbc {
+ u32 rbc_rate;
+};
+
+struct Vmxnet3_CoalesceStatic {
+ u32 tx_depth;
+ u32 tx_comp_depth;
+ u32 rx_depth;
+};
+
+struct Vmxnet3_CoalesceScheme {
+ enum Vmxnet3_CoalesceMode coalMode;
+ union {
+ struct Vmxnet3_CoalesceRbc coalRbc;
+ struct Vmxnet3_CoalesceStatic coalStatic;
+ } coalPara;
+};
+
+struct Vmxnet3_MemoryRegion {
+ __le64 startPA;
+ __le32 length;
+ __le16 txQueueBits;
+ __le16 rxQueueBits;
+};
+
+#define MAX_MEMORY_REGION_PER_QUEUE 16
+#define MAX_MEMORY_REGION_PER_DEVICE 256
+
+struct Vmxnet3_MemRegs {
+ __le16 numRegs;
+ __le16 pad[3];
+ struct Vmxnet3_MemoryRegion memRegs[1];
+};
+
+enum Vmxnet3_RSSField {
+ VMXNET3_RSS_FIELDS_TCPIP4 = 0x0001,
+ VMXNET3_RSS_FIELDS_TCPIP6 = 0x0002,
+ VMXNET3_RSS_FIELDS_UDPIP4 = 0x0004,
+ VMXNET3_RSS_FIELDS_UDPIP6 = 0x0008,
+ VMXNET3_RSS_FIELDS_ESPIP4 = 0x0010,
+ VMXNET3_RSS_FIELDS_ESPIP6 = 0x0020,
+};
+
+struct Vmxnet3_RingBufferSize {
+ __le16 ring1BufSizeType0;
+ __le16 ring1BufSizeType1;
+ __le16 ring2BufSizeType1;
+ __le16 pad;
+};
+
+/* If the command data <= 16 bytes, use the shared memory directly.
+ * otherwise, use variable length configuration descriptor.
+ */
+union Vmxnet3_CmdInfo {
+ struct Vmxnet3_VariableLenConfDesc varConf;
+ struct Vmxnet3_SetPolling setPolling;
+ enum Vmxnet3_RSSField setRssFields;
+ struct Vmxnet3_RingBufferSize ringBufSize;
+ __le64 data[2];
+};
+
+struct Vmxnet3_DSDevRead {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ struct Vmxnet3_MiscConf misc;
+ struct Vmxnet3_IntrConf intrConf;
+ struct Vmxnet3_RxFilterConf rxFilterConf;
+ struct Vmxnet3_VariableLenConfDesc rssConfDesc;
+ struct Vmxnet3_VariableLenConfDesc pmConfDesc;
+ struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
+};
+
+struct Vmxnet3_DSDevReadExt {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ struct Vmxnet3_IntrConfExt intrConfExt;
+};
+
+/* All structures in DriverShared are padded to multiples of 8 bytes */
+struct Vmxnet3_DriverShared {
+ __le32 magic;
+ /* make devRead start at 64bit boundaries */
+ __le32 size; /* size of DriverShared */
+ struct Vmxnet3_DSDevRead devRead;
+ __le32 ecr;
+ __le32 reserved;
+ union {
+ __le32 reserved1[4];
+ union Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of
+ * executing the relevant
+ * command
+ */
+ } cu;
+ struct Vmxnet3_DSDevReadExt devReadExt;
+};
+
+
+#define VMXNET3_ECR_RQERR (1 << 0)
+#define VMXNET3_ECR_TQERR (1 << 1)
+#define VMXNET3_ECR_LINK (1 << 2)
+#define VMXNET3_ECR_DIC (1 << 3)
+#define VMXNET3_ECR_DEBUG (1 << 4)
+
+/* flip the gen bit of a ring */
+#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
+
+/* only use this if moving the idx won't affect the gen bit */
+#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
+ do {\
+ (idx)++;\
+ if (unlikely((idx) == (ring_size))) {\
+ (idx) = 0;\
+ } \
+ } while (0)
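/*
 * Editor's illustration, not part of this patch: how the two macros above are
 * typically combined when advancing a ring index. When the index wraps back
 * to 0, the generation bit is flipped so the device can tell descriptors
 * written on the current pass around the ring from stale ones. The names
 * below are hypothetical; the driver's real ring-advance helpers (e.g.
 * vmxnet3_cmd_ring_adv_next2fill()) live in vmxnet3_int.h in this patch.
 */
static inline void example_ring_adv(u32 *idx, u8 *gen, u32 ring_size)
{
	if (++(*idx) == ring_size) {
		*idx = 0;
		VMXNET3_FLIP_RING_GEN(*gen);
	}
}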
+
+#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
+ (vfTable[vid >> 5] |= (1 << (vid & 31)))
+#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
+ (vfTable[vid >> 5] &= ~(1 << (vid & 31)))
+
+#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
+ ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
+
+#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_V6_MAX_MTU 9190
+#define VMXNET3_MIN_MTU 60
+
+#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
+#define VMXNET3_LINK_DOWN 0
+
+#define VMXNET3_DCR_ERROR 31 /* error when bit 31 of DCR is set */
+#define VMXNET3_CAP_UDP_RSS 0 /* bit 0 of DCR 0 */
+#define VMXNET3_CAP_ESP_RSS_IPV4 1 /* bit 1 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD 2 /* bit 2 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_TSO 3 /* bit 3 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD 4 /* bit 4 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_TSO 5 /* bit 5 of DCR 0 */
+#define VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD 6 /* bit 6 of DCR 0 */
+#define VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD 7 /* bit 7 of DCR 0 */
+#define VMXNET3_CAP_PKT_STEERING_IPV4 8 /* bit 8 of DCR 0 */
+#define VMXNET3_CAP_VERSION_4_MAX VMXNET3_CAP_PKT_STEERING_IPV4
+#define VMXNET3_CAP_ESP_RSS_IPV6 9 /* bit 9 of DCR 0 */
+#define VMXNET3_CAP_VERSION_5_MAX VMXNET3_CAP_ESP_RSS_IPV6
+#define VMXNET3_CAP_ESP_OVER_UDP_RSS 10 /* bit 10 of DCR 0 */
+#define VMXNET3_CAP_INNER_RSS 11 /* bit 11 of DCR 0 */
+#define VMXNET3_CAP_INNER_ESP_RSS 12 /* bit 12 of DCR 0 */
+#define VMXNET3_CAP_CRC32_HASH_FUNC 13 /* bit 13 of DCR 0 */
+#define VMXNET3_CAP_VERSION_6_MAX VMXNET3_CAP_CRC32_HASH_FUNC
+#define VMXNET3_CAP_OAM_FILTER 14 /* bit 14 of DCR 0 */
+#define VMXNET3_CAP_ESP_QS 15 /* bit 15 of DCR 0 */
+#define VMXNET3_CAP_LARGE_BAR 16 /* bit 16 of DCR 0 */
+#define VMXNET3_CAP_OOORX_COMP 17 /* bit 17 of DCR 0 */
+#define VMXNET3_CAP_VERSION_7_MAX 18
+/* when new capability is introduced, update VMXNET3_CAP_MAX */
+#define VMXNET3_CAP_MAX VMXNET3_CAP_VERSION_7_MAX
+
+#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
new file mode 100644
index 000000000..057886479
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -0,0 +1,4442 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: pv-drivers@vmware.com
+ *
+ */
+
+#include <linux/module.h>
+#include <net/ip6_checksum.h>
+
+#include "vmxnet3_int.h"
+#include "vmxnet3_xdp.h"
+
+char vmxnet3_driver_name[] = "vmxnet3";
+#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
+
+/*
+ * PCI Device ID Table
+ * Last entry must be all 0s
+ */
+static const struct pci_device_id vmxnet3_pciid_table[] = {
+ {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
+
+static int enable_mq = 1;
+
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
+
+/*
+ * Enable/Disable the given intr
+ */
+static void
+vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
+{
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
+}
+
+
+static void
+vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
+{
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
+}
+
+
+/*
+ * Enable/Disable all intrs used by the device
+ */
+static void
+vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ vmxnet3_enable_intr(adapter, i);
+ if (!VMXNET3_VERSION_GE_6(adapter) ||
+ !adapter->queuesExtEnabled) {
+ adapter->shared->devRead.intrConf.intrCtrl &=
+ cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
+ } else {
+ adapter->shared->devReadExt.intrConfExt.intrCtrl &=
+ cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
+ }
+}
+
+
+static void
+vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ if (!VMXNET3_VERSION_GE_6(adapter) ||
+ !adapter->queuesExtEnabled) {
+ adapter->shared->devRead.intrConf.intrCtrl |=
+ cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ } else {
+ adapter->shared->devReadExt.intrConfExt.intrCtrl |=
+ cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ }
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ vmxnet3_disable_intr(adapter, i);
+}
+
+
+static void
+vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
+{
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
+}
+
+
+static bool
+vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ return tq->stopped;
+}
+
+
+static void
+vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ tq->stopped = false;
+ netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
+}
+
+
+static void
+vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ tq->stopped = false;
+ netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
+}
+
+
+static void
+vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ tq->stopped = true;
+ tq->num_stop++;
+ netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
+}
+
+/* Check if the capability is supported by the UPT device, or whether
+ * UPT is even requested
+ */
+bool
+vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
+{
+ if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
+ cap_supported & (1UL << cap)) {
+ return true;
+ }
+
+ return false;
+}
+
+
+/*
+ * Check the link state. This may start or stop the tx queue.
+ */
+static void
+vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
+{
+ u32 ret;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ adapter->link_speed = ret >> 16;
+ if (ret & 1) { /* Link is up. */
+ netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
+ adapter->link_speed);
+ netif_carrier_on(adapter->netdev);
+
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_start(&adapter->tx_queue[i],
+ adapter);
+ }
+ } else {
+ netdev_info(adapter->netdev, "NIC Link is Down\n");
+ netif_carrier_off(adapter->netdev);
+
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
+ }
+ }
+}
+
+static void
+vmxnet3_process_events(struct vmxnet3_adapter *adapter)
+{
+ int i;
+ unsigned long flags;
+ u32 events = le32_to_cpu(adapter->shared->ecr);
+ if (!events)
+ return;
+
+ vmxnet3_ack_events(adapter, events);
+
+ /* Check if link state has changed */
+ if (events & VMXNET3_ECR_LINK)
+ vmxnet3_check_link(adapter, true);
+
+ /* Check if there is an error on xmit/recv queues */
+ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_QUEUE_STATUS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: tq[%d] error 0x%x\n",
+ adapter->netdev->name, i, le32_to_cpu(
+ adapter->tqd_start[i].status.error));
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: rq[%d] error 0x%x\n",
+ adapter->netdev->name, i,
+ adapter->rqd_start[i].status.error);
+
+ schedule_work(&adapter->work);
+ }
+}
+
+#ifdef __BIG_ENDIAN_BITFIELD
+/*
+ * The device expects the bitfields in shared structures to be written in
+ * little endian. When the CPU is big endian, the following routines are used
+ * to read from and write to the device ABI correctly.
+ * The general technique used here is: double word bitfields are defined in
+ * the opposite order for big endian architectures. Then, before reading them
+ * in the driver, the complete double word is translated using le32_to_cpu.
+ * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
+ * to translate the double words into the format required by the device.
+ * In order to avoid touching bits in the shared structure more than once,
+ * temporary descriptors are used. These are passed as srcDesc to the
+ * following functions.
+ */
+static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
+ struct Vmxnet3_RxDesc *dstDesc)
+{
+ u32 *src = (u32 *)srcDesc + 2;
+ u32 *dst = (u32 *)dstDesc + 2;
+ dstDesc->addr = le64_to_cpu(srcDesc->addr);
+ *dst = le32_to_cpu(*src);
+ dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
+}
+
+static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
+ struct Vmxnet3_TxDesc *dstDesc)
+{
+ int i;
+ u32 *src = (u32 *)(srcDesc + 1);
+ u32 *dst = (u32 *)(dstDesc + 1);
+
+ /* Working backwards so that the gen bit is set at the end. */
+ for (i = 2; i > 0; i--) {
+ src--;
+ dst--;
+ *dst = cpu_to_le32(*src);
+ }
+}
+
+
+static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
+ struct Vmxnet3_RxCompDesc *dstDesc)
+{
+ int i = 0;
+ u32 *src = (u32 *)srcDesc;
+ u32 *dst = (u32 *)dstDesc;
+ for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
+ *dst = le32_to_cpu(*src);
+ src++;
+ dst++;
+ }
+}
+
+
+/* Used to read bitfield values from double words. */
+static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
+{
+ u32 temp = le32_to_cpu(*bitfield);
+ u32 mask = ((1 << size) - 1) << pos;
+ temp &= mask;
+ temp >>= pos;
+ return temp;
+}
+
+
+
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+
+# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
+ txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
+ VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
+# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
+ txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
+ VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
+# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
+ VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
+ VMXNET3_TCD_GEN_SIZE)
+# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
+ VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
+# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
+ (dstrcd) = (tmp); \
+ vmxnet3_RxCompToCPU((rcd), (tmp)); \
+ } while (0)
+# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
+ (dstrxd) = (tmp); \
+ vmxnet3_RxDescToCPU((rxd), (tmp)); \
+ } while (0)
+
+#else
+
+# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
+# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
+# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
+# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
+# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
+# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
+
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+
+static void
+vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
+ struct pci_dev *pdev)
+{
+ u32 map_type = tbi->map_type;
+
+ if (map_type & VMXNET3_MAP_SINGLE)
+ dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
+ DMA_TO_DEVICE);
+ else if (map_type & VMXNET3_MAP_PAGE)
+ dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
+ DMA_TO_DEVICE);
+ else
+ BUG_ON(map_type & ~VMXNET3_MAP_XDP);
+
+ tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
+}
+
+
+static int
+vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
+ struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
+ struct xdp_frame_bulk *bq)
+{
+ struct vmxnet3_tx_buf_info *tbi;
+ int entries = 0;
+ u32 map_type;
+
+ /* no out of order completion */
+ BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
+ BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
+
+ tbi = &tq->buf_info[eop_idx];
+ BUG_ON(!tbi->skb);
+ map_type = tbi->map_type;
+ VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
+
+ while (tq->tx_ring.next2comp != eop_idx) {
+ vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
+ pdev);
+
+ /* update next2comp w/o tx_lock. Since we are marking more,
+ * instead of less, tx ring entries avail, the worst case is
+ * that the tx routine incorrectly re-queues a pkt due to
+ * insufficient tx ring entries.
+ */
+ vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
+ entries++;
+ }
+
+ if (map_type & VMXNET3_MAP_XDP)
+ xdp_return_frame_bulk(tbi->xdpf, bq);
+ else
+ dev_kfree_skb_any(tbi->skb);
+
+ /* xdpf and skb are in an anonymous union. */
+ tbi->skb = NULL;
+
+ return entries;
+}
+
+
+static int
+vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ union Vmxnet3_GenericDesc *gdesc;
+ struct xdp_frame_bulk bq;
+ int completed = 0;
+
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock();
+
+ gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
+ while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+ /* Prevent any &gdesc->tcd field from being (speculatively)
+ * read before (&gdesc->tcd)->gen is read.
+ */
+ dma_rmb();
+
+ completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
+ &gdesc->tcd), tq, adapter->pdev,
+ adapter, &bq);
+
+ vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
+ gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
+ }
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
+ if (completed) {
+ spin_lock(&tq->tx_lock);
+ if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
+ vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
+ VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
+ netif_carrier_ok(adapter->netdev))) {
+ vmxnet3_tq_wake(tq, adapter);
+ }
+ spin_unlock(&tq->tx_lock);
+ }
+ return completed;
+}
+
+
+static void
+vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ struct xdp_frame_bulk bq;
+ u32 map_type;
+ int i;
+
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock();
+
+ while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
+ struct vmxnet3_tx_buf_info *tbi;
+
+ tbi = tq->buf_info + tq->tx_ring.next2comp;
+ map_type = tbi->map_type;
+
+ vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
+ if (tbi->skb) {
+ if (map_type & VMXNET3_MAP_XDP)
+ xdp_return_frame_bulk(tbi->xdpf, &bq);
+ else
+ dev_kfree_skb_any(tbi->skb);
+ tbi->skb = NULL;
+ }
+ vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
+ }
+
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
+ /* sanity check, verify all buffers are indeed unmapped */
+ for (i = 0; i < tq->tx_ring.size; i++)
+ BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
+
+ tq->tx_ring.gen = VMXNET3_INIT_GEN;
+ tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
+
+ tq->comp_ring.gen = VMXNET3_INIT_GEN;
+ tq->comp_ring.next2proc = 0;
+}
+
+
+static void
+vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ if (tq->tx_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
+ sizeof(struct Vmxnet3_TxDesc),
+ tq->tx_ring.base, tq->tx_ring.basePA);
+ tq->tx_ring.base = NULL;
+ }
+ if (tq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ tq->data_ring.size * tq->txdata_desc_size,
+ tq->data_ring.base, tq->data_ring.basePA);
+ tq->data_ring.base = NULL;
+ }
+ if (tq->comp_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc),
+ tq->comp_ring.base, tq->comp_ring.basePA);
+ tq->comp_ring.base = NULL;
+ }
+ kfree(tq->buf_info);
+ tq->buf_info = NULL;
+}
+
+
+/* Destroy all tx queues */
+void
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
+}
+
+
+static void
+vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ /* reset the tx ring contents to 0 and reset the tx ring states */
+ memset(tq->tx_ring.base, 0, tq->tx_ring.size *
+ sizeof(struct Vmxnet3_TxDesc));
+ tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
+ tq->tx_ring.gen = VMXNET3_INIT_GEN;
+
+ memset(tq->data_ring.base, 0,
+ tq->data_ring.size * tq->txdata_desc_size);
+
+ /* reset the tx comp ring contents to 0 and reset comp ring states */
+ memset(tq->comp_ring.base, 0, tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc));
+ tq->comp_ring.next2proc = 0;
+ tq->comp_ring.gen = VMXNET3_INIT_GEN;
+
+ /* reset the bookkeeping data */
+ memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
+ for (i = 0; i < tq->tx_ring.size; i++)
+ tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
+
+ /* stats are not reset */
+}
+
+
+static int
+vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
+ tq->comp_ring.base || tq->buf_info);
+
+ tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
+ &tq->tx_ring.basePA, GFP_KERNEL);
+ if (!tq->tx_ring.base) {
+ netdev_err(adapter->netdev, "failed to allocate tx ring\n");
+ goto err;
+ }
+
+ tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->data_ring.size * tq->txdata_desc_size,
+ &tq->data_ring.basePA, GFP_KERNEL);
+ if (!tq->data_ring.base) {
+ netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
+ goto err;
+ }
+
+ tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
+ &tq->comp_ring.basePA, GFP_KERNEL);
+ if (!tq->comp_ring.base) {
+ netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
+ goto err;
+ }
+
+ tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
+ GFP_KERNEL,
+ dev_to_node(&adapter->pdev->dev));
+ if (!tq->buf_info)
+ goto err;
+
+ return 0;
+
+err:
+ vmxnet3_tq_destroy(tq, adapter);
+ return -ENOMEM;
+}
+
+static void
+vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
+}
+
+/*
+ * starting from ring->next2fill, allocate rx buffers for the given ring
+ * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
+ * are allocated or allocation fails
+ */
+
+static int
+vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
+ int num_to_alloc, struct vmxnet3_adapter *adapter)
+{
+ int num_allocated = 0;
+ struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
+ struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
+ u32 val;
+
+ while (num_allocated <= num_to_alloc) {
+ struct vmxnet3_rx_buf_info *rbi;
+ union Vmxnet3_GenericDesc *gd;
+
+ rbi = rbi_base + ring->next2fill;
+ gd = ring->base + ring->next2fill;
+ rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
+
+ if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
+ void *data = vmxnet3_pp_get_buff(rq->page_pool,
+ &rbi->dma_addr,
+ GFP_KERNEL);
+ if (!data) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ rbi->page = virt_to_page(data);
+ val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
+ } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
+ if (rbi->skb == NULL) {
+ rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
+ rbi->len,
+ GFP_KERNEL);
+ if (unlikely(rbi->skb == NULL)) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+
+ rbi->dma_addr = dma_map_single(
+ &adapter->pdev->dev,
+ rbi->skb->data, rbi->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ dev_kfree_skb_any(rbi->skb);
+ rbi->skb = NULL;
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ } else {
+ /* rx buffer skipped by the device */
+ }
+ val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
+ } else {
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
+ rbi->len != PAGE_SIZE);
+
+ if (rbi->page == NULL) {
+ rbi->page = alloc_page(GFP_ATOMIC);
+ if (unlikely(rbi->page == NULL)) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ rbi->dma_addr = dma_map_page(
+ &adapter->pdev->dev,
+ rbi->page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ put_page(rbi->page);
+ rbi->page = NULL;
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ } else {
+ /* rx buffers skipped by the device */
+ }
+ val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
+ }
+
+ gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
+ gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
+ | val | rbi->len);
+
+ /* Fill the last buffer but don't mark it ready, or else the
+ * device will think that the queue is full */
+ if (num_allocated == num_to_alloc) {
+ rbi->comp_state = VMXNET3_RXD_COMP_DONE;
+ break;
+ }
+
+ gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
+ num_allocated++;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ }
+
+ netdev_dbg(adapter->netdev,
+ "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
+ num_allocated, ring->next2fill, ring->next2comp);
+
+ /* so that the device can distinguish a full ring and an empty ring */
+ BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
+
+ return num_allocated;
+}
+
+
+static void
+vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_buf_info *rbi)
+{
+ skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
+
+ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+
+ skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
+ skb->data_len += rcd->len;
+ skb->truesize += PAGE_SIZE;
+ skb_shinfo(skb)->nr_frags++;
+}
+
+
+static int
+vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
+ struct vmxnet3_adapter *adapter)
+{
+ u32 dw2, len;
+ unsigned long buf_offset;
+ int i;
+ union Vmxnet3_GenericDesc *gdesc;
+ struct vmxnet3_tx_buf_info *tbi = NULL;
+
+ BUG_ON(ctx->copy_size > skb_headlen(skb));
+
+ /* use the previous gen bit for the SOP desc */
+ dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+
+ ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
+ gdesc = ctx->sop_txd; /* both loops below can be skipped */
+
+ /* no need to map the buffer if headers are copied */
+ if (ctx->copy_size) {
+ ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
+ tq->tx_ring.next2fill *
+ tq->txdata_desc_size);
+ ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
+ ctx->sop_txd->dword[3] = 0;
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_NONE;
+
+ netdev_dbg(adapter->netdev,
+ "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ tq->tx_ring.next2fill,
+ le64_to_cpu(ctx->sop_txd->txd.addr),
+ ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+
+ /* use the right gen for non-SOP desc */
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ }
+
+ /* linear part can use multiple tx desc if it's big */
+ len = skb_headlen(skb) - ctx->copy_size;
+ buf_offset = ctx->copy_size;
+ while (len) {
+ u32 buf_size;
+
+ if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+ buf_size = len;
+ dw2 |= len;
+ } else {
+ buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+ /* spec says that for TxDesc.len, 0 == 2^14 */
+ }
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_SINGLE;
+ tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
+ skb->data + buf_offset, buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ return -EFAULT;
+
+ tbi->len = buf_size;
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+ gdesc->dword[2] = cpu_to_le32(dw2);
+ gdesc->dword[3] = 0;
+
+ netdev_dbg(adapter->netdev,
+ "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+ le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+ len -= buf_size;
+ buf_offset += buf_size;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 buf_size;
+
+ buf_offset = 0;
+ len = skb_frag_size(frag);
+ while (len) {
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+ buf_size = len;
+ dw2 |= len;
+ } else {
+ buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+ /* spec says that for TxDesc.len, 0 == 2^14 */
+ }
+ tbi->map_type = VMXNET3_MAP_PAGE;
+ tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
+ buf_offset, buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ return -EFAULT;
+
+ tbi->len = buf_size;
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+ gdesc->dword[2] = cpu_to_le32(dw2);
+ gdesc->dword[3] = 0;
+
+ netdev_dbg(adapter->netdev,
+ "txd[%u]: 0x%llx %u %u\n",
+ tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+ le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+ len -= buf_size;
+ buf_offset += buf_size;
+ }
+ }
+
+ ctx->eop_txd = gdesc;
+
+ /* set the last buf_info for the pkt */
+ tbi->skb = skb;
+ tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+ return 0;
+}
+
+
+/* Init all tx queues */
+static void
+vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
+}
+
+
+/*
+ * parse relevant protocol headers:
+ * For a tso pkt, relevant headers are L2/3/4 including options
+ * For a pkt requesting csum offloading, they are L2/3 and may include L4
+ * if it's a TCP/UDP pkt
+ *
+ * Returns:
+ * -1: error happens during parsing
+ * 0: protocol headers parsed, but too big to be copied
+ * 1: protocol headers parsed and copied
+ *
+ * Other effects:
+ * 1. related *ctx fields are updated.
+ * 2. ctx->copy_size is # of bytes copied
+ * 3. the portion to be copied is guaranteed to be in the linear part
+ *
+ */
+static int
+vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
+{
+ u8 protocol = 0;
+
+ if (ctx->mss) { /* TSO */
+ if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+ ctx->l4_offset = skb_inner_transport_offset(skb);
+ ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
+ ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+ } else {
+ ctx->l4_offset = skb_transport_offset(skb);
+ ctx->l4_hdr_size = tcp_hdrlen(skb);
+ ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+ }
+ } else {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* For encap packets, skb_checksum_start_offset refers
+ * to inner L4 offset. Thus, below works for encap as
+ * well as non-encap case
+ */
+ ctx->l4_offset = skb_checksum_start_offset(skb);
+
+ if (VMXNET3_VERSION_GE_4(adapter) &&
+ skb->encapsulation) {
+ struct iphdr *iph = inner_ip_hdr(skb);
+
+ if (iph->version == 4) {
+ protocol = iph->protocol;
+ } else {
+ const struct ipv6hdr *ipv6h;
+
+ ipv6h = inner_ipv6_hdr(skb);
+ protocol = ipv6h->nexthdr;
+ }
+ } else {
+ if (ctx->ipv4) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ protocol = iph->protocol;
+ } else if (ctx->ipv6) {
+ const struct ipv6hdr *ipv6h;
+
+ ipv6h = ipv6_hdr(skb);
+ protocol = ipv6h->nexthdr;
+ }
+ }
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
+ tcp_hdrlen(skb);
+ break;
+ case IPPROTO_UDP:
+ ctx->l4_hdr_size = sizeof(struct udphdr);
+ break;
+ default:
+ ctx->l4_hdr_size = 0;
+ break;
+ }
+
+ ctx->copy_size = min(ctx->l4_offset +
+ ctx->l4_hdr_size, skb->len);
+ } else {
+ ctx->l4_offset = 0;
+ ctx->l4_hdr_size = 0;
+ /* copy as much as allowed */
+ ctx->copy_size = min_t(unsigned int,
+ tq->txdata_desc_size,
+ skb_headlen(skb));
+ }
+
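+ /* Frames no longer than VMXNET3_HDR_COPY_SIZE are copied into the
+ * data ring in their entirety, not just their headers.
+ */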
+ if (skb->len <= VMXNET3_HDR_COPY_SIZE)
+ ctx->copy_size = skb->len;
+
+ /* make sure headers are accessible directly */
+ if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
+ goto err;
+ }
+
+ if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
+ tq->stats.oversized_hdr++;
+ ctx->copy_size = 0;
+ return 0;
+ }
+
+ return 1;
+err:
+ return -1;
+}
+
+/*
+ * copy relevant protocol headers to the transmit ring:
+ * For a tso pkt, relevant headers are L2/3/4 including options
+ * For a pkt requesting csum offloading, they are L2/3 and may include L4
+ * if it's a TCP/UDP pkt
+ *
+ *
+ * Note that this requires vmxnet3_parse_hdr to be called first to set the
+ * appropriate bits in ctx
+ */
+static void
+vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_TxDataDesc *tdd;
+
+ tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
+ tq->tx_ring.next2fill *
+ tq->txdata_desc_size);
+
+ memcpy(tdd->data, skb->data, ctx->copy_size);
+ netdev_dbg(adapter->netdev,
+ "copy %u bytes to dataRing[%u]\n",
+ ctx->copy_size, tq->tx_ring.next2fill);
+}
+
+
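+/* Both TSO preparation helpers below clear the IPv4 header checksum and seed
+ * the TCP checksum with a pseudo-header checksum computed over a zero length,
+ * so the device can finish the per-segment checksums when it segments the pkt.
+ */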
+static void
+vmxnet3_prepare_inner_tso(struct sk_buff *skb,
+ struct vmxnet3_tx_ctx *ctx)
+{
+ struct tcphdr *tcph = inner_tcp_hdr(skb);
+ struct iphdr *iph = inner_ip_hdr(skb);
+
+ if (iph->version == 4) {
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ } else {
+ struct ipv6hdr *iph = inner_ipv6_hdr(skb);
+
+ tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
+}
+
+static void
+vmxnet3_prepare_tso(struct sk_buff *skb,
+ struct vmxnet3_tx_ctx *ctx)
+{
+ struct tcphdr *tcph = tcp_hdr(skb);
+
+ if (ctx->ipv4) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ } else if (ctx->ipv6) {
+ tcp_v6_gso_csum_prep(skb);
+ }
+}
+
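+/* Conservative estimate of the tx descriptors a pkt will need: one per
+ * VMXNET3_MAX_TX_BUF_SIZE (2^14 byte) chunk of the linear area, plus one for
+ * the headers copied to the data ring, plus one per 2^14 byte chunk of each
+ * frag. For example, a 9000 byte linear area with a single 20000 byte frag
+ * is estimated at 1 + 1 + 2 = 4 descriptors.
+ */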
+static int txd_estimate(const struct sk_buff *skb)
+{
+ int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
+ }
+ return count;
+}
+
+/*
+ * Transmits a pkt thru a given tq
+ * Returns:
+ * NETDEV_TX_OK: descriptors are setup successfully
+ * NETDEV_TX_OK: error occurred, the pkt is dropped
+ * NETDEV_TX_BUSY: tx ring is full, queue is stopped
+ *
+ * Side-effects:
+ * 1. tx ring may be changed
+ * 2. tq stats may be updated accordingly
+ * 3. shared->txNumDeferred may be updated
+ */
+
+static int
+vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter, struct net_device *netdev)
+{
+ int ret;
+ u32 count;
+ int num_pkts;
+ int tx_num_deferred;
+ unsigned long flags;
+ struct vmxnet3_tx_ctx ctx;
+ union Vmxnet3_GenericDesc *gdesc;
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Use temporary descriptor to avoid touching bits multiple times */
+ union Vmxnet3_GenericDesc tempTxDesc;
+#endif
+
+ count = txd_estimate(skb);
+
+ ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
+ ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
+
+ ctx.mss = skb_shinfo(skb)->gso_size;
+ if (ctx.mss) {
+ if (skb_header_cloned(skb)) {
+ if (unlikely(pskb_expand_head(skb, 0, 0,
+ GFP_ATOMIC) != 0)) {
+ tq->stats.drop_tso++;
+ goto drop_pkt;
+ }
+ tq->stats.copy_skb_header++;
+ }
+ if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
+ /* tso pkts must not use more than
+ * VMXNET3_MAX_TSO_TXD_PER_PKT entries
+ */
+ if (skb_linearize(skb) != 0) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ tq->stats.linearized++;
+
+ /* recalculate the # of descriptors to use */
+ count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+ if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ }
+ if (skb->encapsulation) {
+ vmxnet3_prepare_inner_tso(skb, &ctx);
+ } else {
+ vmxnet3_prepare_tso(skb, &ctx);
+ }
+ } else {
+ if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
+
+ /* non-tso pkts must not use more than
+ * VMXNET3_MAX_TXD_PER_PKT entries
+ */
+ if (skb_linearize(skb) != 0) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ tq->stats.linearized++;
+
+ /* recalculate the # of descriptors to use */
+ count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+ }
+ }
+
+ ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
+ if (ret >= 0) {
+ BUG_ON(ret <= 0 && ctx.copy_size != 0);
+ /* hdrs parsed, check against other limits */
+ if (ctx.mss) {
+ if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
+ VMXNET3_MAX_TX_BUF_SIZE)) {
+ tq->stats.drop_oversized_hdr++;
+ goto drop_pkt;
+ }
+ } else {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (unlikely(ctx.l4_offset +
+ skb->csum_offset >
+ VMXNET3_MAX_CSUM_OFFSET)) {
+ tq->stats.drop_oversized_hdr++;
+ goto drop_pkt;
+ }
+ }
+ }
+ } else {
+ tq->stats.drop_hdr_inspect_err++;
+ goto drop_pkt;
+ }
+
+ spin_lock_irqsave(&tq->tx_lock, flags);
+
+ if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+ tq->stats.tx_ring_full++;
+ netdev_dbg(adapter->netdev,
+ "tx queue stopped on %s, next2comp %u"
+ " next2fill %u\n", adapter->netdev->name,
+ tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+ vmxnet3_tq_stop(tq, adapter);
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+
+ vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
+
+ /* fill tx descs related to addr & len */
+ if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+ goto unlock_drop_pkt;
+
+ /* setup the EOP desc */
+ ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
+
+ /* setup the SOP desc */
+#ifdef __BIG_ENDIAN_BITFIELD
+ gdesc = &tempTxDesc;
+ gdesc->dword[2] = ctx.sop_txd->dword[2];
+ gdesc->dword[3] = ctx.sop_txd->dword[3];
+#else
+ gdesc = ctx.sop_txd;
+#endif
+ tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
+ if (ctx.mss) {
+ if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+ gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.ext1 = 1;
+ } else {
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ }
+ gdesc->txd.msscof = ctx.mss;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ gdesc->txd.oco = 1;
+ } else {
+ gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.msscof = ctx.mss;
+ }
+ num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
+ } else {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (VMXNET3_VERSION_GE_4(adapter) &&
+ skb->encapsulation) {
+ gdesc->txd.hlen = ctx.l4_offset +
+ ctx.l4_hdr_size;
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.msscof = ctx.l4_offset +
+ skb->csum_offset;
+ gdesc->txd.ext1 = 1;
+ } else {
+ gdesc->txd.om = VMXNET3_OM_ENCAP;
+ gdesc->txd.msscof = 0; /* Reserved */
+ }
+ } else {
+ gdesc->txd.hlen = ctx.l4_offset;
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.msscof = ctx.l4_offset +
+ skb->csum_offset;
+ }
+ } else {
+ gdesc->txd.om = 0;
+ gdesc->txd.msscof = 0;
+ }
+ num_pkts = 1;
+ }
+ le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
+ tx_num_deferred += num_pkts;
+
+ if (skb_vlan_tag_present(skb)) {
+ gdesc->txd.ti = 1;
+ gdesc->txd.tci = skb_vlan_tag_get(skb);
+ }
+
+ /* Ensure that the write to (&gdesc->txd)->gen will be observed after
+ * all other writes to &gdesc->txd.
+ */
+ dma_wmb();
+
+ /* finally flips the GEN bit of the SOP desc. */
+ gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+ VMXNET3_TXD_GEN);
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Finished updating in bitfields of Tx Desc, so write them in original
+ * place.
+ */
+ vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
+ (struct Vmxnet3_TxDesc *)ctx.sop_txd);
+ gdesc = ctx.sop_txd;
+#endif
+ netdev_dbg(adapter->netdev,
+ "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+ (u32)(ctx.sop_txd -
+ tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
+ le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
+
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+
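+ /* Ring the doorbell only once enough pkts have been deferred; this
+ * batches device notifications instead of writing the register for
+ * every single pkt.
+ */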
+ if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
+ tq->shared->txNumDeferred = 0;
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ adapter->tx_prod_offset + tq->qid * 8,
+ tq->tx_ring.next2fill);
+ }
+
+ return NETDEV_TX_OK;
+
+unlock_drop_pkt:
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+drop_pkt:
+ tq->stats.drop_total++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int
+vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq, int size)
+{
+ bool xdp_prog = vmxnet3_xdp_enabled(adapter);
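+ /* With an XDP program attached, XDP_TX may transmit straight from
+ * these pages, so they must be mapped bidirectionally.
+ */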
+ const struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = &adapter->pdev->dev,
+ .offset = VMXNET3_XDP_RX_OFFSET,
+ .max_len = VMXNET3_XDP_MAX_FRSIZE,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
+ rq->napi.napi_id);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ rq->page_pool = pp;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return err;
+}
+
+void *
+vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (unlikely(!page))
+ return NULL;
+
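+ /* Hand the device an address past the headroom reserved at the start
+ * of the page for XDP.
+ */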
+ *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
+
+ return page_address(page);
+}
+
+static netdev_tx_t
+vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+ return vmxnet3_tq_xmit(skb,
+ &adapter->tx_queue[skb->queue_mapping],
+ adapter, netdev);
+}
+
+
+static void
+vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
+ struct sk_buff *skb,
+ union Vmxnet3_GenericDesc *gdesc)
+{
+ if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (gdesc->rcd.v4 &&
+ (le32_to_cpu(gdesc->dword[3]) &
+ VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if ((le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+ skb->csum_level = 1;
+ }
+ WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+ WARN_ON_ONCE(gdesc->rcd.frg &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+ } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+ (1 << VMXNET3_RCD_TUC_SHIFT))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if ((le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+ skb->csum_level = 1;
+ }
+ WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+ WARN_ON_ONCE(gdesc->rcd.frg &&
+ !(le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+ } else {
+ if (gdesc->rcd.csum) {
+ skb->csum = htons(gdesc->rcd.csum);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+ }
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+}
+
+
+static void
+vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
+{
+ rq->stats.drop_err++;
+ if (!rcd->fcs)
+ rq->stats.drop_fcs++;
+
+ rq->stats.drop_total++;
+
+ /*
+ * We do not unmap and chain the rx buffer to the skb.
+ * We basically pretend this buffer is not used and will be recycled
+ * by vmxnet3_rq_alloc_rx_buf()
+ */
+
+ /*
+ * ctx->skb may be NULL if this is the first and only desc
+ * for the pkt
+ */
+ if (ctx->skb)
+ dev_kfree_skb_irq(ctx->skb);
+
+ ctx->skb = NULL;
+}
+
+
+static u32
+vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
+ union Vmxnet3_GenericDesc *gdesc)
+{
+ u32 hlen, maplen;
+ union {
+ void *ptr;
+ struct ethhdr *eth;
+ struct vlan_ethhdr *veth;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
+ } hdr;
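+ /* The union lets the same cursor be advanced through the frame while
+ * being viewed as whichever header type is expected next.
+ */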
+ BUG_ON(gdesc->rcd.tcp == 0);
+
+ maplen = skb_headlen(skb);
+ if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
+ return 0;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+ skb->protocol == cpu_to_be16(ETH_P_8021AD))
+ hlen = sizeof(struct vlan_ethhdr);
+ else
+ hlen = sizeof(struct ethhdr);
+
+ hdr.eth = eth_hdr(skb);
+ if (gdesc->rcd.v4) {
+ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
+ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
+ hdr.ptr += hlen;
+ BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
+ hlen = hdr.ipv4->ihl << 2;
+ hdr.ptr += hdr.ipv4->ihl << 2;
+ } else if (gdesc->rcd.v6) {
+ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
+ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
+ hdr.ptr += hlen;
+ /* Use an estimated value, since we also need to handle
+ * the TSO case.
+ */
+ if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+ hlen = sizeof(struct ipv6hdr);
+ hdr.ptr += sizeof(struct ipv6hdr);
+ } else {
+ /* Non-IP pkt, don't estimate header length */
+ return 0;
+ }
+
+ if (hlen + sizeof(struct tcphdr) > maplen)
+ return 0;
+
+ return (hlen + (hdr.tcp->doff << 2));
+}
+
+static int
+vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter, int quota)
+{
+ u32 rxprod_reg[2] = {
+ adapter->rx_prod_offset, adapter->rx_prod2_offset
+ };
+ u32 num_pkts = 0;
+ bool skip_page_frags = false;
+ bool encap_lro = false;
+ struct Vmxnet3_RxCompDesc *rcd;
+ struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
+ u16 segCnt = 0, mss = 0;
+ int comp_offset, fill_offset;
+#ifdef __BIG_ENDIAN_BITFIELD
+ struct Vmxnet3_RxDesc rxCmdDesc;
+ struct Vmxnet3_RxCompDesc rxComp;
+#endif
+ bool need_flush = false;
+
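+ /* Walk the completion ring until we hit a descriptor whose gen bit
+ * does not match ours, i.e. one the device has not written yet.
+ */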
+ vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
+ &rxComp);
+ while (rcd->gen == rq->comp_ring.gen) {
+ struct vmxnet3_rx_buf_info *rbi;
+ struct sk_buff *skb, *new_skb = NULL;
+ struct page *new_page = NULL;
+ dma_addr_t new_dma_addr;
+ int num_to_alloc;
+ struct Vmxnet3_RxDesc *rxd;
+ u32 idx, ring_idx;
+ struct vmxnet3_cmd_ring *ring = NULL;
+ if (num_pkts >= quota) {
+ /* we may stop even before we see the EOP desc of
+ * the current pkt
+ */
+ break;
+ }
+
+ /* Prevent any rcd field from being (speculatively) read before
+ * rcd->gen is read.
+ */
+ dma_rmb();
+
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
+ rcd->rqID != rq->dataRingQid);
+ idx = rcd->rxdIdx;
+ ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
+ ring = rq->rx_ring + ring_idx;
+ vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
+ &rxCmdDesc);
+ rbi = rq->buf_info[ring_idx] + idx;
+
+ BUG_ON(rxd->addr != rbi->dma_addr ||
+ rxd->len != rbi->len);
+
+ if (unlikely(rcd->eop && rcd->err)) {
+ vmxnet3_rx_error(rq, rcd, ctx, adapter);
+ goto rcd_done;
+ }
+
+ if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
+ struct sk_buff *skb_xdp_pass;
+ int act;
+
+ if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
+ ctx->skb = NULL;
+ goto skip_xdp; /* Handle it later. */
+ }
+
+ if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
+ goto rcd_done;
+
+ act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
+ &skb_xdp_pass);
+ if (act == XDP_PASS) {
+ ctx->skb = skb_xdp_pass;
+ goto sop_done;
+ }
+ ctx->skb = NULL;
+ need_flush |= act == XDP_REDIRECT;
+
+ goto rcd_done;
+ }
+skip_xdp:
+
+ if (rcd->sop) { /* first buf of the pkt */
+ bool rxDataRingUsed;
+ u16 len;
+
+ BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
+ (rcd->rqID != rq->qid &&
+ rcd->rqID != rq->dataRingQid));
+
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
+ rbi->buf_type != VMXNET3_RX_BUF_XDP);
+ BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
+
+ if (unlikely(rcd->len == 0)) {
+ /* Pretend the rx buffer is skipped. */
+ BUG_ON(!(rcd->sop && rcd->eop));
+ netdev_dbg(adapter->netdev,
+ "rxRing[%u][%u] 0 length\n",
+ ring_idx, idx);
+ goto rcd_done;
+ }
+
+ skip_page_frags = false;
+ ctx->skb = rbi->skb;
+
+ rxDataRingUsed =
+ VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
+ len = rxDataRingUsed ? rcd->len : rbi->len;
+
+ if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
+ struct sk_buff *skb_xdp_pass;
+ size_t sz;
+ int act;
+
+ sz = rcd->rxdIdx * rq->data_ring.desc_size;
+ act = vmxnet3_process_xdp_small(adapter, rq,
+ &rq->data_ring.base[sz],
+ rcd->len,
+ &skb_xdp_pass);
+ if (act == XDP_PASS) {
+ ctx->skb = skb_xdp_pass;
+ goto sop_done;
+ }
+ need_flush |= act == XDP_REDIRECT;
+
+ goto rcd_done;
+ }
+ new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
+ len);
+ if (new_skb == NULL) {
+ /* Skb allocation failed; do not hand this skb over to
+ * the stack. Reuse it and drop the existing pkt.
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ if (rxDataRingUsed && adapter->rxdataring_enabled) {
+ size_t sz;
+
+ BUG_ON(rcd->len > rq->data_ring.desc_size);
+
+ ctx->skb = new_skb;
+ sz = rcd->rxdIdx * rq->data_ring.desc_size;
+ memcpy(new_skb->data,
+ &rq->data_ring.base[sz], rcd->len);
+ } else {
+ ctx->skb = rbi->skb;
+
+ new_dma_addr =
+ dma_map_single(&adapter->pdev->dev,
+ new_skb->data, rbi->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ new_dma_addr)) {
+ dev_kfree_skb(new_skb);
+ /* Skb allocation failed; do not hand this
+ * skb over to the stack. Reuse it and drop
+ * the existing pkt.
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ dma_unmap_single(&adapter->pdev->dev,
+ rbi->dma_addr,
+ rbi->len,
+ DMA_FROM_DEVICE);
+
+ /* Immediate refill */
+ rbi->skb = new_skb;
+ rbi->dma_addr = new_dma_addr;
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+ }
+
+ skb_record_rx_queue(ctx->skb, rq->qid);
+ skb_put(ctx->skb, rcd->len);
+
+ if (VMXNET3_VERSION_GE_2(adapter) &&
+ rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+ struct Vmxnet3_RxCompDescExt *rcdlro;
+ union Vmxnet3_GenericDesc *gdesc;
+
+ rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
+ gdesc = (union Vmxnet3_GenericDesc *)rcd;
+
+ segCnt = rcdlro->segCnt;
+ WARN_ON_ONCE(segCnt == 0);
+ mss = rcdlro->mss;
+ if (unlikely(segCnt <= 1))
+ segCnt = 0;
+ encap_lro = (le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
+ } else {
+ segCnt = 0;
+ }
+ } else {
+ BUG_ON(ctx->skb == NULL && !skip_page_frags);
+
+ /* non SOP buffer must be type 1 in most cases */
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
+ BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+
+ /* If an sop buffer was dropped, skip all
+ * following non-sop fragments. They will be reused.
+ */
+ if (skip_page_frags)
+ goto rcd_done;
+
+ if (rcd->len) {
+ new_page = alloc_page(GFP_ATOMIC);
+ /* Replacement page frag could not be allocated.
+ * Reuse this page. Drop the pkt and free the
+ * skb which contained this page as a frag. Skip
+ * processing all the following non-sop frags.
+ */
+ if (unlikely(!new_page)) {
+ rq->stats.rx_buf_alloc_failure++;
+ dev_kfree_skb(ctx->skb);
+ ctx->skb = NULL;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+ new_dma_addr = dma_map_page(&adapter->pdev->dev,
+ new_page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ new_dma_addr)) {
+ put_page(new_page);
+ rq->stats.rx_buf_alloc_failure++;
+ dev_kfree_skb(ctx->skb);
+ ctx->skb = NULL;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ dma_unmap_page(&adapter->pdev->dev,
+ rbi->dma_addr, rbi->len,
+ DMA_FROM_DEVICE);
+
+ vmxnet3_append_frag(ctx->skb, rcd, rbi);
+
+ /* Immediate refill */
+ rbi->page = new_page;
+ rbi->dma_addr = new_dma_addr;
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+ }
+ }
+
+
+sop_done:
+ skb = ctx->skb;
+ if (rcd->eop) {
+ u32 mtu = adapter->netdev->mtu;
+ skb->len += skb->data_len;
+
+#ifdef VMXNET3_RSS
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+ (adapter->netdev->features & NETIF_F_RXHASH)) {
+ enum pkt_hash_types hash_type;
+
+ switch (rcd->rssType) {
+ case VMXNET3_RCD_RSS_TYPE_IPV4:
+ case VMXNET3_RCD_RSS_TYPE_IPV6:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+ hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ }
+ skb_set_hash(skb,
+ le32_to_cpu(rcd->rssHash),
+ hash_type);
+ }
+#endif
+ vmxnet3_rx_csum(adapter, skb,
+ (union Vmxnet3_GenericDesc *)rcd);
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+ if ((!rcd->tcp && !encap_lro) ||
+ !(adapter->netdev->features & NETIF_F_LRO))
+ goto not_lro;
+
+ if (segCnt != 0 && mss != 0) {
+ skb_shinfo(skb)->gso_type = rcd->v4 ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_size = mss;
+ skb_shinfo(skb)->gso_segs = segCnt;
+ } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
+ u32 hlen;
+
+ hlen = vmxnet3_get_hdr_len(adapter, skb,
+ (union Vmxnet3_GenericDesc *)rcd);
+ if (hlen == 0)
+ goto not_lro;
+
+ skb_shinfo(skb)->gso_type =
+ rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ if (segCnt != 0) {
+ skb_shinfo(skb)->gso_segs = segCnt;
+ skb_shinfo(skb)->gso_size =
+ DIV_ROUND_UP(skb->len -
+ hlen, segCnt);
+ } else {
+ skb_shinfo(skb)->gso_size = mtu - hlen;
+ }
+ }
+not_lro:
+ if (unlikely(rcd->ts))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
+
+ /* Use GRO callback if UPT is enabled */
+ if ((adapter->netdev->features & NETIF_F_LRO) &&
+ !rq->shared->updateRxProd)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&rq->napi, skb);
+
+ ctx->skb = NULL;
+ encap_lro = false;
+ num_pkts++;
+ }
+
+rcd_done:
+ /* device may have skipped some rx descs */
+ ring = rq->rx_ring + ring_idx;
+ rbi->comp_state = VMXNET3_RXD_COMP_DONE;
+
+ comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
+ fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
+ idx - ring->next2fill - 1;
+ if (!ring->isOutOfOrder || fill_offset >= comp_offset)
+ ring->next2comp = idx;
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+
+ /* Ensure that the writes to rxd->gen bits will be observed
+ * after all other writes to rxd objects.
+ */
+ dma_wmb();
+
+ while (num_to_alloc) {
+ rbi = rq->buf_info[ring_idx] + ring->next2fill;
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
+ goto refill_buf;
+ if (ring_idx == 0) {
+ /* ring0 Type1 buffers can get skipped; re-fill them */
+ if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
+ goto refill_buf;
+ }
+ if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
+refill_buf:
+ vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ &rxCmdDesc);
+ WARN_ON(!rxd->addr);
+
+ /* Recv desc is ready to be used by the device */
+ rxd->gen = ring->gen;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
+ num_to_alloc--;
+ } else {
+ /* rx completion hasn't occurred */
+ ring->isOutOfOrder = 1;
+ break;
+ }
+ }
+
+ if (num_to_alloc == 0) {
+ ring->isOutOfOrder = 0;
+ }
+
+ /* if needed, update the register */
+ if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ rxprod_reg[ring_idx] + rq->qid * 8,
+ ring->next2fill);
+ }
+
+ vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
+ vmxnet3_getRxComp(rcd,
+ &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
+ }
+ if (need_flush)
+ xdp_do_flush();
+
+ return num_pkts;
+}
+
+
+static void
+vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
+{
+ u32 i, ring_idx;
+ struct Vmxnet3_RxDesc *rxd;
+
+ /* ring has already been cleaned up */
+ if (!rq->rx_ring[0].base)
+ return;
+
+ for (ring_idx = 0; ring_idx < 2; ring_idx++) {
+ for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
+ struct vmxnet3_rx_buf_info *rbi;
+#ifdef __BIG_ENDIAN_BITFIELD
+ struct Vmxnet3_RxDesc rxDesc;
+#endif
+
+ rbi = &rq->buf_info[ring_idx][i];
+ vmxnet3_getRxDesc(rxd,
+ &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
+
+ if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
+ rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
+ page_pool_recycle_direct(rq->page_pool,
+ rbi->page);
+ rbi->page = NULL;
+ } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
+ rbi->skb) {
+ dma_unmap_single(&adapter->pdev->dev, rxd->addr,
+ rxd->len, DMA_FROM_DEVICE);
+ dev_kfree_skb(rbi->skb);
+ rbi->skb = NULL;
+ } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
+ rbi->page) {
+ dma_unmap_page(&adapter->pdev->dev, rxd->addr,
+ rxd->len, DMA_FROM_DEVICE);
+ put_page(rbi->page);
+ rbi->page = NULL;
+ }
+ }
+
+ rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
+ rq->rx_ring[ring_idx].next2fill =
+ rq->rx_ring[ring_idx].next2comp = 0;
+ }
+
+ rq->comp_ring.gen = VMXNET3_INIT_GEN;
+ rq->comp_ring.next2proc = 0;
+}
+
+
+static void
+vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
+ rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
+}
+
+
+static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i;
+ int j;
+
+ /* all rx buffers must have already been freed */
+ for (i = 0; i < 2; i++) {
+ if (rq->buf_info[i]) {
+ for (j = 0; j < rq->rx_ring[i].size; j++)
+ BUG_ON(rq->buf_info[i][j].page != NULL);
+ }
+ }
+
+
+ for (i = 0; i < 2; i++) {
+ if (rq->rx_ring[i].base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[i].size
+ * sizeof(struct Vmxnet3_RxDesc),
+ rq->rx_ring[i].base,
+ rq->rx_ring[i].basePA);
+ rq->rx_ring[i].base = NULL;
+ }
+ }
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
+
+ if (rq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[0].size * rq->data_ring.desc_size,
+ rq->data_ring.base, rq->data_ring.basePA);
+ rq->data_ring.base = NULL;
+ }
+
+ if (rq->comp_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
+ * sizeof(struct Vmxnet3_RxCompDesc),
+ rq->comp_ring.base, rq->comp_ring.basePA);
+ rq->comp_ring.base = NULL;
+ }
+
+ kfree(rq->buf_info[0]);
+ rq->buf_info[0] = NULL;
+ rq->buf_info[1] = NULL;
+}
+
+static void
+vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+ if (rq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ (rq->rx_ring[0].size *
+ rq->data_ring.desc_size),
+ rq->data_ring.base,
+ rq->data_ring.basePA);
+ rq->data_ring.base = NULL;
+ rq->data_ring.desc_size = 0;
+ }
+ }
+}
+
+static int
+vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i, err;
+
+ /* initialize buf_info */
+ for (i = 0; i < rq->rx_ring[0].size; i++) {
+
+ /* 1st buf for a pkt is skbuff or xdp page */
+ if (i % adapter->rx_buf_per_pkt == 0) {
+ rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
+ VMXNET3_RX_BUF_XDP :
+ VMXNET3_RX_BUF_SKB;
+ rq->buf_info[0][i].len = adapter->skb_buf_size;
+ } else { /* subsequent bufs for a pkt are frags */
+ rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
+ rq->buf_info[0][i].len = PAGE_SIZE;
+ }
+ }
+ for (i = 0; i < rq->rx_ring[1].size; i++) {
+ rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
+ rq->buf_info[1][i].len = PAGE_SIZE;
+ }
+
+ /* reset internal state and allocate buffers for both rings */
+ for (i = 0; i < 2; i++) {
+ rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
+
+ memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
+ sizeof(struct Vmxnet3_RxDesc));
+ rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
+ rq->rx_ring[i].isOutOfOrder = 0;
+ }
+
+ err = vmxnet3_create_pp(adapter, rq,
+ rq->rx_ring[0].size + rq->rx_ring[1].size);
+ if (err)
+ return err;
+
+ if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
+ adapter) == 0) {
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
+
+ /* the 1st ring must have at least 1 rx buffer */
+ return -ENOMEM;
+ }
+ vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
+
+ /* reset the comp ring */
+ rq->comp_ring.next2proc = 0;
+ memset(rq->comp_ring.base, 0, rq->comp_ring.size *
+ sizeof(struct Vmxnet3_RxCompDesc));
+ rq->comp_ring.gen = VMXNET3_INIT_GEN;
+
+ /* reset rxctx */
+ rq->rx_ctx.skb = NULL;
+
+ /* stats are not reset */
+ return 0;
+}
+
+
+static int
+vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev, "%s: failed to "
+ "initialize rx queue%i\n",
+ adapter->netdev->name, i);
+ break;
+ }
+ }
+ return err;
+
+}
+
+
+static int
+vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
+{
+ int i;
+ size_t sz;
+ struct vmxnet3_rx_buf_info *bi;
+
+ for (i = 0; i < 2; i++) {
+
+ sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
+ rq->rx_ring[i].base = dma_alloc_coherent(
+ &adapter->pdev->dev, sz,
+ &rq->rx_ring[i].basePA,
+ GFP_KERNEL);
+ if (!rq->rx_ring[i].base) {
+ netdev_err(adapter->netdev,
+ "failed to allocate rx ring %d\n", i);
+ goto err;
+ }
+ }
+
+ if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
+ sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
+ rq->data_ring.base =
+ dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->data_ring.basePA,
+ GFP_KERNEL);
+ if (!rq->data_ring.base) {
+ netdev_err(adapter->netdev,
+ "rx data ring will be disabled\n");
+ adapter->rxdataring_enabled = false;
+ }
+ } else {
+ rq->data_ring.base = NULL;
+ rq->data_ring.desc_size = 0;
+ }
+
+ sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
+ rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->comp_ring.basePA,
+ GFP_KERNEL);
+ if (!rq->comp_ring.base) {
+ netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
+ goto err;
+ }
+
+ bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
+ sizeof(rq->buf_info[0][0]), GFP_KERNEL,
+ dev_to_node(&adapter->pdev->dev));
+ if (!bi)
+ goto err;
+
+ rq->buf_info[0] = bi;
+ rq->buf_info[1] = bi + rq->rx_ring[0].size;
+
+ return 0;
+
+err:
+ vmxnet3_rq_destroy(rq, adapter);
+ return -ENOMEM;
+}
+
+
+int
+vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev,
+ "%s: failed to create rx queue%i\n",
+ adapter->netdev->name, i);
+ goto err_out;
+ }
+ }
+
+ if (!adapter->rxdataring_enabled)
+ vmxnet3_rq_destroy_all_rxdataring(adapter);
+
+ return err;
+err_out:
+ vmxnet3_rq_destroy_all(adapter);
+ return err;
+
+}
+
+/* Multiple queue aware polling function for tx and rx */
+
+static int
+vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
+{
+ int rcd_done = 0, i;
+ if (unlikely(adapter->shared->ecr))
+ vmxnet3_process_events(adapter);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
+ adapter, budget);
+ return rcd_done;
+}
+
+
+static int
+vmxnet3_poll(struct napi_struct *napi, int budget)
+{
+ struct vmxnet3_rx_queue *rx_queue = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
+ int rxd_done;
+
+ rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
+
+ if (rxd_done < budget) {
+ napi_complete_done(napi, rxd_done);
+ vmxnet3_enable_all_intrs(rx_queue->adapter);
+ }
+ return rxd_done;
+}
+
+/*
+ * NAPI polling function for MSI-X mode with multiple Rx queues
+ * Returns the # of NAPI credits consumed (# of rx descriptors processed)
+ */
+
+static int
+vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
+{
+ struct vmxnet3_rx_queue *rq = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
+ struct vmxnet3_adapter *adapter = rq->adapter;
+ int rxd_done;
+
+ /* When sharing interrupt with corresponding tx queue, process
+ * tx completions in that queue as well
+ */
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+ struct vmxnet3_tx_queue *tq =
+ &adapter->tx_queue[rq - adapter->rx_queue];
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+
+ rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
+
+ if (rxd_done < budget) {
+ napi_complete_done(napi, rxd_done);
+ vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
+ }
+ return rxd_done;
+}
+
+
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Handle completion interrupts on tx queues
+ * Returns whether or not the intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_tx(int irq, void *data)
+{
+ struct vmxnet3_tx_queue *tq = data;
+ struct vmxnet3_adapter *adapter = tq->adapter;
+
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
+
+ /* Handle the case where only one irq is allocated for all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
+ vmxnet3_tq_tx_complete(txq, adapter);
+ }
+ } else {
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+ vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * Handle completion interrupts on rx queues. Returns whether or not the
+ * intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_rx(int irq, void *data)
+{
+ struct vmxnet3_rx_queue *rq = data;
+ struct vmxnet3_adapter *adapter = rq->adapter;
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
+ napi_schedule(&rq->napi);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ *
+ * vmxnet3_msix_event --
+ *
+ * vmxnet3 msix event intr handler
+ *
+ * Result:
+ * whether or not the intr is handled
+ *
+ *----------------------------------------------------------------------------
+ */
+
+static irqreturn_t
+vmxnet3_msix_event(int irq, void *data)
+{
+ struct net_device *dev = data;
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
+
+ if (adapter->shared->ecr)
+ vmxnet3_process_events(adapter);
+
+ vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+#endif /* CONFIG_PCI_MSI */
+
+
+/* Interrupt handler for vmxnet3 */
+static irqreturn_t
+vmxnet3_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->intr.type == VMXNET3_IT_INTX) {
+ u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
+ if (unlikely(icr == 0))
+ /* not ours */
+ return IRQ_NONE;
+ }
+
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_all_intrs(adapter);
+
+ napi_schedule(&adapter->rx_queue[0].napi);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/* netpoll callback. */
+static void
+vmxnet3_netpoll(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ default:
+ vmxnet3_intr(0, adapter->netdev);
+ break;
+ }
+
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int
+vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
+{
+ struct vmxnet3_intr *intr = &adapter->intr;
+ int err = 0, i;
+ int vector = 0;
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->intr.type == VMXNET3_IT_MSIX) {
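+ /* Vectors are handed out in order: one per tx queue (or a single
+ * shared one for all tx queues, or none when tx shares its rx
+ * queue's vector), then one per rx queue, and finally one for
+ * events.
+ */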
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(
+ intr->msix_entries[vector].vector,
+ vmxnet3_msix_tx, 0,
+ adapter->tx_queue[i].name,
+ &adapter->tx_queue[i]);
+ } else {
+ sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ }
+ if (err) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to request irq for MSIX, %s, "
+ "error %d\n",
+ adapter->tx_queue[i].name, err);
+ return err;
+ }
+
+ /* Handle the case where only 1 MSIx was allocated for
+ * all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ for (; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector;
+ vector++;
+ break;
+ } else {
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector++;
+ }
+ }
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
+ vector = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
+ sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
+ adapter->netdev->name, vector);
+ else
+ sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_rx, 0,
+ adapter->rx_queue[i].name,
+ &(adapter->rx_queue[i]));
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to request irq for MSIX, "
+ "%s, error %d\n",
+ adapter->rx_queue[i].name, err);
+ return err;
+ }
+
+ adapter->rx_queue[i].comp_ring.intr_idx = vector++;
+ }
+
+ sprintf(intr->event_msi_vector_name, "%s-event-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_event, 0,
+ intr->event_msi_vector_name, adapter->netdev);
+ intr->event_intr_idx = vector;
+
+ } else if (intr->type == VMXNET3_IT_MSI) {
+ adapter->num_rx_queues = 1;
+ err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
+ adapter->netdev->name, adapter->netdev);
+ } else {
+#endif
+ adapter->num_rx_queues = 1;
+ err = request_irq(adapter->pdev->irq, vmxnet3_intr,
+ IRQF_SHARED, adapter->netdev->name,
+ adapter->netdev);
+#ifdef CONFIG_PCI_MSI
+ }
+#endif
+ intr->num_intrs = vector + 1;
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to request irq (intr type:%d), error %d\n",
+ intr->type, err);
+ } else {
+ /* Number of rx queues will not change after this */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rq->qid = i;
+ rq->qid2 = i + adapter->num_rx_queues;
+ rq->dataRingQid = i + 2 * adapter->num_rx_queues;
+ }
+
+ /* init our intr settings */
+ for (i = 0; i < intr->num_intrs; i++)
+ intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
+ if (adapter->intr.type != VMXNET3_IT_MSIX) {
+ adapter->intr.event_intr_idx = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx = 0;
+ adapter->rx_queue[0].comp_ring.intr_idx = 0;
+ }
+
+ netdev_info(adapter->netdev,
+ "intr type %u, mode %u, %u vectors allocated\n",
+ intr->type, intr->mask_mode, intr->num_intrs);
+ }
+
+ return err;
+}
+
+
+static void
+vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
+{
+ struct vmxnet3_intr *intr = &adapter->intr;
+ BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
+
+ switch (intr->type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX:
+ {
+ int i, vector = 0;
+
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->tx_queue[i]));
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
+ break;
+ }
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->rx_queue[i]));
+ }
+
+ free_irq(intr->msix_entries[vector].vector,
+ adapter->netdev);
+ BUG_ON(vector >= intr->num_intrs);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ free_irq(adapter->pdev->irq, adapter->netdev);
+ break;
+ case VMXNET3_IT_INTX:
+ free_irq(adapter->pdev->irq, adapter->netdev);
+ break;
+ default:
+ BUG();
+ }
+}
+
+
+static void
+vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
+{
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ u16 vid;
+
+ /* allow untagged pkts */
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+}
+
+
+static int
+vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (!(netdev->flags & IFF_PROMISC)) {
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
+
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ set_bit(vid, adapter->active_vlans);
+
+ return 0;
+}
+
+
+static int
+vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (!(netdev->flags & IFF_PROMISC)) {
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
+
+ VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ clear_bit(vid, adapter->active_vlans);
+
+ return 0;
+}
+
+
+static u8 *
+vmxnet3_copy_mc(struct net_device *netdev)
+{
+ u8 *buf = NULL;
+ u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
+
+ /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
+ if (sz <= 0xffff) {
+ /* We may be called with BH disabled */
+ buf = kmalloc(sz, GFP_ATOMIC);
+ if (buf) {
+ struct netdev_hw_addr *ha;
+ int i = 0;
+
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(buf + i++ * ETH_ALEN, ha->addr,
+ ETH_ALEN);
+ }
+ }
+ return buf;
+}
+
+
+static void
+vmxnet3_set_mc(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ struct Vmxnet3_RxFilterConf *rxConf =
+ &adapter->shared->devRead.rxFilterConf;
+ u8 *new_table = NULL;
+ dma_addr_t new_table_pa = 0;
+ bool new_table_pa_valid = false;
+ u32 new_mode = VMXNET3_RXM_UCAST;
+
+ if (netdev->flags & IFF_PROMISC) {
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
+
+ new_mode |= VMXNET3_RXM_PROMISC;
+ } else {
+ vmxnet3_restore_vlan(adapter);
+ }
+
+ if (netdev->flags & IFF_BROADCAST)
+ new_mode |= VMXNET3_RXM_BCAST;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ new_mode |= VMXNET3_RXM_ALL_MULTI;
+ else
+ if (!netdev_mc_empty(netdev)) {
+ new_table = vmxnet3_copy_mc(netdev);
+ if (new_table) {
+ size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
+
+ rxConf->mfTableLen = cpu_to_le16(sz);
+ new_table_pa = dma_map_single(
+ &adapter->pdev->dev,
+ new_table,
+ sz,
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(&adapter->pdev->dev,
+ new_table_pa)) {
+ new_mode |= VMXNET3_RXM_MCAST;
+ new_table_pa_valid = true;
+ rxConf->mfTablePA = cpu_to_le64(
+ new_table_pa);
+ }
+ }
+ if (!new_table_pa_valid) {
+ netdev_info(netdev,
+ "failed to copy mcast list, setting ALL_MULTI\n");
+ new_mode |= VMXNET3_RXM_ALL_MULTI;
+ }
+ }
+
+ if (!(new_mode & VMXNET3_RXM_MCAST)) {
+ rxConf->mfTableLen = 0;
+ rxConf->mfTablePA = 0;
+ }
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ if (new_mode != rxConf->rxMode) {
+ rxConf->rxMode = cpu_to_le32(new_mode);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RX_MODE);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_MAC_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (new_table_pa_valid)
+ dma_unmap_single(&adapter->pdev->dev, new_table_pa,
+ rxConf->mfTableLen, DMA_TO_DEVICE);
+ kfree(new_table);
+}
+
+void
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
+}
+
+
+/*
+ * Set up driver_shared based on settings in adapter.
+ */
+
+static void
+vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
+ struct Vmxnet3_TxQueueConf *tqc;
+ struct Vmxnet3_RxQueueConf *rqc;
+ int i;
+
+ memset(shared, 0, sizeof(*shared));
+
+ /* driver settings */
+ shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
+ devRead->misc.driverInfo.version = cpu_to_le32(
+ VMXNET3_DRIVER_VERSION_NUM);
+ devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
+ devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
+ *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
+ *((u32 *)&devRead->misc.driverInfo.gos));
+ devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
+ devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
+
+ devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
+ devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
+
+ /* set up feature flags */
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
+ devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
+
+ if (adapter->netdev->features & NETIF_F_LRO) {
+ devRead->misc.uptFeatures |= UPT1_F_LRO;
+ devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
+ }
+ if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+
+ if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM))
+ devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
+
+ devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
+ devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
+ devRead->misc.queueDescLen = cpu_to_le32(
+ adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+ adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
+
+ /* tx queue settings */
+ devRead->misc.numTxQueues = adapter->num_tx_queues;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
+ tqc = &adapter->tqd_start[i].conf;
+ tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
+ tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
+ tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
+ tqc->ddPA = cpu_to_le64(~0ULL);
+ tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
+ tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
+ tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
+ tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
+ tqc->ddLen = cpu_to_le32(0);
+ tqc->intrIdx = tq->comp_ring.intr_idx;
+ }
+
+ /* rx queue settings */
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rqc = &adapter->rqd_start[i].conf;
+ rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
+ rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
+ rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
+ rqc->ddPA = cpu_to_le64(~0ULL);
+ rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
+ rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
+ rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
+ rqc->ddLen = cpu_to_le32(0);
+ rqc->intrIdx = rq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ rqc->rxDataRingBasePA =
+ cpu_to_le64(rq->data_ring.basePA);
+ rqc->rxDataRingDescSize =
+ cpu_to_le16(rq->data_ring.desc_size);
+ }
+ }
+
+#ifdef VMXNET3_RSS
+ memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
+
+ if (adapter->rss) {
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ devRead->misc.uptFeatures |= UPT1_F_RSS;
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
+ UPT1_RSS_HASH_TYPE_IPV4 |
+ UPT1_RSS_HASH_TYPE_TCP_IPV6 |
+ UPT1_RSS_HASH_TYPE_IPV6;
+ rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
+ rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
+ rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
+ netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
+
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = ethtool_rxfh_indir_default(
+ i, adapter->num_rx_queues);
+
+ devRead->rssConfDesc.confVer = 1;
+ devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
+ devRead->rssConfDesc.confPA =
+ cpu_to_le64(adapter->rss_conf_pa);
+ }
+
+#endif /* VMXNET3_RSS */
+
+ /* intr settings */
+ if (!VMXNET3_VERSION_GE_6(adapter) ||
+ !adapter->queuesExtEnabled) {
+ devRead->intrConf.autoMask = adapter->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devRead->intrConf.numIntrs = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
+
+ devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
+ devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ } else {
+ devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
+
+ devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
+ devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ }
+
+ /* rx filter settings */
+ devRead->rxFilterConf.rxMode = 0;
+ vmxnet3_restore_vlan(adapter);
+ vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
+ /* the rest are already zeroed */
+}
+
+static void
+vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_7(adapter))
+ return;
+
+ cmdInfo->ringBufSize = adapter->ringBufSize;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_RING_BUFFER_SIZE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
+static void
+vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen =
+ cpu_to_le32(sizeof(*adapter->coal_conf));
+ cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
+
+ if (adapter->default_coal_mode) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_COALESCE);
+ } else {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_COALESCE);
+ }
+
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
+static void
+vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_4(adapter))
+ return;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+
+ if (adapter->default_rss_fields) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ adapter->rss_fields =
+ VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ } else {
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
+ adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_UDP_RSS)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
+ }
+
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV4)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
+ }
+
+ if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV6)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ }
+ cmdInfo->setRssFields = adapter->rss_fields;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_RSS_FIELDS);
+ /* Not all requested RSS may get applied, so get and
+ * cache what was actually applied.
+ */
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ adapter->rss_fields =
+ VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ }
+
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
+int
+vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
+{
+ int err, i;
+ u32 ret;
+ unsigned long flags;
+
+ netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+ " ring sizes %u %u %u\n", adapter->netdev->name,
+ adapter->skb_buf_size, adapter->rx_buf_per_pkt,
+ adapter->tx_queue[0].tx_ring.size,
+ adapter->rx_queue[0].rx_ring[0].size,
+ adapter->rx_queue[0].rx_ring[1].size);
+
+ vmxnet3_tq_init_all(adapter);
+ err = vmxnet3_rq_init_all(adapter);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to init rx queue error %d\n", err);
+ goto rq_err;
+ }
+
+ err = vmxnet3_request_irqs(adapter);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to setup irq for error %d\n", err);
+ goto irq_err;
+ }
+
+ vmxnet3_setup_driver_shared(adapter);
+
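+ /* Tell the device where the shared memory region lives, split into
+ * the low and high 32 bits of its physical address.
+ */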
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
+ adapter->shared_pa));
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
+ adapter->shared_pa));
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_ACTIVATE_DEV);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (ret != 0) {
+ netdev_err(adapter->netdev,
+ "Failed to activate dev: error %u\n", ret);
+ err = -EINVAL;
+ goto activate_err;
+ }
+
+ vmxnet3_init_bufsize(adapter);
+ vmxnet3_init_coalesce(adapter);
+ vmxnet3_init_rssfields(adapter);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
+ adapter->rx_queue[i].rx_ring[0].next2fill);
+ VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
+ (i * VMXNET3_REG_ALIGN)),
+ adapter->rx_queue[i].rx_ring[1].next2fill);
+ }
+
+ /* Apply the rx filter settings last. */
+ vmxnet3_set_mc(adapter->netdev);
+
+ /*
+ * Check link state when first activating device. It will start the
+ * tx queue if the link is up.
+ */
+ vmxnet3_check_link(adapter, true);
+ netif_tx_wake_all_queues(adapter->netdev);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
+ vmxnet3_enable_all_intrs(adapter);
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
+ return 0;
+
+activate_err:
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
+ vmxnet3_free_irqs(adapter);
+irq_err:
+rq_err:
+ /* free up buffers we allocated */
+ vmxnet3_rq_cleanup_all(adapter);
+ return err;
+}
+
+
+void
+vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
+
+int
+vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
+{
+ int i;
+ unsigned long flags;
+ if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
+ return 0;
+
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_QUIESCE_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ vmxnet3_disable_all_intrs(adapter);
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
+ netif_tx_disable(adapter->netdev);
+ adapter->link_speed = 0;
+ netif_carrier_off(adapter->netdev);
+
+ vmxnet3_tq_cleanup_all(adapter);
+ vmxnet3_rq_cleanup_all(adapter);
+ vmxnet3_free_irqs(adapter);
+ return 0;
+}
+
+
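+/* Program the MAC address: MACL takes the first four bytes, MACH holds the
+ * remaining two in its low 16 bits.
+ */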
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
+{
+ u32 tmp;
+
+ tmp = *(u32 *)mac;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
+
+ tmp = (mac[5] << 8) | mac[4];
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
+}
+
+
+static int
+vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ dev_addr_set(netdev, addr->sa_data);
+ vmxnet3_write_mac_addr(adapter, addr->sa_data);
+
+ return 0;
+}
+
+
+/* ==================== initialization and cleanup routines ============ */
+
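+/* Enable the PCI device, claim BARs 0 and 1 and map them: BAR0 carries the
+ * queue doorbell and interrupt mask registers, BAR1 the command and
+ * configuration registers.
+ */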
+static int
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
+{
+ int err;
+ unsigned long mmio_start, mmio_len;
+ struct pci_dev *pdev = adapter->pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
+ return err;
+ }
+
+ err = pci_request_selected_regions(pdev, (1 << 2) - 1,
+ vmxnet3_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to request region for adapter: error %d\n", err);
+ goto err_enable_device;
+ }
+
+ pci_set_master(pdev);
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+ adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw_addr0) {
+ dev_err(&pdev->dev, "Failed to map bar0\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ mmio_start = pci_resource_start(pdev, 1);
+ mmio_len = pci_resource_len(pdev, 1);
+ adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw_addr1) {
+ dev_err(&pdev->dev, "Failed to map bar1\n");
+ err = -EIO;
+ goto err_bar1;
+ }
+ return 0;
+
+err_bar1:
+ iounmap(adapter->hw_addr0);
+err_ioremap:
+ pci_release_selected_regions(pdev, (1 << 2) - 1);
+err_enable_device:
+ pci_disable_device(pdev);
+ return err;
+}
+
+
+static void
+vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
+{
+ BUG_ON(!adapter->pdev);
+
+ iounmap(adapter->hw_addr0);
+ iounmap(adapter->hw_addr1);
+ pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
+ pci_disable_device(adapter->pdev);
+}
+
+
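+/* Derive skb_buf_size and rx_buf_per_pkt from the current MTU, then round
+ * the rx ring sizes to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
+ * (and, for version 7 and later, down to a power of two).
+ */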
+void
+vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
+{
+ size_t sz, i, ring0_size, ring1_size, comp_size;
+ /* With version7 ring1 will have only T0 buffers */
+ if (!VMXNET3_VERSION_GE_7(adapter)) {
+ if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
+ VMXNET3_MAX_ETH_HDR_SIZE) {
+ adapter->skb_buf_size = adapter->netdev->mtu +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
+ adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
+
+ adapter->rx_buf_per_pkt = 1;
+ } else {
+ adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
+ sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
+ }
+ } else {
+ adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
+ VMXNET3_MAX_SKB_BUF_SIZE);
+ adapter->rx_buf_per_pkt = 1;
+ adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
+ adapter->ringBufSize.ring1BufSizeType1 = 0;
+ adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
+ }
+
+ /*
+ * for simplicity, force the ring0 size to be a multiple of
+ * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
+ */
+ sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
+ ring0_size = adapter->rx_queue[0].rx_ring[0].size;
+ ring0_size = (ring0_size + sz - 1) / sz * sz;
+ ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
+ sz * sz);
+ ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+ ring1_size = (ring1_size + sz - 1) / sz * sz;
+ ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
+ sz * sz);
+ /* For v7 and later, keep ring size power of 2 for UPT */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ ring0_size = rounddown_pow_of_two(ring0_size);
+ ring1_size = rounddown_pow_of_two(ring1_size);
+ }
+ comp_size = ring0_size + ring1_size;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+ rq->rx_ring[0].size = ring0_size;
+ rq->rx_ring[1].size = ring1_size;
+ rq->comp_ring.size = comp_size;
+ }
+}
+
+
+int
+vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
+ u32 rx_ring_size, u32 rx_ring2_size,
+ u16 txdata_desc_size, u16 rxdata_desc_size)
+{
+ int err = 0, i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ tq->tx_ring.size = tx_ring_size;
+ tq->data_ring.size = tx_ring_size;
+ tq->comp_ring.size = tx_ring_size;
+ tq->txdata_desc_size = txdata_desc_size;
+ tq->shared = &adapter->tqd_start[i].ctrl;
+ tq->stopped = true;
+ tq->adapter = adapter;
+ tq->qid = i;
+ err = vmxnet3_tq_create(tq, adapter);
+ /*
+ * Too late to change num_tx_queues. We cannot make do with
+ * fewer queues than we asked for.
+ */
+ if (err)
+ goto queue_err;
+ }
+
+ adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
+ adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
+ vmxnet3_adjust_rx_ring_size(adapter);
+
+ adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ /* qid and qid2 for rx queues will be assigned later when num
+ * of rx queues is finalized after allocating intrs */
+ rq->shared = &adapter->rqd_start[i].ctrl;
+ rq->adapter = adapter;
+ rq->data_ring.desc_size = rxdata_desc_size;
+ err = vmxnet3_rq_create(rq, adapter);
+ if (err) {
+ if (i == 0) {
+ netdev_err(adapter->netdev,
+ "Could not allocate any rx queues. "
+ "Aborting.\n");
+ goto queue_err;
+ } else {
+ netdev_info(adapter->netdev,
+ "Number of rx queues changed "
+ "to : %d.\n", i);
+ adapter->num_rx_queues = i;
+ err = 0;
+ break;
+ }
+ }
+ }
+
+ if (!adapter->rxdataring_enabled)
+ vmxnet3_rq_destroy_all_rxdataring(adapter);
+
+ return err;
+queue_err:
+ vmxnet3_tq_destroy_all(adapter);
+ return err;
+}
+
+static int
+vmxnet3_open(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter;
+ int err, i;
+
+ adapter = netdev_priv(netdev);
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ spin_lock_init(&adapter->tx_queue[i].tx_lock);
+
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ unsigned long flags;
+ u16 txdata_desc_size;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+ txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
+ VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
+ (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
+ (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
+ adapter->txdata_desc_size =
+ sizeof(struct Vmxnet3_TxDataDesc);
+ } else {
+ adapter->txdata_desc_size = txdata_desc_size;
+ }
+ } else {
+ adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
+ }
+
+ err = vmxnet3_create_queues(adapter,
+ adapter->tx_ring_size,
+ adapter->rx_ring_size,
+ adapter->rx_ring2_size,
+ adapter->txdata_desc_size,
+ adapter->rxdata_desc_size);
+ if (err)
+ goto queue_err;
+
+ err = vmxnet3_activate_dev(adapter);
+ if (err)
+ goto activate_err;
+
+ return 0;
+
+activate_err:
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
+queue_err:
+ return err;
+}
+
+
+static int
+vmxnet3_close(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+ */
+ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ vmxnet3_quiesce_dev(adapter);
+
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
+
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+
+
+ return 0;
+}
+
+
+void
+vmxnet3_force_close(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ /*
+ * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
+ * vmxnet3_close() will deadlock.
+ */
+ BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
+
+ /* we need to enable NAPI, otherwise dev_close will deadlock */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
+ /*
+ * Need to clear the quiesce bit to ensure that vmxnet3_close
+ * can quiesce the device properly
+ */
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
+ dev_close(adapter->netdev);
+}
+
+
+static int
+vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ netdev->mtu = new_mtu;
+
+ /*
+ * Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+ */
+ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(netdev)) {
+ vmxnet3_quiesce_dev(adapter);
+ vmxnet3_reset_dev(adapter);
+
+ /* we need to re-create the rx queue based on the new mtu */
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_adjust_rx_ring_size(adapter);
+ err = vmxnet3_rq_create_all(adapter);
+ if (err) {
+ netdev_err(netdev,
+ "failed to re-create rx queues, "
+ " error %d. Closing it.\n", err);
+ goto out;
+ }
+
+ err = vmxnet3_activate_dev(adapter);
+ if (err) {
+ netdev_err(netdev,
+ "failed to re-activate, error %d. "
+ "Closing it\n", err);
+ goto out;
+ }
+ }
+
+out:
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+ if (err)
+ vmxnet3_force_close(adapter);
+
+ return err;
+}
+
+
+static void
+vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO | NETIF_F_HIGHDMA;
+
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
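+ /* For version 7+ devices each encapsulation offload must be offered
+ * through PTCR; request the supported ones via DCR0 and read back what
+ * the device actually granted before trimming the netdev feature flags.
+ */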
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ }
+
+ netdev->vlan_features = netdev->hw_features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+}
+
+
+static void
+vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
+{
+ u32 tmp;
+
+ tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
+ *(u32 *)mac = tmp;
+
+ tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
+ mac[4] = tmp & 0xff;
+ mac[5] = (tmp >> 8) & 0xff;
+}
+
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Enable MSI-X vectors.
+ * Returns:
+ * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
+ * vectors could be enabled;
+ * otherwise, the number of vectors that were enabled (this number is
+ * greater than VMXNET3_LINUX_MIN_MSIX_VECT).
+ */
+
+static int
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
+{
+ int ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries, nvec, nvec);
+
+ if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable %d MSI-X, trying %d\n",
+ nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries,
+ VMXNET3_LINUX_MIN_MSIX_VECT,
+ VMXNET3_LINUX_MIN_MSIX_VECT);
+ }
+
+ if (ret < 0) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable MSI-X, error: %d\n", ret);
+ }
+
+ return ret;
+}
+
+
+#endif /* CONFIG_PCI_MSI */
+
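+/* Query the device's preferred interrupt configuration (type in bits 1:0,
+ * mask mode in bits 3:2) and try MSI-X first, falling back to MSI and
+ * finally INTx.
+ */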
+static void
+vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
+{
+ u32 cfg;
+ unsigned long flags;
+
+ /* intr settings */
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_CONF_INTR);
+ cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ adapter->intr.type = cfg & 0x3;
+ adapter->intr.mask_mode = (cfg >> 2) & 0x3;
+
+ if (adapter->intr.type == VMXNET3_IT_AUTO) {
+ adapter->intr.type = VMXNET3_IT_MSIX;
+ }
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->intr.type == VMXNET3_IT_MSIX) {
+ int i, nvec, nvec_allocated;
+
+ nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
+ 0 : adapter->num_rx_queues;
+ nvec += 1; /* for link event */
+ nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
+ nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
+
+ for (i = 0; i < nvec; i++)
+ adapter->intr.msix_entries[i].entry = i;
+
+ nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
+ if (nvec_allocated < 0)
+ goto msix_err;
+
+ /* If we cannot allocate one MSIx vector per queue
+ * then limit the number of rx queues to 1
+ */
+ if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
+ nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
+ || adapter->num_rx_queues != 1) {
+ adapter->share_intr = VMXNET3_INTR_TXSHARE;
+ netdev_err(adapter->netdev,
+ "Number of rx queues : 1\n");
+ adapter->num_rx_queues = 1;
+ }
+ }
+
+ adapter->intr.num_intrs = nvec_allocated;
+ return;
+
+msix_err:
+ /* If we cannot allocate MSIx vectors use only one rx queue */
+ dev_info(&adapter->pdev->dev,
+ "Failed to enable MSI-X, error %d. "
+ "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
+
+ adapter->intr.type = VMXNET3_IT_MSI;
+ }
+
+ if (adapter->intr.type == VMXNET3_IT_MSI) {
+ if (!pci_enable_msi(adapter->pdev)) {
+ adapter->num_rx_queues = 1;
+ adapter->intr.num_intrs = 1;
+ return;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ adapter->num_rx_queues = 1;
+ dev_info(&adapter->netdev->dev,
+ "Using INTx interrupt, #Rx queues: 1.\n");
+ adapter->intr.type = VMXNET3_IT_INTX;
+
+ /* INT-X related setting */
+ adapter->intr.num_intrs = 1;
+}
+
+
+static void
+vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
+{
+ if (adapter->intr.type == VMXNET3_IT_MSIX)
+ pci_disable_msix(adapter->pdev);
+ else if (adapter->intr.type == VMXNET3_IT_MSI)
+ pci_disable_msi(adapter->pdev);
+ else
+ BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
+}
+
+
+static void
+vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
+
+ netdev_err(adapter->netdev, "tx hang\n");
+ schedule_work(&adapter->work);
+}
+
+
+static void
+vmxnet3_reset_work(struct work_struct *data)
+{
+ struct vmxnet3_adapter *adapter;
+
+ adapter = container_of(data, struct vmxnet3_adapter, work);
+
+ /* if another thread is resetting the device, no need to proceed */
+ if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ return;
+
+ /* if the device is closed, we must leave it alone */
+ rtnl_lock();
+ if (netif_running(adapter->netdev)) {
+ netdev_notice(adapter->netdev, "resetting\n");
+ vmxnet3_quiesce_dev(adapter);
+ vmxnet3_reset_dev(adapter);
+ vmxnet3_activate_dev(adapter);
+ } else {
+ netdev_info(adapter->netdev, "already closed\n");
+ }
+ rtnl_unlock();
+
+ netif_wake_queue(adapter->netdev);
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+}
+
+
+static int
+vmxnet3_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct net_device_ops vmxnet3_netdev_ops = {
+ .ndo_open = vmxnet3_open,
+ .ndo_stop = vmxnet3_close,
+ .ndo_start_xmit = vmxnet3_xmit_frame,
+ .ndo_set_mac_address = vmxnet3_set_mac_addr,
+ .ndo_change_mtu = vmxnet3_change_mtu,
+ .ndo_fix_features = vmxnet3_fix_features,
+ .ndo_set_features = vmxnet3_set_features,
+ .ndo_features_check = vmxnet3_features_check,
+ .ndo_get_stats64 = vmxnet3_get_stats64,
+ .ndo_tx_timeout = vmxnet3_tx_timeout,
+ .ndo_set_rx_mode = vmxnet3_set_mc,
+ .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vmxnet3_netpoll,
+#endif
+ .ndo_bpf = vmxnet3_xdp,
+ .ndo_xdp_xmit = vmxnet3_xdp_xmit,
+ };
+ int err;
+ u32 ver;
+ struct net_device *netdev;
+ struct vmxnet3_adapter *adapter;
+ u8 mac[ETH_ALEN];
+ int size;
+ int num_tx_queues;
+ int num_rx_queues;
+ int queues;
+ unsigned long flags;
+
+ if (!pci_msi_enabled())
+ enable_mq = 0;
+
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
+
+ if (enable_mq)
+ num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
+ (int)num_online_cpus());
+ else
+ num_tx_queues = 1;
+
+ netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
+ max(num_tx_queues, num_rx_queues));
+ if (!netdev)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+ adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ goto err_set_mask;
+ }
+
+ spin_lock_init(&adapter->cmd_lock);
+ adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+ sizeof(struct vmxnet3_adapter),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+ dev_err(&pdev->dev, "Failed to map dma\n");
+ err = -EFAULT;
+ goto err_set_mask;
+ }
+ adapter->shared = dma_alloc_coherent(
+ &adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ &adapter->shared_pa, GFP_KERNEL);
+ if (!adapter->shared) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ err = -ENOMEM;
+ goto err_alloc_shared;
+ }
+
+ err = vmxnet3_alloc_pci_resources(adapter);
+ if (err < 0)
+ goto err_alloc_pci;
+
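+ /* VRRS advertises the supported device revisions as a bitmap; acknowledge
+ * the highest revision this driver understands by writing that bit back.
+ */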
+ ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
+ if (ver & (1 << VMXNET3_REV_7)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_7);
+ adapter->version = VMXNET3_REV_7 + 1;
+ } else if (ver & (1 << VMXNET3_REV_6)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_6);
+ adapter->version = VMXNET3_REV_6 + 1;
+ } else if (ver & (1 << VMXNET3_REV_5)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_5);
+ adapter->version = VMXNET3_REV_5 + 1;
+ } else if (ver & (1 << VMXNET3_REV_4)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_4);
+ adapter->version = VMXNET3_REV_4 + 1;
+ } else if (ver & (1 << VMXNET3_REV_3)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_3);
+ adapter->version = VMXNET3_REV_3 + 1;
+ } else if (ver & (1 << VMXNET3_REV_2)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_2);
+ adapter->version = VMXNET3_REV_2 + 1;
+ } else if (ver & (1 << VMXNET3_REV_1)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_1);
+ adapter->version = VMXNET3_REV_1 + 1;
+ } else {
+ dev_err(&pdev->dev,
+ "Incompatible h/w version (0x%x) for adapter\n", ver);
+ err = -EBUSY;
+ goto err_ver;
+ }
+ dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
+
+ ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
+ if (ver & 1) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
+ } else {
+ dev_err(&pdev->dev,
+ "Incompatible upt version (0x%x) for adapter\n", ver);
+ err = -EBUSY;
+ goto err_ver;
+ }
+
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
+ adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
+ if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
+ adapter->dev_caps[0] = adapter->devcap_supported[0] &
+ (1UL << VMXNET3_CAP_LARGE_BAR);
+ }
+ if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
+ adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
+ adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
+ adapter->dev_caps[0] |= adapter->devcap_supported[0] &
+ (1UL << VMXNET3_CAP_OOORX_COMP);
+ }
+ if (adapter->dev_caps[0])
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ if (VMXNET3_VERSION_GE_7(adapter) &&
+ adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
+ adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
+ adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
+ adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
+ } else {
+ adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
+ adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
+ adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
+ }
+
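+ /* Version 6+ devices report their queue limits via GET_MAX_QUEUES_CONF
+ * (tx count in the low byte, rx count in bits 15:8); older devices are
+ * capped at the defaults and use power-of-two queue counts.
+ */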
+ if (VMXNET3_VERSION_GE_6(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (queues > 0) {
+ adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
+ adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
+ } else {
+ adapter->num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ adapter->num_tx_queues = min(num_tx_queues,
+ VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
+ }
+ if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
+ adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
+ adapter->queuesExtEnabled = true;
+ } else {
+ adapter->queuesExtEnabled = false;
+ }
+ } else {
+ adapter->queuesExtEnabled = false;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ num_tx_queues = rounddown_pow_of_two(num_tx_queues);
+ adapter->num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ adapter->num_tx_queues = min(num_tx_queues,
+ VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
+ }
+ dev_info(&pdev->dev,
+ "# of Tx queues : %d, # of Rx queues : %d\n",
+ adapter->num_tx_queues, adapter->num_rx_queues);
+
+ adapter->rx_buf_per_pkt = 1;
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
+ adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &adapter->queue_desc_pa,
+ GFP_KERNEL);
+
+ if (!adapter->tqd_start) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ err = -ENOMEM;
+ goto err_ver;
+ }
+ adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
+ adapter->num_tx_queues);
+
+ adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_PMConf),
+ &adapter->pm_conf_pa,
+ GFP_KERNEL);
+ if (adapter->pm_conf == NULL) {
+ err = -ENOMEM;
+ goto err_alloc_pm;
+ }
+
+#ifdef VMXNET3_RSS
+
+ adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct UPT1_RSSConf),
+ &adapter->rss_conf_pa,
+ GFP_KERNEL);
+ if (adapter->rss_conf == NULL) {
+ err = -ENOMEM;
+ goto err_alloc_rss;
+ }
+#endif /* VMXNET3_RSS */
+
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ adapter->coal_conf =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ &adapter->coal_conf_pa,
+ GFP_KERNEL);
+ if (!adapter->coal_conf) {
+ err = -ENOMEM;
+ goto err_coal_conf;
+ }
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
+ adapter->default_coal_mode = true;
+ }
+
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ adapter->default_rss_fields = true;
+ adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ vmxnet3_declare_features(adapter);
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
+ adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
+ if (adapter->num_tx_queues == adapter->num_rx_queues)
+ adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
+ else
+ adapter->share_intr = VMXNET3_INTR_DONTSHARE;
+
+ vmxnet3_alloc_intr_resources(adapter);
+
+#ifdef VMXNET3_RSS
+ if (adapter->num_rx_queues > 1 &&
+ adapter->intr.type == VMXNET3_IT_MSIX) {
+ adapter->rss = true;
+ netdev->hw_features |= NETIF_F_RXHASH;
+ netdev->features |= NETIF_F_RXHASH;
+ dev_dbg(&pdev->dev, "RSS is enabled.\n");
+ } else {
+ adapter->rss = false;
+ }
+#endif
+
+ vmxnet3_read_mac_addr(adapter, mac);
+ dev_addr_set(netdev, mac);
+
+ netdev->netdev_ops = &vmxnet3_netdev_ops;
+ vmxnet3_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+
+ /* MTU range: 60 - 9190 */
+ netdev->min_mtu = VMXNET3_MIN_MTU;
+ if (VMXNET3_VERSION_GE_6(adapter))
+ netdev->max_mtu = VMXNET3_V6_MAX_MTU;
+ else
+ netdev->max_mtu = VMXNET3_MAX_MTU;
+
+ INIT_WORK(&adapter->work, vmxnet3_reset_work);
+ set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
+
+ if (adapter->intr.type == VMXNET3_IT_MSIX) {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ netif_napi_add(adapter->netdev,
+ &adapter->rx_queue[i].napi,
+ vmxnet3_poll_rx_only);
+ }
+ } else {
+ netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
+ vmxnet3_poll);
+ }
+
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+
+ netif_carrier_off(netdev);
+ err = register_netdev(netdev);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register adapter\n");
+ goto err_register;
+ }
+
+ vmxnet3_check_link(adapter, false);
+ return 0;
+
+err_register:
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ adapter->coal_conf, adapter->coal_conf_pa);
+ }
+ vmxnet3_free_intr_resources(adapter);
+err_coal_conf:
+#ifdef VMXNET3_RSS
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+ adapter->rss_conf, adapter->rss_conf_pa);
+err_alloc_rss:
+#endif
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+ adapter->pm_conf, adapter->pm_conf_pa);
+err_alloc_pm:
+ dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
+err_ver:
+ vmxnet3_free_pci_resources(adapter);
+err_alloc_pci:
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
+err_alloc_shared:
+ dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
+err_set_mask:
+ free_netdev(netdev);
+ return err;
+}
+
+
+static void
+vmxnet3_remove_device(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int size = 0;
+ int num_rx_queues, rx_queues;
+ unsigned long flags;
+
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
+ if (!VMXNET3_VERSION_GE_6(adapter)) {
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ }
+ if (VMXNET3_VERSION_GE_6(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (rx_queues > 0)
+ rx_queues = (rx_queues >> 8) & 0xff;
+ else
+ rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ num_rx_queues = min(num_rx_queues, rx_queues);
+ } else {
+ num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ }
+
+ cancel_work_sync(&adapter->work);
+
+ unregister_netdev(netdev);
+
+ vmxnet3_free_intr_resources(adapter);
+ vmxnet3_free_pci_resources(adapter);
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ adapter->coal_conf, adapter->coal_conf_pa);
+ }
+#ifdef VMXNET3_RSS
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+ adapter->rss_conf, adapter->rss_conf_pa);
+#endif
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+ adapter->pm_conf, adapter->pm_conf_pa);
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
+ dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
+ dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
+ free_netdev(netdev);
+}
+
+static void vmxnet3_shutdown_device(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+
+ /* Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+ */
+ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
+ &adapter->state)) {
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+ return;
+ }
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_QUIESCE_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ vmxnet3_disable_all_intrs(adapter);
+
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+}
+
+
+#ifdef CONFIG_PM
+
+static int
+vmxnet3_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_PMConf *pmConf;
+ struct ethhdr *ehdr;
+ struct arphdr *ahdr;
+ u8 *arpreq;
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ unsigned long flags;
+ int i = 0;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
+
+ vmxnet3_disable_all_intrs(adapter);
+ vmxnet3_free_irqs(adapter);
+ vmxnet3_free_intr_resources(adapter);
+
+ netif_device_detach(netdev);
+
+ /* Create wake-up filters. */
+ pmConf = adapter->pm_conf;
+ memset(pmConf, 0, sizeof(*pmConf));
+
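+ /* Each pattern filter carries a byte pattern plus a bitmask in which
+ * every set bit enables matching of the corresponding pattern byte.
+ */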
+ if (adapter->wol & WAKE_UCAST) {
+ pmConf->filters[i].patternSize = ETH_ALEN;
+ pmConf->filters[i].maskSize = 1;
+ memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
+ pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
+
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+ i++;
+ }
+
+ if (adapter->wol & WAKE_ARP) {
+ rcu_read_lock();
+
+ in_dev = __in_dev_get_rcu(netdev);
+ if (!in_dev) {
+ rcu_read_unlock();
+ goto skip_arp;
+ }
+
+ ifa = rcu_dereference(in_dev->ifa_list);
+ if (!ifa) {
+ rcu_read_unlock();
+ goto skip_arp;
+ }
+
+ pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
+ sizeof(struct arphdr) + /* ARP header */
+ 2 * ETH_ALEN + /* 2 Ethernet addresses*/
+ 2 * sizeof(u32); /*2 IPv4 addresses */
+ pmConf->filters[i].maskSize =
+ (pmConf->filters[i].patternSize - 1) / 8 + 1;
+
+ /* ETH_P_ARP in Ethernet header. */
+ ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
+ ehdr->h_proto = htons(ETH_P_ARP);
+
+ /* ARPOP_REQUEST in ARP header. */
+ ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
+ ahdr->ar_op = htons(ARPOP_REQUEST);
+ arpreq = (u8 *)(ahdr + 1);
+
+ /* The Unicast IPv4 address in 'tip' field. */
+ arpreq += 2 * ETH_ALEN + sizeof(u32);
+ *(__be32 *)arpreq = ifa->ifa_address;
+
+ rcu_read_unlock();
+
+ /* The mask for the relevant bits. */
+ pmConf->filters[i].mask[0] = 0x00;
+ pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
+ pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
+ pmConf->filters[i].mask[3] = 0x00;
+ pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
+ pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
+
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+ i++;
+ }
+
+skip_arp:
+ if (adapter->wol & WAKE_MAGIC)
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
+
+ pmConf->numFilters = i;
+
+ adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+ adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+ *pmConf));
+ adapter->shared->devRead.pmConfDesc.confPA =
+ cpu_to_le64(adapter->pm_conf_pa);
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_PMCFG);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
+ adapter->wol);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
+
+ return 0;
+}
+
+
+static int
+vmxnet3_resume(struct device *device)
+{
+ int err;
+ unsigned long flags;
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err != 0)
+ return err;
+
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ vmxnet3_alloc_intr_resources(adapter);
+
+ /* During hibernate and suspend, the device has to be reinitialized as its
+ * state may not be preserved.
+ */
+
+ /* Need not check adapter state as other reset tasks cannot run during
+ * device resume.
+ */
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_QUIESCE_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ vmxnet3_tq_cleanup_all(adapter);
+ vmxnet3_rq_cleanup_all(adapter);
+
+ vmxnet3_reset_dev(adapter);
+ err = vmxnet3_activate_dev(adapter);
+ if (err != 0) {
+ netdev_err(netdev,
+ "failed to re-activate on resume, error: %d", err);
+ vmxnet3_force_close(adapter);
+ return err;
+ }
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops vmxnet3_pm_ops = {
+ .suspend = vmxnet3_suspend,
+ .resume = vmxnet3_resume,
+ .freeze = vmxnet3_suspend,
+ .restore = vmxnet3_resume,
+};
+#endif
+
+static struct pci_driver vmxnet3_driver = {
+ .name = vmxnet3_driver_name,
+ .id_table = vmxnet3_pciid_table,
+ .probe = vmxnet3_probe_device,
+ .remove = vmxnet3_remove_device,
+ .shutdown = vmxnet3_shutdown_device,
+#ifdef CONFIG_PM
+ .driver.pm = &vmxnet3_pm_ops,
+#endif
+};
+
+
+static int __init
+vmxnet3_init_module(void)
+{
+ pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
+ VMXNET3_DRIVER_VERSION_REPORT);
+ return pci_register_driver(&vmxnet3_driver);
+}
+
+module_init(vmxnet3_init_module);
+
+
+static void
+vmxnet3_exit_module(void)
+{
+ pci_unregister_driver(&vmxnet3_driver);
+}
+
+module_exit(vmxnet3_exit_module);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
new file mode 100644
index 000000000..98c22d7d8
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -0,0 +1,1377 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: pv-drivers@vmware.com
+ *
+ */
+
+
+#include "vmxnet3_int.h"
+#include <net/vxlan.h>
+#include <net/geneve.h>
+#include "vmxnet3_xdp.h"
+
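+/* Legacy Linux default VXLAN destination port, accepted alongside the
+ * IANA-assigned IANA_VXLAN_UDP_PORT in vmxnet3_features_check().
+ */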
+#define VXLAN_UDP_PORT 8472
+
+struct vmxnet3_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ int offset;
+};
+
+
+/* per tq stats maintained by the device */
+static const struct vmxnet3_stat_desc
+vmxnet3_tq_dev_stats[] = {
+ /* description, offset */
+ { "Tx Queue#", 0 },
+ { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+ { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+ { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+ { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+ { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+ { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+ { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+ { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+ { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
+ { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+};
+
+/* per tq stats maintained by the driver */
+static const struct vmxnet3_stat_desc
+vmxnet3_tq_driver_stats[] = {
+ /* description, offset */
+ {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_total) },
+ { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_too_many_frags) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_oversized_hdr) },
+ { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_hdr_inspect_err) },
+ { " tso", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_tso) },
+ { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
+ tx_ring_full) },
+ { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
+ linearized) },
+ { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
+ copy_skb_header) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ oversized_hdr) },
+ { " xdp xmit", offsetof(struct vmxnet3_tq_driver_stats,
+ xdp_xmit) },
+ { " xdp xmit err", offsetof(struct vmxnet3_tq_driver_stats,
+ xdp_xmit_err) },
+};
+
+/* per rq stats maintained by the device */
+static const struct vmxnet3_stat_desc
+vmxnet3_rq_dev_stats[] = {
+ { "Rx Queue#", 0 },
+ { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+ { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+ { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+ { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+ { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+ { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+ { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+ { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+ { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+ { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
+};
+
+/* per rq stats maintained by the driver */
+static const struct vmxnet3_stat_desc
+vmxnet3_rq_driver_stats[] = {
+ /* description, offset */
+ { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_total) },
+ { " err", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_err) },
+ { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_fcs) },
+ { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+ rx_buf_alloc_failure) },
+ { " xdp packets", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_packets) },
+ { " xdp tx", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_tx) },
+ { " xdp redirects", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_redirects) },
+ { " xdp drops", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_drops) },
+ { " xdp aborted", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_aborted) },
+};
+
+/* global stats maintained by the driver */
+static const struct vmxnet3_stat_desc
+vmxnet3_global_stats[] = {
+ /* description, offset */
+ { "tx timeout count", offsetof(struct vmxnet3_adapter,
+ tx_timeout_count) }
+};
+
+
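+/* Ask the device to refresh its statistics in the shared area, then
+ * aggregate the per-queue device counters and driver drop counts.
+ */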
+void
+vmxnet3_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct vmxnet3_adapter *adapter;
+ struct vmxnet3_tq_driver_stats *drvTxStats;
+ struct vmxnet3_rq_driver_stats *drvRxStats;
+ struct UPT1_TxStats *devTxStats;
+ struct UPT1_RxStats *devRxStats;
+ unsigned long flags;
+ int i;
+
+ adapter = netdev_priv(netdev);
+
+ /* Collect the dev stats into the shared area */
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ devTxStats = &adapter->tqd_start[i].stats;
+ drvTxStats = &adapter->tx_queue[i].stats;
+ stats->tx_packets += devTxStats->ucastPktsTxOK +
+ devTxStats->mcastPktsTxOK +
+ devTxStats->bcastPktsTxOK;
+ stats->tx_bytes += devTxStats->ucastBytesTxOK +
+ devTxStats->mcastBytesTxOK +
+ devTxStats->bcastBytesTxOK;
+ stats->tx_errors += devTxStats->pktsTxError;
+ stats->tx_dropped += drvTxStats->drop_total;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ devRxStats = &adapter->rqd_start[i].stats;
+ drvRxStats = &adapter->rx_queue[i].stats;
+ stats->rx_packets += devRxStats->ucastPktsRxOK +
+ devRxStats->mcastPktsRxOK +
+ devRxStats->bcastPktsRxOK;
+
+ stats->rx_bytes += devRxStats->ucastBytesRxOK +
+ devRxStats->mcastBytesRxOK +
+ devRxStats->bcastBytesRxOK;
+
+ stats->rx_errors += devRxStats->pktsRxError;
+ stats->rx_dropped += drvRxStats->drop_total;
+ stats->multicast += devRxStats->mcastPktsRxOK;
+ }
+}
+
+static int
+vmxnet3_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+ adapter->num_tx_queues +
+ (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+ adapter->num_rx_queues +
+ ARRAY_SIZE(vmxnet3_global_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
+ * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
+ * Therefore, if any registers are added, removed or modified, then a version
+ * bump and a corresponding change in the vmxnet3 support for ethtool(8)
+ * --register-dump would be required.
+ */
+static int
+vmxnet3_get_regs_len(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ return ((9 /* BAR1 registers */ +
+ (1 + adapter->intr.num_intrs) +
+ (1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
+ (1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
+ sizeof(u32));
+}
+
+
+static void
+vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
+
+ strscpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
+ sizeof(drvinfo->version));
+
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+
+static void
+vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc);
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc);
+ }
+
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc);
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
+ ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc);
+}
+
+netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ /* If XDP is enabled, then LRO should not be enabled */
+ if (vmxnet3_xdp_enabled(adapter) && (features & NETIF_F_LRO)) {
+ netdev_err(netdev, "LRO is not supported with XDP");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
+netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ /* Validate if the tunneled packet is being offloaded by the device */
+ if (VMXNET3_VERSION_GE_4(adapter) &&
+ skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4_proto = 0;
+ u16 port;
+ struct udphdr *udph;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ udph = udp_hdr(skb);
+ port = be16_to_cpu(udph->dest);
+ /* Check if offloaded port is supported */
+ if (port != GENEVE_UDP_PORT &&
+ port != IANA_VXLAN_UDP_PORT &&
+ port != VXLAN_UDP_PORT) {
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+ break;
+ default:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+ }
+ return features;
+}
+
+static void vmxnet3_enable_encap_offloads(struct net_device *netdev, netdev_features_t features)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO;
+ if (features & NETIF_F_GSO_UDP_TUNNEL)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_TSO)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
+ }
+ if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
+ !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
+ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+ }
+}
+
+static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (VMXNET3_VERSION_GE_4(adapter)) {
+ netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ }
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ unsigned long flags;
+
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_GENEVE_TSO |
+ 1UL << VMXNET3_CAP_VXLAN_TSO |
+ 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD |
+ 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD);
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+}
+
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ netdev_features_t changed = features ^ netdev->features;
+ netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0;
+
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
+ NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) {
+ if (features & NETIF_F_RXCSUM)
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXCSUM;
+ else
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXCSUM;
+
+ /* update hardware LRO capability accordingly */
+ if (features & NETIF_F_LRO)
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_LRO;
+ else
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_LRO;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXVLAN;
+ else
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXVLAN;
+
+ if ((features & tun_offload_mask) != 0) {
+ vmxnet3_enable_encap_offloads(netdev, features);
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXINNEROFLD;
+ } else if ((features & tun_offload_mask) == 0 &&
+ udp_tun_enabled) {
+ vmxnet3_disable_encap_offloads(netdev);
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXINNEROFLD;
+ }
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+ return 0;
+}
+
+static void
+vmxnet3_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ u8 *base;
+ int i;
+ int j = 0;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ /* this does assume each counter is 64-bit wide */
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ base = (u8 *)&adapter->tqd_start[j].stats;
+ *buf++ = (u64)j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->tx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_driver_stats[i].offset);
+ }
+
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ base = (u8 *)&adapter->rqd_start[j].stats;
+ *buf++ = (u64) j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->rx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_driver_stats[i].offset);
+ }
+
+ base = (u8 *)adapter;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
+ *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
+}
+
+
+/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
+ * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
+ * Therefore, if any registers are added, removed or modified, then a version
+ * bump and a corresponding change in the vmxnet3 support for ethtool(8)
+ * --register-dump would be required.
+ */
+static void
+vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u32 *buf = p;
+ int i = 0, j = 0;
+
+ memset(p, 0, vmxnet3_get_regs_len(netdev));
+
+ regs->version = 2;
+
+ /* Update vmxnet3_get_regs_len if we want to dump more registers */
+
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
+ buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);
+
+ buf[j++] = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++) {
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR
+ + i * VMXNET3_REG_ALIGN);
+ }
+
+ buf[j++] = adapter->num_tx_queues;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->tx_prod_offset +
+ i * VMXNET3_REG_ALIGN);
+
+ buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
+ buf[j++] = tq->tx_ring.size;
+ buf[j++] = tq->tx_ring.next2fill;
+ buf[j++] = tq->tx_ring.next2comp;
+ buf[j++] = tq->tx_ring.gen;
+
+ buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
+ buf[j++] = tq->data_ring.size;
+ buf[j++] = tq->txdata_desc_size;
+
+ buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
+ buf[j++] = tq->comp_ring.size;
+ buf[j++] = tq->comp_ring.next2proc;
+ buf[j++] = tq->comp_ring.gen;
+
+ buf[j++] = tq->stopped;
+ }
+
+ buf[j++] = adapter->num_rx_queues;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod_offset +
+ i * VMXNET3_REG_ALIGN);
+ buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod2_offset +
+ i * VMXNET3_REG_ALIGN);
+
+ buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
+ buf[j++] = rq->rx_ring[0].size;
+ buf[j++] = rq->rx_ring[0].next2fill;
+ buf[j++] = rq->rx_ring[0].next2comp;
+ buf[j++] = rq->rx_ring[0].gen;
+
+ buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
+ buf[j++] = rq->rx_ring[1].size;
+ buf[j++] = rq->rx_ring[1].next2fill;
+ buf[j++] = rq->rx_ring[1].next2comp;
+ buf[j++] = rq->rx_ring[1].gen;
+
+ buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
+ buf[j++] = rq->rx_ring[0].size;
+ buf[j++] = rq->data_ring.desc_size;
+
+ buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
+ buf[j++] = rq->comp_ring.size;
+ buf[j++] = rq->comp_ring.next2proc;
+ buf[j++] = rq->comp_ring.gen;
+ }
+}
+
+
+static void
+vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
+ wol->wolopts = adapter->wol;
+}
+
+
+static int
+vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
+ WAKE_MAGICSECURE)) {
+ return -EOPNOTSUPP;
+ }
+
+ adapter->wol = wol->wolopts;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+
+static int
+vmxnet3_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
+ ecmd->base.port = PORT_TP;
+
+ if (adapter->link_speed) {
+ ecmd->base.speed = adapter->link_speed;
+ ecmd->base.duplex = DUPLEX_FULL;
+ } else {
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static void
+vmxnet3_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
+ param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
+ param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
+ param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
+
+ param->rx_pending = adapter->rx_ring_size;
+ param->tx_pending = adapter->tx_ring_size;
+ param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
+ adapter->rxdata_desc_size : 0;
+ param->rx_jumbo_pending = adapter->rx_ring2_size;
+}
+
+static int
+vmxnet3_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
+ u16 new_rxdata_desc_size;
+ u32 sz;
+ int err = 0;
+
+ if (param->tx_pending == 0 || param->tx_pending >
+ VMXNET3_TX_RING_MAX_SIZE)
+ return -EINVAL;
+
+ if (param->rx_pending == 0 || param->rx_pending >
+ VMXNET3_RX_RING_MAX_SIZE)
+ return -EINVAL;
+
+ if (param->rx_jumbo_pending == 0 ||
+ param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
+ return -EINVAL;
+
+	/* ring sizes cannot be changed before the adapter is fully initialized */
+ if (adapter->rx_buf_per_pkt == 0) {
+ netdev_err(netdev, "adapter not completely initialized, "
+ "ring size cannot be changed yet\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
+ return -EINVAL;
+ } else if (param->rx_mini_pending != 0) {
+ return -EINVAL;
+ }
+
+ /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
+ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
+ ~VMXNET3_RING_SIZE_MASK;
+ new_tx_ring_size = min_t(u32, new_tx_ring_size,
+ VMXNET3_TX_RING_MAX_SIZE);
+ if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
+ VMXNET3_RING_SIZE_ALIGN) != 0)
+ return -EINVAL;
+
+ /* ring0 has to be a multiple of
+ * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
+ */
+ sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
+ new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
+ new_rx_ring_size = min_t(u32, new_rx_ring_size,
+ VMXNET3_RX_RING_MAX_SIZE / sz * sz);
+ if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
+ sz) != 0)
+ return -EINVAL;
+
+ /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
+ new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
+ ~VMXNET3_RING_SIZE_MASK;
+ new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
+ VMXNET3_RX_RING2_MAX_SIZE);
+
+ /* For v7 and later, keep ring size power of 2 for UPT */
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size);
+ new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size);
+ new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size);
+ }
+
+ /* rx data ring buffer size has to be a multiple of
+ * VMXNET3_RXDATA_DESC_SIZE_ALIGN
+ */
+ new_rxdata_desc_size =
+ (param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
+ ~VMXNET3_RXDATA_DESC_SIZE_MASK;
+ new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
+ VMXNET3_RXDATA_DESC_MAX_SIZE);
+
+ if (new_tx_ring_size == adapter->tx_ring_size &&
+ new_rx_ring_size == adapter->rx_ring_size &&
+ new_rx_ring2_size == adapter->rx_ring2_size &&
+ new_rxdata_desc_size == adapter->rxdata_desc_size) {
+ return 0;
+ }
+
+ /*
+ * Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+ */
+ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(netdev)) {
+ vmxnet3_quiesce_dev(adapter);
+ vmxnet3_reset_dev(adapter);
+
+ /* recreate the rx queue and the tx queue based on the
+ * new sizes */
+ vmxnet3_tq_destroy_all(adapter);
+ vmxnet3_rq_destroy_all(adapter);
+
+ err = vmxnet3_create_queues(adapter, new_tx_ring_size,
+ new_rx_ring_size, new_rx_ring2_size,
+ adapter->txdata_desc_size,
+ new_rxdata_desc_size);
+ if (err) {
+ /* failed, most likely because of OOM, try default
+ * size */
+ netdev_err(netdev, "failed to apply new sizes, "
+ "try the default ones\n");
+ new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
+ new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+ new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
+ err = vmxnet3_create_queues(adapter,
+ new_tx_ring_size,
+ new_rx_ring_size,
+ new_rx_ring2_size,
+ adapter->txdata_desc_size,
+ new_rxdata_desc_size);
+ if (err) {
+ netdev_err(netdev, "failed to create queues "
+ "with default sizes. Closing it\n");
+ goto out;
+ }
+ }
+
+ err = vmxnet3_activate_dev(adapter);
+ if (err)
+ netdev_err(netdev, "failed to re-activate, error %d."
+ " Closing it\n", err);
+ }
+ adapter->tx_ring_size = new_tx_ring_size;
+ adapter->rx_ring_size = new_rx_ring_size;
+ adapter->rx_ring2_size = new_rx_ring2_size;
+ adapter->rxdata_desc_size = new_rxdata_desc_size;
+
+out:
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+ if (err)
+ vmxnet3_force_close(adapter);
+
+ return err;
+}
+
+static int
+vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
+ struct ethtool_rxnfc *info)
+{
+ enum Vmxnet3_RSSField rss_fields;
+
+ if (netif_running(adapter->netdev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ } else {
+ rss_fields = adapter->rss_fields;
+ }
+
+ info->data = 0;
+
+ /* Report default options for RSS on vmxnet3 */
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ if (VMXNET3_VERSION_GE_6(adapter) &&
+ (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6))
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_set_rss_hash_opt(struct net_device *netdev,
+ struct vmxnet3_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ if (!VMXNET3_VERSION_GE_6(adapter))
+ return -EOPNOTSUPP;
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (rss_fields != adapter->rss_fields) {
+ adapter->default_rss_fields = false;
+ if (netif_running(netdev)) {
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (VMXNET3_VERSION_GE_7(adapter)) {
+ if ((rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
+ rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_UDP_RSS)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
+ }
+ if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV4)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
+ }
+ if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
+ vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+ VMXNET3_CAP_ESP_RSS_IPV6)) {
+ adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
+ } else {
+ adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR,
+ adapter->dev_caps[0]);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_DCR0_REG);
+ adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter,
+ VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->setRssFields = rss_fields;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_RSS_FIELDS);
+
+ /* Not all requested RSS may get applied, so get and
+ * cache what was actually applied.
+ */
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_RSS_FIELDS);
+ adapter->rss_fields =
+ VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ } else {
+ /* When the device is activated, we will try to apply
+ * these rules and cache the applied value later.
+ */
+ adapter->rss_fields = rss_fields;
+ }
+ }
+ return 0;
+}
+
+static int
+vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = adapter->num_rx_queues;
+ break;
+ case ETHTOOL_GRXFH:
+ if (!VMXNET3_VERSION_GE_4(adapter)) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+#ifdef VMXNET3_RSS
+ if (!adapter->rss) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+#endif
+ err = vmxnet3_get_rss_hash_opts(adapter, info);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int
+vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!VMXNET3_VERSION_GE_4(adapter)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+#ifdef VMXNET3_RSS
+ if (!adapter->rss) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+#endif
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+done:
+ return err;
+}
+
+#ifdef VMXNET3_RSS
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ return rssConf->indTableSize;
+}
+
+static int
+vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ unsigned int n = rssConf->indTableSize;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!p)
+ return 0;
+ if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
+ return 0;
+ while (n--)
+ p[n] = rssConf->indTable[n];
+ return 0;
+
+}
+
+static int
+vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
+ const u8 hfunc)
+{
+ unsigned int i;
+ unsigned long flags;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!p)
+ return 0;
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = p[i];
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RSSIDT);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ return 0;
+
+}
+#endif
+
+static int vmxnet3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return -EOPNOTSUPP;
+
+ switch (adapter->coal_conf->coalMode) {
+ case VMXNET3_COALESCE_DISABLED:
+ /* struct ethtool_coalesce is already initialized to 0 */
+ break;
+ case VMXNET3_COALESCE_ADAPT:
+ ec->use_adaptive_rx_coalesce = true;
+ break;
+ case VMXNET3_COALESCE_STATIC:
+ ec->tx_max_coalesced_frames =
+ adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
+ ec->rx_max_coalesced_frames =
+ adapter->coal_conf->coalPara.coalStatic.rx_depth;
+ break;
+ case VMXNET3_COALESCE_RBC: {
+ u32 rbc_rate;
+
+ rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
+ ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int vmxnet3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return -EOPNOTSUPP;
+
+ if ((ec->rx_coalesce_usecs == 0) &&
+ (ec->use_adaptive_rx_coalesce == 0) &&
+ (ec->tx_max_coalesced_frames == 0) &&
+ (ec->rx_max_coalesced_frames == 0)) {
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
+ goto done;
+ }
+
+ if (ec->rx_coalesce_usecs != 0) {
+ u32 rbc_rate;
+
+ if ((ec->use_adaptive_rx_coalesce != 0) ||
+ (ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ return -EINVAL;
+ }
+
+ rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
+ if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
+ rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
+ return -EINVAL;
+ }
+
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
+ adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
+ goto done;
+ }
+
+ if (ec->use_adaptive_rx_coalesce != 0) {
+ if (ec->tx_max_coalesced_frames != 0 ||
+ ec->rx_max_coalesced_frames != 0) {
+ return -EINVAL;
+ }
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
+ goto done;
+ }
+
+ if ((ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ if ((ec->tx_max_coalesced_frames >
+ VMXNET3_COAL_STATIC_MAX_DEPTH) ||
+ (ec->rx_max_coalesced_frames >
+ VMXNET3_COAL_STATIC_MAX_DEPTH)) {
+ return -EINVAL;
+ }
+
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;
+
+ adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
+ (ec->tx_max_coalesced_frames ?
+ ec->tx_max_coalesced_frames :
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+ adapter->coal_conf->coalPara.coalStatic.rx_depth =
+ (ec->rx_max_coalesced_frames ?
+ ec->rx_max_coalesced_frames :
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+ adapter->coal_conf->coalPara.coalStatic.tx_depth =
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
+ goto done;
+ }
+
+done:
+ adapter->default_coal_mode = false;
+ if (netif_running(netdev)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen =
+ cpu_to_le32(sizeof(*adapter->coal_conf));
+ cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_COALESCE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ return 0;
+}
+
+static void vmxnet3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ec)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI) && adapter->intr.type == VMXNET3_IT_MSIX) {
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+ ec->combined_count = adapter->num_tx_queues;
+ } else {
+ ec->rx_count = adapter->num_rx_queues;
+ ec->tx_count =
+ adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ }
+ } else {
+ ec->combined_count = 1;
+ }
+
+ ec->other_count = 1;
+
+ /* Number of interrupts cannot be changed on the fly */
+ /* Just set maximums to actual values */
+ ec->max_rx = ec->rx_count;
+ ec->max_tx = ec->tx_count;
+ ec->max_combined = ec->combined_count;
+ ec->max_other = ec->other_count;
+}
+
+static const struct ethtool_ops vmxnet3_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_drvinfo = vmxnet3_get_drvinfo,
+ .get_regs_len = vmxnet3_get_regs_len,
+ .get_regs = vmxnet3_get_regs,
+ .get_wol = vmxnet3_get_wol,
+ .set_wol = vmxnet3_set_wol,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = vmxnet3_get_coalesce,
+ .set_coalesce = vmxnet3_set_coalesce,
+ .get_strings = vmxnet3_get_strings,
+ .get_sset_count = vmxnet3_get_sset_count,
+ .get_ethtool_stats = vmxnet3_get_ethtool_stats,
+ .get_ringparam = vmxnet3_get_ringparam,
+ .set_ringparam = vmxnet3_set_ringparam,
+ .get_rxnfc = vmxnet3_get_rxnfc,
+ .set_rxnfc = vmxnet3_set_rxnfc,
+#ifdef VMXNET3_RSS
+ .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
+ .get_rxfh = vmxnet3_get_rss,
+ .set_rxfh = vmxnet3_set_rss,
+#endif
+ .get_link_ksettings = vmxnet3_get_link_ksettings,
+ .get_channels = vmxnet3_get_channels,
+};
+
+void vmxnet3_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &vmxnet3_ethtool_ops;
+}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
new file mode 100644
index 000000000..915aaf18c
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -0,0 +1,538 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: pv-drivers@vmware.com
+ *
+ */
+
+#ifndef _VMXNET3_INT_H
+#define _VMXNET3_INT_H
+
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/ioport.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include <asm/dma.h>
+#include <asm/page.h>
+
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <asm/checksum.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/log2.h>
+#include <linux/bpf.h>
+#include <net/page_pool/helpers.h>
+#include <net/xdp.h>
+
+#include "vmxnet3_defs.h"
+
+#ifdef DEBUG
+# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
+#else
+# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
+#endif
+
+
+/*
+ * Version numbers
+ */
+#define VMXNET3_DRIVER_VERSION_STRING "1.7.0.0-k"
+
+/* Each byte of this 32-bit integer encodes a version number in
+ * VMXNET3_DRIVER_VERSION_STRING.
+ */
+#define VMXNET3_DRIVER_VERSION_NUM 0x01070000
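+/* For example, 0x01070000 encodes "1.7.0.0": one version component per byte,
+ * most significant byte first.
+ */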
+
+#if defined(CONFIG_PCI_MSI)
+ /* RSS only makes sense if MSI-X is supported. */
+ #define VMXNET3_RSS
+#endif
+
+#define VMXNET3_REV_7 6 /* Vmxnet3 Rev. 7 */
+#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
+#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
+#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */
+#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
+#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
+#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
+
+/*
+ * Capabilities
+ */
+
+enum {
+ VMNET_CAP_SG = 0x0001, /* Can do scatter-gather transmits. */
+ VMNET_CAP_IP4_CSUM = 0x0002, /* Can checksum only TCP/UDP over
+ * IPv4 */
+ VMNET_CAP_HW_CSUM = 0x0004, /* Can checksum all packets. */
+ VMNET_CAP_HIGH_DMA = 0x0008, /* Can DMA to high memory. */
+ VMNET_CAP_TOE = 0x0010, /* Supports TCP/IP offload. */
+ VMNET_CAP_TSO = 0x0020, /* Supports TCP Segmentation
+ * offload */
+ VMNET_CAP_SW_TSO = 0x0040, /* Supports SW TCP Segmentation */
+ VMNET_CAP_VMXNET_APROM = 0x0080, /* Vmxnet APROM support */
+ VMNET_CAP_HW_TX_VLAN = 0x0100, /* Can we do VLAN tagging in HW */
+ VMNET_CAP_HW_RX_VLAN = 0x0200, /* Can we do VLAN untagging in HW */
+ VMNET_CAP_SW_VLAN = 0x0400, /* VLAN tagging/untagging in SW */
+ VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
+ VMNET_CAP_ENABLE_INT_INLINE = 0x1000, /* Enable Interrupt Inline */
+ VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */
+ VMNET_CAP_TX_CHAIN = 0x4000, /* Guest can use multiple tx entries
+ * for a pkt */
+ VMNET_CAP_RX_CHAIN = 0x8000, /* pkt can span multiple rx entries */
+ VMNET_CAP_LPD = 0x10000, /* large pkt delivery */
+ VMNET_CAP_BPF = 0x20000, /* BPF Support in VMXNET Virtual HW*/
+	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather transmits can */
+					   /* span multiple pages. */
+ VMNET_CAP_IP6_CSUM = 0x80000, /* Can do IPv6 csum offload. */
+ VMNET_CAP_TSO6 = 0x100000, /* TSO seg. offload for IPv6 pkts. */
+ VMNET_CAP_TSO256k = 0x200000, /* Can do TSO seg offload for */
+ /* pkts up to 256kB. */
+ VMNET_CAP_UPT = 0x400000 /* Support UPT */
+};
+
+/*
+ * Maximum devices supported.
+ */
+#define MAX_ETHERNET_CARDS 10
+#define MAX_PCI_PASSTHRU_DEVICE 6
+
+struct vmxnet3_cmd_ring {
+ union Vmxnet3_GenericDesc *base;
+ u32 size;
+ u32 next2fill;
+ u32 next2comp;
+ u8 gen;
+ u8 isOutOfOrder;
+ dma_addr_t basePA;
+};
+
+static inline void
+vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
+{
+ ring->next2fill++;
+ if (unlikely(ring->next2fill == ring->size)) {
+ ring->next2fill = 0;
+ VMXNET3_FLIP_RING_GEN(ring->gen);
+ }
+}
+
+static inline void
+vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
+{
+ VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
+}
+
+static inline int
+vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
+{
+ return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
+ ring->next2comp - ring->next2fill - 1;
+}
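+/* Example: with size = 8, next2fill = 5 and next2comp = 2, the helper above
+ * returns 8 + 2 - 5 - 1 = 4 free descriptors. One descriptor is always kept
+ * unused so that next2fill == next2comp unambiguously means "ring empty".
+ */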
+
+struct vmxnet3_comp_ring {
+ union Vmxnet3_GenericDesc *base;
+ u32 size;
+ u32 next2proc;
+ u8 gen;
+ u8 intr_idx;
+ dma_addr_t basePA;
+};
+
+static inline void
+vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
+{
+ ring->next2proc++;
+ if (unlikely(ring->next2proc == ring->size)) {
+ ring->next2proc = 0;
+ VMXNET3_FLIP_RING_GEN(ring->gen);
+ }
+}
+
+struct vmxnet3_tx_data_ring {
+ struct Vmxnet3_TxDataDesc *base;
+ u32 size;
+ dma_addr_t basePA;
+};
+
+#define VMXNET3_MAP_NONE 0
+#define VMXNET3_MAP_SINGLE BIT(0)
+#define VMXNET3_MAP_PAGE BIT(1)
+#define VMXNET3_MAP_XDP BIT(2)
+
+struct vmxnet3_tx_buf_info {
+ u32 map_type;
+ u16 len;
+ u16 sop_idx;
+ dma_addr_t dma_addr;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+};
+
+struct vmxnet3_tq_driver_stats {
+ u64 drop_total; /* # of pkts dropped by the driver, the
+ * counters below track droppings due to
+ * different reasons
+ */
+ u64 drop_too_many_frags;
+ u64 drop_oversized_hdr;
+ u64 drop_hdr_inspect_err;
+ u64 drop_tso;
+
+ u64 tx_ring_full;
+ u64 linearized; /* # of pkts linearized */
+ u64 copy_skb_header; /* # of times we have to copy skb header */
+ u64 oversized_hdr;
+
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
+};
+
+struct vmxnet3_tx_ctx {
+ bool ipv4;
+ bool ipv6;
+ u16 mss;
+ u32 l4_offset; /* only valid for pkts requesting tso or csum
+ * offloading. For encap offload, it refers to
+ * inner L4 offset i.e. it includes outer header
+ * encap header and inner eth and ip header size
+ */
+
+ u32 l4_hdr_size; /* only valid if mss != 0
+ * Refers to inner L4 hdr size for encap
+ * offload
+ */
+ u32 copy_size; /* # of bytes copied into the data ring */
+ union Vmxnet3_GenericDesc *sop_txd;
+ union Vmxnet3_GenericDesc *eop_txd;
+};
+
+struct vmxnet3_tx_queue {
+ char name[IFNAMSIZ+8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
+ spinlock_t tx_lock;
+ struct vmxnet3_cmd_ring tx_ring;
+ struct vmxnet3_tx_buf_info *buf_info;
+ struct vmxnet3_tx_data_ring data_ring;
+ struct vmxnet3_comp_ring comp_ring;
+ struct Vmxnet3_TxQueueCtrl *shared;
+ struct vmxnet3_tq_driver_stats stats;
+ bool stopped;
+ int num_stop; /* # of times the queue is
+ * stopped */
+ int qid;
+ u16 txdata_desc_size;
+} ____cacheline_aligned;
+
+enum vmxnet3_rx_buf_type {
+ VMXNET3_RX_BUF_NONE = 0,
+ VMXNET3_RX_BUF_SKB = 1,
+ VMXNET3_RX_BUF_PAGE = 2,
+ VMXNET3_RX_BUF_XDP = 3,
+};
+
+#define VMXNET3_RXD_COMP_PENDING 0
+#define VMXNET3_RXD_COMP_DONE 1
+
+struct vmxnet3_rx_buf_info {
+ enum vmxnet3_rx_buf_type buf_type;
+ u16 len;
+ u8 comp_state;
+ union {
+ struct sk_buff *skb;
+ struct page *page;
+ };
+ dma_addr_t dma_addr;
+};
+
+struct vmxnet3_rx_ctx {
+ struct sk_buff *skb;
+ u32 sop_idx;
+};
+
+struct vmxnet3_rq_driver_stats {
+ u64 drop_total;
+ u64 drop_err;
+ u64 drop_fcs;
+ u64 rx_buf_alloc_failure;
+
+ u64 xdp_packets; /* Total packets processed by XDP. */
+ u64 xdp_tx;
+ u64 xdp_redirects;
+ u64 xdp_drops;
+ u64 xdp_aborted;
+};
+
+struct vmxnet3_rx_data_ring {
+ Vmxnet3_RxDataDesc *base;
+ dma_addr_t basePA;
+ u16 desc_size;
+};
+
+struct vmxnet3_rx_queue {
+ char name[IFNAMSIZ + 8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
+ struct napi_struct napi;
+ struct vmxnet3_cmd_ring rx_ring[2];
+ struct vmxnet3_rx_data_ring data_ring;
+ struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_ctx rx_ctx;
+ u32 qid; /* rqID in RCD for buffer from 1st ring */
+ u32 qid2; /* rqID in RCD for buffer from 2nd ring */
+ u32 dataRingQid; /* rqID in RCD for buffer from data ring */
+ struct vmxnet3_rx_buf_info *buf_info[2];
+ struct Vmxnet3_RxQueueCtrl *shared;
+ struct vmxnet3_rq_driver_stats stats;
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+} ____cacheline_aligned;
+
+#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
+#define VMXNET3_DEVICE_MAX_RX_QUEUES 32 /* Keep this value as a power of 2 */
+
+#define VMXNET3_DEVICE_DEFAULT_TX_QUEUES 8
+#define VMXNET3_DEVICE_DEFAULT_RX_QUEUES 8 /* Keep this value as a power of 2 */
+
+/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
+#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
+
+#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
+ VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
+#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for tx, 1 for rx pair and 1 for event */
+
+
+struct vmxnet3_intr {
+ enum vmxnet3_intr_mask_mode mask_mode;
+ enum vmxnet3_intr_type type; /* MSI-X, MSI, or INTx? */
+ u8 num_intrs; /* # of intr vectors */
+ u8 event_intr_idx; /* idx of the intr vector for event */
+ u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
+ char event_msi_vector_name[IFNAMSIZ+17];
+#ifdef CONFIG_PCI_MSI
+ struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
+#endif
+};
+
+/* Interrupt sharing schemes, share_intr */
+#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
+#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
+#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
+
+
+#define VMXNET3_STATE_BIT_RESETTING 0
+#define VMXNET3_STATE_BIT_QUIESCED 1
+struct vmxnet3_adapter {
+ struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
+ struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct vmxnet3_intr intr;
+ spinlock_t cmd_lock;
+ struct Vmxnet3_DriverShared *shared;
+ struct Vmxnet3_PMConf *pm_conf;
+ struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
+ struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ u8 __iomem *hw_addr0; /* for BAR 0 */
+ u8 __iomem *hw_addr1; /* for BAR 1 */
+ u8 version;
+
+#ifdef VMXNET3_RSS
+ struct UPT1_RSSConf *rss_conf;
+ bool rss;
+#endif
+ u32 num_rx_queues;
+ u32 num_tx_queues;
+
+ /* rx buffer related */
+ unsigned skb_buf_size;
+ int rx_buf_per_pkt; /* only apply to the 1st ring */
+ dma_addr_t shared_pa;
+ dma_addr_t queue_desc_pa;
+ dma_addr_t coal_conf_pa;
+
+ /* Wake-on-LAN */
+ u32 wol;
+
+ /* Link speed */
+ u32 link_speed; /* in mbps */
+
+ u64 tx_timeout_count;
+
+ /* Ring sizes */
+ u32 tx_ring_size;
+ u32 rx_ring_size;
+ u32 rx_ring2_size;
+
+ /* Size of buffer in the data ring */
+ u16 txdata_desc_size;
+ u16 rxdata_desc_size;
+
+ bool rxdataring_enabled;
+ bool default_rss_fields;
+ enum Vmxnet3_RSSField rss_fields;
+
+ struct work_struct work;
+
+ unsigned long state; /* VMXNET3_STATE_BIT_xxx */
+
+ int share_intr;
+
+ struct Vmxnet3_CoalesceScheme *coal_conf;
+ bool default_coal_mode;
+
+ dma_addr_t adapter_pa;
+ dma_addr_t pm_conf_pa;
+ dma_addr_t rss_conf_pa;
+ bool queuesExtEnabled;
+ struct Vmxnet3_RingBufferSize ringBufSize;
+ u32 devcap_supported[8];
+ u32 ptcap_supported[8];
+ u32 dev_caps[8];
+ u16 tx_prod_offset;
+ u16 rx_prod_offset;
+ u16 rx_prod2_offset;
+ struct bpf_prog __rcu *xdp_bpf_prog;
+};
+
+#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
+ writel((val), (adapter)->hw_addr0 + (reg))
+#define VMXNET3_READ_BAR0_REG(adapter, reg) \
+ readl((adapter)->hw_addr0 + (reg))
+
+#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
+ writel((val), (adapter)->hw_addr1 + (reg))
+#define VMXNET3_READ_BAR1_REG(adapter, reg) \
+ readl((adapter)->hw_addr1 + (reg))
+
+#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
+#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
+ ((rq)->rx_ring[ring_idx].size >> 3)
+
+#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma))
+#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32))
+
+#define VMXNET3_VERSION_GE_2(adapter) \
+ (adapter->version >= VMXNET3_REV_2 + 1)
+#define VMXNET3_VERSION_GE_3(adapter) \
+ (adapter->version >= VMXNET3_REV_3 + 1)
+#define VMXNET3_VERSION_GE_4(adapter) \
+ (adapter->version >= VMXNET3_REV_4 + 1)
+#define VMXNET3_VERSION_GE_5(adapter) \
+ (adapter->version >= VMXNET3_REV_5 + 1)
+#define VMXNET3_VERSION_GE_6(adapter) \
+ (adapter->version >= VMXNET3_REV_6 + 1)
+#define VMXNET3_VERSION_GE_7(adapter) \
+ (adapter->version >= VMXNET3_REV_7 + 1)
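+/* The "+ 1" in the checks above reflects that adapter->version is stored
+ * 1-based, while the VMXNET3_REV_* constants are 0-based.
+ */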
+
+/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
+#define VMXNET3_DEF_TX_RING_SIZE 512
+#define VMXNET3_DEF_RX_RING_SIZE 1024
+#define VMXNET3_DEF_RX_RING2_SIZE 512
+
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+
+#define VMXNET3_MAX_ETH_HDR_SIZE 22
+#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
+
+#define VMXNET3_GET_RING_IDX(adapter, rqID) \
+ ((rqID >= adapter->num_rx_queues && \
+ rqID < 2 * adapter->num_rx_queues) ? 1 : 0) \
+
+#define VMXNET3_RX_DATA_RING(adapter, rqID) \
+ (rqID >= 2 * adapter->num_rx_queues && \
+ rqID < 3 * adapter->num_rx_queues) \
+
+#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH 64
+
+#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
+#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)
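+/* Example: rx_coalesce_usecs = 250 maps to an rbc_rate of 1000000 / 250 =
+ * 4000 interrupts per second, and VMXNET3_COAL_RBC_USECS() maps it back.
+ */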
+#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \
+ VMXNET3_RSS_FIELDS_TCPIP6)
+
+int
+vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
+
+int
+vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_force_close(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
+
+int
+vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter);
+
+netdev_features_t
+vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);
+
+netdev_features_t
+vmxnet3_features_check(struct sk_buff *skb,
+ struct net_device *netdev, netdev_features_t features);
+
+int
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
+
+int
+vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
+ u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
+ u16 txdata_desc_size, u16 rxdata_desc_size);
+
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+
+void vmxnet3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+bool vmxnet3_check_ptcapability(u32 cap_supported, u32 cap);
+
+extern char vmxnet3_driver_name[];
+#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
new file mode 100644
index 000000000..80ddaff75
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved.
+ * Maintained by: pv-drivers@vmware.com
+ *
+ */
+
+#include "vmxnet3_int.h"
+#include "vmxnet3_xdp.h"
+
+static void
+vmxnet3_xdp_exchange_program(struct vmxnet3_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ rcu_assign_pointer(adapter->xdp_bpf_prog, prog);
+}
+
+static inline struct vmxnet3_tx_queue *
+vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
+{
+ struct vmxnet3_tx_queue *tq;
+ int tq_number;
+ int cpu;
+
+ tq_number = adapter->num_tx_queues;
+ cpu = smp_processor_id();
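+	/* Prefer a 1:1 CPU to tx queue mapping; with more CPUs than tx queues,
+	 * reciprocal_scale() maps the CPU id into the [0, tq_number) range.
+	 */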
+ if (likely(cpu < tq_number))
+ tq = &adapter->tx_queue[cpu];
+ else
+ tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
+
+ return tq;
+}
+
+static int
+vmxnet3_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf,
+ struct netlink_ext_ack *extack)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *new_bpf_prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ bool need_update;
+ bool running;
+ int err;
+
+ if (new_bpf_prog && netdev->mtu > VMXNET3_XDP_MAX_MTU) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP",
+ netdev->mtu);
+ return -EOPNOTSUPP;
+ }
+
+ if (adapter->netdev->features & NETIF_F_LRO) {
+ NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP");
+ adapter->netdev->features &= ~NETIF_F_LRO;
+ }
+
+ old_bpf_prog = rcu_dereference(adapter->xdp_bpf_prog);
+ if (!new_bpf_prog && !old_bpf_prog)
+ return 0;
+
+ running = netif_running(netdev);
+ need_update = !!old_bpf_prog != !!new_bpf_prog;
+
+ if (running && need_update)
+ vmxnet3_quiesce_dev(adapter);
+
+ vmxnet3_xdp_exchange_program(adapter, new_bpf_prog);
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+
+ if (!running || !need_update)
+ return 0;
+
+ if (new_bpf_prog)
+ xdp_features_set_redirect_target(netdev, false);
+ else
+ xdp_features_clear_redirect_target(netdev);
+
+ vmxnet3_reset_dev(adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_adjust_rx_ring_size(adapter);
+ err = vmxnet3_rq_create_all(adapter);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "failed to re-create rx queues for XDP.");
+ return -EOPNOTSUPP;
+ }
+ err = vmxnet3_activate_dev(adapter);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "failed to activate device for XDP.");
+ return -EOPNOTSUPP;
+ }
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+/* This is the main xdp call used by kernel to set/unset eBPF program. */
+int
+vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return vmxnet3_xdp_set(netdev, bpf, bpf->extack);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ struct xdp_frame *xdpf,
+ struct vmxnet3_tx_queue *tq, bool dma_map)
+{
+ struct vmxnet3_tx_buf_info *tbi = NULL;
+ union Vmxnet3_GenericDesc *gdesc;
+ struct vmxnet3_tx_ctx ctx;
+ int tx_num_deferred;
+ struct page *page;
+ u32 buf_size;
+ u32 dw2;
+
+ dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+ dw2 |= xdpf->len;
+ ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
+ gdesc = ctx.sop_txd;
+
+ buf_size = xdpf->len;
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+
+ if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
+ tq->stats.tx_ring_full++;
+ return -ENOSPC;
+ }
+
+ tbi->map_type = VMXNET3_MAP_XDP;
+ if (dma_map) { /* ndo_xdp_xmit */
+ tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
+ xdpf->data, buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ return -EFAULT;
+ tbi->map_type |= VMXNET3_MAP_SINGLE;
+ } else { /* XDP buffer from page pool */
+ page = virt_to_page(xdpf->data);
+ tbi->dma_addr = page_pool_get_dma_addr(page) +
+ VMXNET3_XDP_HEADROOM;
+ dma_sync_single_for_device(&adapter->pdev->dev,
+ tbi->dma_addr, buf_size,
+ DMA_TO_DEVICE);
+ }
+ tbi->xdpf = xdpf;
+ tbi->len = buf_size;
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+ gdesc->dword[2] = cpu_to_le32(dw2);
+
+ /* Setup the EOP desc */
+ gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
+
+ gdesc->txd.om = 0;
+ gdesc->txd.msscof = 0;
+ gdesc->txd.hlen = 0;
+ gdesc->txd.ti = 0;
+
+ tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
+ le32_add_cpu(&tq->shared->txNumDeferred, 1);
+ tx_num_deferred++;
+
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+
+ /* set the last buf_info for the pkt */
+ tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;
+
+ dma_wmb();
+ gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+ VMXNET3_TXD_GEN);
+
+ /* No need to handle the case when tx_num_deferred doesn't reach
+ * threshold. Backend driver at hypervisor side will poll and reset
+ * tq->shared->txNumDeferred to 0.
+ */
+ if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
+ tq->shared->txNumDeferred = 0;
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_TXPROD + tq->qid * 8,
+ tq->tx_ring.next2fill);
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter,
+ struct xdp_frame *xdpf)
+{
+ struct vmxnet3_tx_queue *tq;
+ struct netdev_queue *nq;
+ int err;
+
+ tq = vmxnet3_xdp_get_tq(adapter);
+ if (tq->stopped)
+ return -ENETDOWN;
+
+ nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+ __netif_tx_lock(nq, smp_processor_id());
+ err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false);
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+/* ndo_xdp_xmit */
+int
+vmxnet3_xdp_xmit(struct net_device *dev,
+ int n, struct xdp_frame **frames, u32 flags)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+ struct vmxnet3_tx_queue *tq;
+ int i;
+
+ if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
+ return -ENETDOWN;
+ if (unlikely(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)))
+ return -EINVAL;
+
+ tq = vmxnet3_xdp_get_tq(adapter);
+ if (tq->stopped)
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
+ tq->stats.xdp_xmit_err++;
+ break;
+ }
+ }
+ tq->stats.xdp_xmit += i;
+
+ return i;
+}
+
+static int
+vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
+ struct bpf_prog *prog)
+{
+ struct xdp_frame *xdpf;
+ struct page *page;
+ int err;
+ u32 act;
+
+ rq->stats.xdp_packets++;
+ act = bpf_prog_run_xdp(prog, xdp);
+ page = virt_to_page(xdp->data_hard_start);
+
+ switch (act) {
+ case XDP_PASS:
+ return act;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
+ if (!err) {
+ rq->stats.xdp_redirects++;
+ } else {
+ rq->stats.xdp_drops++;
+ page_pool_recycle_direct(rq->page_pool, page);
+ }
+ return act;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf ||
+ vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
+ rq->stats.xdp_drops++;
+ page_pool_recycle_direct(rq->page_pool, page);
+ } else {
+ rq->stats.xdp_tx++;
+ }
+ return act;
+ default:
+ bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rq->adapter->netdev, prog, act);
+ rq->stats.xdp_aborted++;
+ break;
+ case XDP_DROP:
+ rq->stats.xdp_drops++;
+ break;
+ }
+
+ page_pool_recycle_direct(rq->page_pool, page);
+
+ return act;
+}
+
+static struct sk_buff *
+vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page,
+ const struct xdp_buff *xdp)
+{
+ struct sk_buff *skb;
+
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_recycle_direct(rq->page_pool, page);
+ rq->stats.rx_buf_alloc_failure++;
+ return NULL;
+ }
+
+ /* bpf prog might change len and data position. */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+ skb_mark_for_recycle(skb);
+
+ return skb;
+}
+
+/* Handle packets from DataRing. */
+int
+vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ void *data, int len,
+ struct sk_buff **skb_xdp_pass)
+{
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ struct page *page;
+ int act;
+
+ page = page_pool_alloc_pages(rq->page_pool, GFP_ATOMIC);
+ if (unlikely(!page)) {
+ rq->stats.rx_buf_alloc_failure++;
+ return XDP_DROP;
+ }
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
+ len, false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ /* Must copy the data because it's at dataring. */
+ memcpy(xdp.data, data, len);
+
+ xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
+ if (!xdp_prog) {
+ act = XDP_PASS;
+ goto out_skb;
+ }
+ act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
+ if (act != XDP_PASS)
+ return act;
+
+out_skb:
+ *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
+ if (!*skb_xdp_pass)
+ return XDP_DROP;
+
+ /* No need to refill. */
+ return likely(*skb_xdp_pass) ? act : XDP_DROP;
+}
+
+int
+vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_buf_info *rbi,
+ struct Vmxnet3_RxDesc *rxd,
+ struct sk_buff **skb_xdp_pass)
+{
+ struct bpf_prog *xdp_prog;
+ dma_addr_t new_dma_addr;
+ struct xdp_buff xdp;
+ struct page *page;
+ void *new_data;
+ int act;
+
+ page = rbi->page;
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ page_pool_get_dma_addr(page) +
+ rq->page_pool->p.offset, rcd->len,
+ page_pool_get_dma_dir(rq->page_pool));
+
+ xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
+ rcd->len, false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
+ if (!xdp_prog) {
+ act = XDP_PASS;
+ goto out_skb;
+ }
+ act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
+
+ if (act == XDP_PASS) {
+out_skb:
+ *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
+ if (!*skb_xdp_pass)
+ act = XDP_DROP;
+ }
+
+ new_data = vmxnet3_pp_get_buff(rq->page_pool, &new_dma_addr,
+ GFP_ATOMIC);
+ if (!new_data) {
+ rq->stats.rx_buf_alloc_failure++;
+ return XDP_DROP;
+ }
+ rbi->page = virt_to_page(new_data);
+ rbi->dma_addr = new_dma_addr;
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+
+ return act;
+}
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.h b/drivers/net/vmxnet3/vmxnet3_xdp.h
new file mode 100644
index 000000000..f9d843e06
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved.
+ * Maintained by: pv-drivers@vmware.com
+ *
+ */
+
+#ifndef _VMXNET3_XDP_H
+#define _VMXNET3_XDP_H
+
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
+#include <linux/netlink.h>
+
+#include "vmxnet3_int.h"
+
+#define VMXNET3_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
+#define VMXNET3_XDP_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define VMXNET3_XDP_RX_OFFSET VMXNET3_XDP_HEADROOM
+#define VMXNET3_XDP_MAX_FRSIZE (PAGE_SIZE - VMXNET3_XDP_HEADROOM - \
+ VMXNET3_XDP_RX_TAILROOM)
+#define VMXNET3_XDP_MAX_MTU (VMXNET3_XDP_MAX_FRSIZE - ETH_HLEN - \
+ 2 * VLAN_HLEN - ETH_FCS_LEN)
+
+int vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
+int vmxnet3_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_buf_info *rbi,
+ struct Vmxnet3_RxDesc *rxd,
+ struct sk_buff **skb_xdp_pass);
+int vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ void *data, int len,
+ struct sk_buff **skb_xdp_pass);
+void *vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask);
+
+static inline bool vmxnet3_xdp_enabled(struct vmxnet3_adapter *adapter)
+{
+ return !!rcu_access_pointer(adapter->xdp_bpf_prog);
+}
+
+#endif