author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/hyperv
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--  drivers/net/hyperv/Kconfig         |    8
-rw-r--r--  drivers/net/hyperv/Makefile        |    4
-rw-r--r--  drivers/net/hyperv/hyperv_net.h    | 1803
-rw-r--r--  drivers/net/hyperv/netvsc.c        | 1856
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c    |  292
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c    | 2823
-rw-r--r--  drivers/net/hyperv/netvsc_trace.c  |    7
-rw-r--r--  drivers/net/hyperv/netvsc_trace.h  |  182
-rw-r--r--  drivers/net/hyperv/rndis_filter.c  | 1620
9 files changed, 8595 insertions, 0 deletions
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
new file mode 100644
index 000000000..c8cbd85ad
--- /dev/null
+++ b/drivers/net/hyperv/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HYPERV_NET
+ tristate "Microsoft Hyper-V virtual network driver"
+ depends on HYPERV
+ select UCS2_STRING
+ select NLS
+ help
+ Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
new file mode 100644
index 000000000..0db7ccaec
--- /dev/null
+++ b/drivers/net/hyperv/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
+
+hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o netvsc_bpf.o
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
new file mode 100644
index 000000000..dd5919ec4
--- /dev/null
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -0,0 +1,1803 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (c) 2011, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ */
+
+#ifndef _HYPERV_NET_H
+#define _HYPERV_NET_H
+
+#include <linux/list.h>
+#include <linux/hyperv.h>
+#include <linux/rndis.h>
+#include <linux/jhash.h>
+
+/* RSS related */
+#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
+#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 /* query and set */
+
+#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
+#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
+#define NDIS_OBJECT_TYPE_OFFLOAD 0xa7
+
+#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
+#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
+
+struct ndis_obj_header {
+ u8 type;
+ u8 rev;
+ u16 size;
+} __packed;
+
+/* ndis_recv_scale_cap/cap_flag */
+#define NDIS_RSS_CAPS_MESSAGE_SIGNALED_INTERRUPTS 0x01000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_ISR 0x02000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_DPC 0x04000000
+#define NDIS_RSS_CAPS_USING_MSI_X 0x08000000
+#define NDIS_RSS_CAPS_RSS_AVAILABLE_ON_PORTS 0x10000000
+#define NDIS_RSS_CAPS_SUPPORTS_MSI_X 0x20000000
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV4 0x00000100
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6 0x00000200
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6_EX 0x00000400
+
+struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
+ struct ndis_obj_header hdr;
+ u32 cap_flag;
+ u32 num_int_msg;
+ u32 num_recv_que;
+ u16 num_indirect_tabent;
+} __packed;
+
+
+/* ndis_recv_scale_param flags */
+#define NDIS_RSS_PARAM_FLAG_BASE_CPU_UNCHANGED 0x0001
+#define NDIS_RSS_PARAM_FLAG_HASH_INFO_UNCHANGED 0x0002
+#define NDIS_RSS_PARAM_FLAG_ITABLE_UNCHANGED 0x0004
+#define NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED 0x0008
+#define NDIS_RSS_PARAM_FLAG_DISABLE_RSS 0x0010
+
+/* Hash info bits */
+#define NDIS_HASH_FUNC_TOEPLITZ 0x00000001
+#define NDIS_HASH_IPV4 0x00000100
+#define NDIS_HASH_TCP_IPV4 0x00000200
+#define NDIS_HASH_IPV6 0x00000400
+#define NDIS_HASH_IPV6_EX 0x00000800
+#define NDIS_HASH_TCP_IPV6 0x00001000
+#define NDIS_HASH_TCP_IPV6_EX 0x00002000
+
+#define NDIS_RSS_INDIRECTION_TABLE_MAX_SIZE_REVISION_2 (128 * 4)
+#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
+
+#define ITAB_NUM 128
+
+struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
+ struct ndis_obj_header hdr;
+
+ /* Qualifies the rest of the information */
+ u16 flag;
+
+	/* The base CPU number to do receive processing. Not used. */
+ u16 base_cpu_number;
+
+ /* This describes the hash function and type being enabled */
+ u32 hashinfo;
+
+ /* The size of indirection table array */
+ u16 indirect_tabsize;
+
+ /* The offset of the indirection table from the beginning of this
+ * structure
+ */
+ u32 indirect_taboffset;
+
+ /* The size of the hash secret key */
+ u16 hashkey_size;
+
+ /* The offset of the secret key from the beginning of this structure */
+ u32 hashkey_offset;
+
+ u32 processor_masks_offset;
+ u32 num_processor_masks;
+ u32 processor_masks_entry_size;
+};
+
+struct ndis_tcp_ip_checksum_info {
+ union {
+ struct {
+ u32 is_ipv4:1;
+ u32 is_ipv6:1;
+ u32 tcp_checksum:1;
+ u32 udp_checksum:1;
+ u32 ip_header_checksum:1;
+ u32 reserved:11;
+ u32 tcp_header_offset:10;
+ } transmit;
+ struct {
+ u32 tcp_checksum_failed:1;
+ u32 udp_checksum_failed:1;
+ u32 ip_checksum_failed:1;
+ u32 tcp_checksum_succeeded:1;
+ u32 udp_checksum_succeeded:1;
+ u32 ip_checksum_succeeded:1;
+ u32 loopback:1;
+ u32 tcp_checksum_value_invalid:1;
+ u32 ip_checksum_value_invalid:1;
+ } receive;
+ u32 value;
+ };
+};
+
+struct ndis_pkt_8021q_info {
+ union {
+ struct {
+ u32 pri:3; /* User Priority */
+ u32 cfi:1; /* Canonical Format ID */
+ u32 vlanid:12; /* VLAN ID */
+ u32 reserved:16;
+ };
+ u32 value;
+ };
+};
+
+/*
+ * Represents a netvsc packet which contains one RNDIS message and one
+ * Ethernet frame within the RNDIS message.
+ *
+ * The size of this structure is less than 48 bytes, so we can
+ * place this structure in the skb->cb field.
+ */
+struct hv_netvsc_packet {
+ /* Bookkeeping stuff */
+ u8 cp_partial; /* partial copy into send buffer */
+
+ u8 rmsg_size; /* RNDIS header and PPI size */
+ u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
+ u8 page_buf_cnt;
+
+ u16 q_idx;
+ u16 total_packets;
+
+ u32 total_bytes;
+ u32 send_buf_index;
+ u32 total_data_buflen;
+ struct hv_dma_range *dma_range;
+};
+
+#define NETVSC_HASH_KEYLEN 40
+
+struct netvsc_device_info {
+ unsigned char mac_adr[ETH_ALEN];
+ u32 num_chn;
+ u32 send_sections;
+ u32 recv_sections;
+ u32 send_section_size;
+ u32 recv_section_size;
+
+ struct bpf_prog *bprog;
+
+ u8 rss_key[NETVSC_HASH_KEYLEN];
+};
+
+enum rndis_device_state {
+ RNDIS_DEV_UNINITIALIZED = 0,
+ RNDIS_DEV_INITIALIZING,
+ RNDIS_DEV_INITIALIZED,
+ RNDIS_DEV_DATAINITIALIZED,
+};
+
+struct rndis_device {
+ struct net_device *ndev;
+
+ enum rndis_device_state state;
+
+ atomic_t new_req_id;
+
+ spinlock_t request_lock;
+ struct list_head req_list;
+
+ struct work_struct mcast_work;
+ u32 filter;
+
+ bool link_state; /* 0 - link up, 1 - link down */
+
+ u8 hw_mac_adr[ETH_ALEN];
+ u8 rss_key[NETVSC_HASH_KEYLEN];
+};
+
+
+/* Interface */
+struct rndis_message;
+struct ndis_offload_params;
+struct netvsc_device;
+struct netvsc_channel;
+struct net_device_context;
+
+extern u32 netvsc_ring_bytes;
+
+struct netvsc_device *netvsc_device_add(struct hv_device *device,
+ const struct netvsc_device_info *info);
+int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
+void netvsc_device_remove(struct hv_device *device);
+int netvsc_send(struct net_device *net,
+ struct hv_netvsc_packet *packet,
+ struct rndis_message *rndis_msg,
+ struct hv_page_buffer *page_buffer,
+ struct sk_buff *skb,
+ bool xdp_tx);
+void netvsc_linkstatus_callback(struct net_device *net,
+ struct rndis_message *resp,
+ void *data, u32 data_buflen);
+int netvsc_recv_callback(struct net_device *net,
+ struct netvsc_device *nvdev,
+ struct netvsc_channel *nvchan);
+void netvsc_channel_cb(void *context);
+int netvsc_poll(struct napi_struct *napi, int budget);
+
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev);
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp);
+unsigned int netvsc_xdp_fraglen(unsigned int len);
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev);
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev);
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags);
+
+int rndis_set_subchannel(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_device_info *dev_info);
+int rndis_filter_open(struct netvsc_device *nvdev);
+int rndis_filter_close(struct netvsc_device *nvdev);
+struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+ struct netvsc_device_info *info);
+void rndis_filter_update(struct netvsc_device *nvdev);
+void rndis_filter_device_remove(struct hv_device *dev,
+ struct netvsc_device *nvdev);
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *key);
+int rndis_filter_set_offload_params(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct ndis_offload_params *req_offloads);
+int rndis_filter_receive(struct net_device *ndev,
+ struct netvsc_device *net_dev,
+ struct netvsc_channel *nvchan,
+ void *data, u32 buflen);
+
+int rndis_filter_set_device_mac(struct netvsc_device *ndev,
+ const char *mac);
+
+int netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
+
+#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
+
+#define NVSP_PROTOCOL_VERSION_1 2
+#define NVSP_PROTOCOL_VERSION_2 0x30002
+#define NVSP_PROTOCOL_VERSION_4 0x40000
+#define NVSP_PROTOCOL_VERSION_5 0x50000
+#define NVSP_PROTOCOL_VERSION_6 0x60000
+#define NVSP_PROTOCOL_VERSION_61 0x60001
+
+enum {
+ NVSP_MSG_TYPE_NONE = 0,
+
+ /* Init Messages */
+ NVSP_MSG_TYPE_INIT = 1,
+ NVSP_MSG_TYPE_INIT_COMPLETE = 2,
+
+ NVSP_VERSION_MSG_START = 100,
+
+ /* Version 1 Messages */
+ NVSP_MSG1_TYPE_SEND_NDIS_VER = NVSP_VERSION_MSG_START,
+
+ NVSP_MSG1_TYPE_SEND_RECV_BUF,
+ NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE,
+ NVSP_MSG1_TYPE_REVOKE_RECV_BUF,
+
+ NVSP_MSG1_TYPE_SEND_SEND_BUF,
+ NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE,
+ NVSP_MSG1_TYPE_REVOKE_SEND_BUF,
+
+ NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
+ NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
+
+ /* Version 2 messages */
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF_COMP,
+ NVSP_MSG2_TYPE_REVOKE_CHIMNEY_DELEGATED_BUF,
+
+ NVSP_MSG2_TYPE_RESUME_CHIMNEY_RX_INDICATION,
+
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY,
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY_COMP,
+
+ NVSP_MSG2_TYPE_INDICATE_CHIMNEY_EVENT,
+
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT_COMP,
+
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ,
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ_COMP,
+
+ NVSP_MSG2_TYPE_ALLOC_RXBUF,
+ NVSP_MSG2_TYPE_ALLOC_RXBUF_COMP,
+
+ NVSP_MSG2_TYPE_FREE_RXBUF,
+
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT,
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT_COMP,
+
+ NVSP_MSG2_TYPE_SEND_NDIS_CONFIG,
+
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ /* Version 4 messages */
+ NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
+ NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
+ NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ /* Version 5 messages */
+ NVSP_MSG5_TYPE_OID_QUERY_EX,
+ NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
+ NVSP_MSG5_TYPE_SUBCHANNEL,
+ NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+ NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+ /* Version 6 messages */
+ NVSP_MSG6_TYPE_PD_API,
+ NVSP_MSG6_TYPE_PD_POST_BATCH,
+
+ NVSP_MSG6_MAX = NVSP_MSG6_TYPE_PD_POST_BATCH
+};
+
+enum {
+ NVSP_STAT_NONE = 0,
+ NVSP_STAT_SUCCESS,
+ NVSP_STAT_FAIL,
+ NVSP_STAT_PROTOCOL_TOO_NEW,
+ NVSP_STAT_PROTOCOL_TOO_OLD,
+ NVSP_STAT_INVALID_RNDIS_PKT,
+ NVSP_STAT_BUSY,
+ NVSP_STAT_PROTOCOL_UNSUPPORTED,
+ NVSP_STAT_MAX,
+};
+
+struct nvsp_message_header {
+ u32 msg_type;
+};
+
+/* Init Messages */
+
+/*
+ * This message is used by the VSC to initialize the channel after the channel
+ * has been opened. This message should never include anything other than
+ * versioning (i.e. this message will be the same forever).
+ */
+struct nvsp_message_init {
+ u32 min_protocol_ver;
+ u32 max_protocol_ver;
+} __packed;
+
+/*
+ * This message is used by the VSP to complete the initialization of the
+ * channel. This message should never include anything other than versioning
+ * (i.e. this message will be the same forever).
+ */
+struct nvsp_message_init_complete {
+ u32 negotiated_protocol_ver;
+ u32 max_mdl_chain_len;
+ u32 status;
+} __packed;
+
+union nvsp_message_init_uber {
+ struct nvsp_message_init init;
+ struct nvsp_message_init_complete init_complete;
+} __packed;
+
+/* Version 1 Messages */
+
+/*
+ * This message is used by the VSC to send the NDIS version to the VSP. The VSP
+ * can use this information when handling OIDs sent by the VSC.
+ */
+struct nvsp_1_message_send_ndis_version {
+ u32 ndis_major_ver;
+ u32 ndis_minor_ver;
+} __packed;
+
+/*
+ * This message is used by the VSC to send a receive buffer to the VSP. The VSP
+ * can then use the receive buffer to send data to the VSC.
+ */
+struct nvsp_1_message_send_receive_buffer {
+ u32 gpadl_handle;
+ u16 id;
+} __packed;
+
+struct nvsp_1_receive_buffer_section {
+ u32 offset;
+ u32 sub_alloc_size;
+ u32 num_sub_allocs;
+ u32 end_offset;
+} __packed;
+
+/*
+ * This message is used by the VSP to acknowledge a receive buffer sent by the
+ * VSC. This message must be sent by the VSP before the VSP uses the receive
+ * buffer.
+ */
+struct nvsp_1_message_send_receive_buffer_complete {
+ u32 status;
+ u32 num_sections;
+
+ /*
+ * The receive buffer is split into two parts, a large suballocation
+ * section and a small suballocation section. These sections are then
+ * suballocated by a certain size.
+ */
+
+ /*
+ * For example, the following break up of the receive buffer has 6
+ * large suballocations and 10 small suballocations.
+ */
+
+ /*
+ * | Large Section | | Small Section |
+ * ------------------------------------------------------------
+ * | | | | | | | | | | | | | | | | | |
+ * | |
+ * LargeOffset SmallOffset
+ */
+
+ struct nvsp_1_receive_buffer_section sections[1];
+} __packed;
+
+/*
+ * This message is sent by the VSC to revoke the receive buffer. After the VSP
+ * completes this transaction, the VSP should never use the receive buffer
+ * again.
+ */
+struct nvsp_1_message_revoke_receive_buffer {
+ u16 id;
+};
+
+/*
+ * This message is used by the VSC to send a send buffer to the VSP. The VSC
+ * can then use the send buffer to send data to the VSP.
+ */
+struct nvsp_1_message_send_send_buffer {
+ u32 gpadl_handle;
+ u16 id;
+} __packed;
+
+/*
+ * This message is used by the VSP to acknowledge a send buffer sent by the
+ * VSC. This message must be sent by the VSP before the VSP uses the send
+ * buffer.
+ */
+struct nvsp_1_message_send_send_buffer_complete {
+ u32 status;
+
+ /*
+ * The VSC gets to choose the size of the send buffer and the VSP gets
+ * to choose the sections size of the buffer. This was done to enable
+ * dynamic reconfigurations when the cost of GPA-direct buffers
+ * decreases.
+ */
+ u32 section_size;
+} __packed;
+
+/*
+ * This message is sent by the VSC to revoke the send buffer. After the VSP
+ * completes this transaction, the VSP should never use the send buffer again.
+ */
+struct nvsp_1_message_revoke_send_buffer {
+ u16 id;
+};
+
+/*
+ * This message is used by both the VSP and the VSC to send a RNDIS message to
+ * the opposite channel endpoint.
+ */
+struct nvsp_1_message_send_rndis_packet {
+ /*
+	 * This field is specified by RNDIS. It assumes there are two different
+	 * channels of communication. However, the Network VSP only has one.
+ * Therefore, the channel travels with the RNDIS packet.
+ */
+ u32 channel_type;
+
+ /*
+ * This field is used to send part or all of the data through a send
+	 * buffer. This value specifies an index into the send buffer. If the
+ * index is 0xFFFFFFFF, then the send buffer is not being used and all
+ * of the data was sent through other VMBus mechanisms.
+ */
+ u32 send_buf_section_index;
+ u32 send_buf_section_size;
+} __packed;
+
+/*
+ * This message is used by both the VSP and the VSC to complete a RNDIS message
+ * to the opposite channel endpoint. At this point, the initiator of this
+ * message cannot use any resources associated with the original RNDIS packet.
+ */
+struct nvsp_1_message_send_rndis_packet_complete {
+ u32 status;
+};
+
+union nvsp_1_message_uber {
+ struct nvsp_1_message_send_ndis_version send_ndis_ver;
+
+ struct nvsp_1_message_send_receive_buffer send_recv_buf;
+ struct nvsp_1_message_send_receive_buffer_complete
+ send_recv_buf_complete;
+ struct nvsp_1_message_revoke_receive_buffer revoke_recv_buf;
+
+ struct nvsp_1_message_send_send_buffer send_send_buf;
+ struct nvsp_1_message_send_send_buffer_complete send_send_buf_complete;
+ struct nvsp_1_message_revoke_send_buffer revoke_send_buf;
+
+ struct nvsp_1_message_send_rndis_packet send_rndis_pkt;
+ struct nvsp_1_message_send_rndis_packet_complete
+ send_rndis_pkt_complete;
+} __packed;
+
+
+/*
+ * Network VSP protocol version 2 messages:
+ */
+struct nvsp_2_vsc_capability {
+ union {
+ u64 data;
+ struct {
+ u64 vmq:1;
+ u64 chimney:1;
+ u64 sriov:1;
+ u64 ieee8021q:1;
+ u64 correlation_id:1;
+ u64 teaming:1;
+ u64 vsubnetid:1;
+ u64 rsc:1;
+ };
+ };
+} __packed;
+
+struct nvsp_2_send_ndis_config {
+ u32 mtu;
+ u32 reserved;
+ struct nvsp_2_vsc_capability capability;
+} __packed;
+
+/* Allocate receive buffer */
+struct nvsp_2_alloc_rxbuf {
+ /* Allocation ID to match the allocation request and response */
+ u32 alloc_id;
+
+ /* Length of the VM shared memory receive buffer that needs to
+ * be allocated
+ */
+ u32 len;
+} __packed;
+
+/* Allocate receive buffer complete */
+struct nvsp_2_alloc_rxbuf_comp {
+ /* The NDIS_STATUS code for buffer allocation */
+ u32 status;
+
+ u32 alloc_id;
+
+ /* GPADL handle for the allocated receive buffer */
+ u32 gpadl_handle;
+
+ /* Receive buffer ID */
+ u64 recv_buf_id;
+} __packed;
+
+struct nvsp_2_free_rxbuf {
+ u64 recv_buf_id;
+} __packed;
+
+union nvsp_2_message_uber {
+ struct nvsp_2_send_ndis_config send_ndis_config;
+ struct nvsp_2_alloc_rxbuf alloc_rxbuf;
+ struct nvsp_2_alloc_rxbuf_comp alloc_rxbuf_comp;
+ struct nvsp_2_free_rxbuf free_rxbuf;
+} __packed;
+
+struct nvsp_4_send_vf_association {
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 allocated;
+
+ /* Serial number of the VF to team with */
+ u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+ NVSP_DATAPATH_SYNTHETIC = 0,
+ NVSP_DATAPATH_VF,
+ NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+ u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+ struct nvsp_4_send_vf_association vf_assoc;
+ struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
+enum nvsp_subchannel_operation {
+ NVSP_SUBCHANNEL_NONE = 0,
+ NVSP_SUBCHANNEL_ALLOCATE,
+ NVSP_SUBCHANNEL_MAX
+};
+
+struct nvsp_5_subchannel_request {
+ u32 op;
+ u32 num_subchannels;
+} __packed;
+
+struct nvsp_5_subchannel_complete {
+ u32 status;
+ u32 num_subchannels; /* Actual number of subchannels allocated */
+} __packed;
+
+struct nvsp_5_send_indirect_table {
+ /* The number of entries in the send indirection table */
+ u32 count;
+
+ /* The offset of the send indirection table from the beginning of
+ * struct nvsp_message.
+ * The send indirection table tells which channel to put the send
+ * traffic on. Each entry is a channel number.
+ */
+ u32 offset;
+} __packed;
+
+union nvsp_5_message_uber {
+ struct nvsp_5_subchannel_request subchn_req;
+ struct nvsp_5_subchannel_complete subchn_comp;
+ struct nvsp_5_send_indirect_table send_table;
+} __packed;
+
+enum nvsp_6_pd_api_op {
+ PD_API_OP_CONFIG = 1,
+ PD_API_OP_SW_DATAPATH, /* Switch Datapath */
+ PD_API_OP_OPEN_PROVIDER,
+ PD_API_OP_CLOSE_PROVIDER,
+ PD_API_OP_CREATE_QUEUE,
+ PD_API_OP_FLUSH_QUEUE,
+ PD_API_OP_FREE_QUEUE,
+ PD_API_OP_ALLOC_COM_BUF, /* Allocate Common Buffer */
+ PD_API_OP_FREE_COM_BUF, /* Free Common Buffer */
+ PD_API_OP_MAX
+};
+
+struct grp_affinity {
+ u64 mask;
+ u16 grp;
+ u16 reserved[3];
+} __packed;
+
+struct nvsp_6_pd_api_req {
+ u32 op;
+
+ union {
+ /* MMIO information is sent from the VM to VSP */
+ struct __packed {
+ u64 mmio_pa; /* MMIO Physical Address */
+ u32 mmio_len;
+
+ /* Number of PD queues a VM can support */
+ u16 num_subchn;
+ } config;
+
+ /* Switch Datapath */
+ struct __packed {
+ /* Host Datapath Is PacketDirect */
+ u8 host_dpath_is_pd;
+
+ /* Guest PacketDirect Is Enabled */
+ u8 guest_pd_enabled;
+ } sw_dpath;
+
+ /* Open Provider*/
+ struct __packed {
+ u32 prov_id; /* Provider id */
+ u32 flag;
+ } open_prov;
+
+ /* Close Provider */
+ struct __packed {
+ u32 prov_id;
+ } cls_prov;
+
+ /* Create Queue*/
+ struct __packed {
+ u32 prov_id;
+ u16 q_id;
+ u16 q_size;
+ u8 is_recv_q;
+ u8 is_rss_q;
+ u32 recv_data_len;
+ struct grp_affinity affy;
+ } cr_q;
+
+ /* Delete Queue*/
+ struct __packed {
+ u32 prov_id;
+ u16 q_id;
+ } del_q;
+
+ /* Flush Queue */
+ struct __packed {
+ u32 prov_id;
+ u16 q_id;
+ } flush_q;
+
+ /* Allocate Common Buffer */
+ struct __packed {
+ u32 len;
+ u32 pf_node; /* Preferred Node */
+ u16 region_id;
+ } alloc_com_buf;
+
+ /* Free Common Buffer */
+ struct __packed {
+ u32 len;
+ u64 pa; /* Physical Address */
+ u32 pf_node; /* Preferred Node */
+ u16 region_id;
+ u8 cache_type;
+ } free_com_buf;
+ } __packed;
+} __packed;
+
+struct nvsp_6_pd_api_comp {
+ u32 op;
+ u32 status;
+
+ union {
+ struct __packed {
+ /* actual number of PD queues allocated to the VM */
+ u16 num_pd_q;
+
+ /* Num Receive Rss PD Queues */
+ u8 num_rss_q;
+
+ u8 is_supported; /* Is supported by VSP */
+ u8 is_enabled; /* Is enabled by VSP */
+ } config;
+
+ /* Open Provider */
+ struct __packed {
+ u32 prov_id;
+ } open_prov;
+
+ /* Create Queue */
+ struct __packed {
+ u32 prov_id;
+ u16 q_id;
+ u16 q_size;
+ u32 recv_data_len;
+ struct grp_affinity affy;
+ } cr_q;
+
+ /* Allocate Common Buffer */
+ struct __packed {
+ u64 pa; /* Physical Address */
+ u32 len;
+ u32 pf_node; /* Preferred Node */
+ u16 region_id;
+ u8 cache_type;
+ } alloc_com_buf;
+ } __packed;
+} __packed;
+
+struct nvsp_6_pd_buf {
+ u32 region_offset;
+ u16 region_id;
+ u16 is_partial:1;
+ u16 reserved:15;
+} __packed;
+
+struct nvsp_6_pd_batch_msg {
+ struct nvsp_message_header hdr;
+ u16 count;
+ u16 guest2host:1;
+ u16 is_recv:1;
+ u16 reserved:14;
+ struct nvsp_6_pd_buf pd_buf[0];
+} __packed;
+
+union nvsp_6_message_uber {
+ struct nvsp_6_pd_api_req pd_req;
+ struct nvsp_6_pd_api_comp pd_comp;
+} __packed;
+
+union nvsp_all_messages {
+ union nvsp_message_init_uber init_msg;
+ union nvsp_1_message_uber v1_msg;
+ union nvsp_2_message_uber v2_msg;
+ union nvsp_4_message_uber v4_msg;
+ union nvsp_5_message_uber v5_msg;
+ union nvsp_6_message_uber v6_msg;
+} __packed;
+
+/* ALL Messages */
+struct nvsp_message {
+ struct nvsp_message_header hdr;
+ union nvsp_all_messages msg;
+} __packed;
+
+
+#define NETVSC_MTU 65535
+#define NETVSC_MTU_MIN ETH_MIN_MTU
+
+/* Max buffer sizes allowed by a host */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024 * 1024 * 31) /* 31MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024 * 1024 * 15) /* 15MB */
+#define NETVSC_RECEIVE_BUFFER_DEFAULT (1024 * 1024 * 16)
+
+#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */
+#define NETVSC_SEND_BUFFER_DEFAULT (1024 * 1024)
+
+#define NETVSC_INVALID_INDEX -1
+
+#define NETVSC_SEND_SECTION_SIZE 6144
+#define NETVSC_RECV_SECTION_SIZE 1728
+
+/* Default size of TX buf: 1MB, RX buf: 16MB */
+#define NETVSC_MIN_TX_SECTIONS 10
+#define NETVSC_DEFAULT_TX (NETVSC_SEND_BUFFER_DEFAULT \
+ / NETVSC_SEND_SECTION_SIZE)
+#define NETVSC_MIN_RX_SECTIONS 10
+#define NETVSC_DEFAULT_RX (NETVSC_RECEIVE_BUFFER_DEFAULT \
+ / NETVSC_RECV_SECTION_SIZE)
+
+#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
+#define NETVSC_SEND_BUFFER_ID 0
+
+#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
+ NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
+ NETIF_F_TSO6 | NETIF_F_LRO | \
+ NETIF_F_SG | NETIF_F_RXHASH)
+
+#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
+#define VRSS_CHANNEL_MAX 64
+#define VRSS_CHANNEL_DEFAULT 8
+
+#define RNDIS_MAX_PKT_DEFAULT 8
+#define RNDIS_PKT_ALIGN_DEFAULT 8
+
+#define NETVSC_XDP_HDRM 256
+
+#define NETVSC_MIN_OUT_MSG_SIZE (sizeof(struct vmpacket_descriptor) + \
+ sizeof(struct nvsp_message))
+#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+
+/* Estimated requestor size:
+ * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
+ */
+static inline u32 netvsc_rqstor_size(unsigned long ringbytes)
+{
+ return ringbytes / NETVSC_MIN_OUT_MSG_SIZE +
+ ringbytes / NETVSC_MIN_IN_MSG_SIZE;
+}
+
+/* XFER PAGE packets can specify a maximum of 375 ranges for NDIS >= 6.0
+ * and a maximum of 64 ranges for NDIS < 6.0 with no RSC; with RSC, this
+ * limit is raised to 562 (= NVSP_RSC_MAX).
+ */
+#define NETVSC_MAX_XFER_PAGE_RANGES NVSP_RSC_MAX
+#define NETVSC_XFER_HEADER_SIZE(rng_cnt) \
+ (offsetof(struct vmtransfer_page_packet_header, ranges) + \
+ (rng_cnt) * sizeof(struct vmtransfer_page_range))
+#define NETVSC_MAX_PKT_SIZE (NETVSC_XFER_HEADER_SIZE(NETVSC_MAX_XFER_PAGE_RANGES) + \
+ sizeof(struct nvsp_message) + (sizeof(u32) * VRSS_SEND_TAB_SIZE))
+
+struct multi_send_data {
+ struct sk_buff *skb; /* skb containing the pkt */
+ struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
+ u32 count; /* counter of batched packets */
+};
+
+struct recv_comp_data {
+ u64 tid; /* transaction id */
+ u32 status;
+};
+
+struct multi_recv_comp {
+ struct recv_comp_data *slots;
+ u32 first; /* first data entry */
+ u32 next; /* next entry for writing */
+};
+
+#define NVSP_RSC_MAX 562 /* Max #RSC frags in a vmbus xfer page pkt */
+
+struct nvsc_rsc {
+ struct ndis_pkt_8021q_info vlan;
+ struct ndis_tcp_ip_checksum_info csum_info;
+ u32 hash_info;
+ u8 ppi_flags; /* valid/present bits for the above PPIs */
+ u8 is_last; /* last RNDIS msg in a vmtransfer_page */
+ u32 cnt; /* #fragments in an RSC packet */
+ u32 pktlen; /* Full packet length */
+ void *data[NVSP_RSC_MAX];
+ u32 len[NVSP_RSC_MAX];
+};
+
+#define NVSC_RSC_VLAN BIT(0) /* valid/present bit for 'vlan' */
+#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
+#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
+
+struct netvsc_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ struct u64_stats_sync syncp;
+};
+
+struct netvsc_stats_rx {
+ u64 packets;
+ u64 bytes;
+ u64 broadcast;
+ u64 multicast;
+ u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
+ struct u64_stats_sync syncp;
+};
+
+struct netvsc_ethtool_stats {
+ unsigned long tx_scattered;
+ unsigned long tx_no_memory;
+ unsigned long tx_no_space;
+ unsigned long tx_too_big;
+ unsigned long tx_busy;
+ unsigned long tx_send_full;
+ unsigned long rx_comp_busy;
+ unsigned long rx_no_memory;
+ unsigned long stop_queue;
+ unsigned long wake_queue;
+ unsigned long vlan_error;
+};
+
+struct netvsc_ethtool_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 vf_rx_packets;
+ u64 vf_rx_bytes;
+ u64 vf_tx_packets;
+ u64 vf_tx_bytes;
+};
+
+struct netvsc_vf_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct netvsc_reconfig {
+ struct list_head list;
+ u32 event;
+};
+
+/* L4 hash bits for different protocols */
+#define HV_TCP4_L4HASH 1
+#define HV_TCP6_L4HASH 2
+#define HV_UDP4_L4HASH 4
+#define HV_UDP6_L4HASH 8
+#define HV_DEFAULT_L4HASH (HV_TCP4_L4HASH | HV_TCP6_L4HASH | HV_UDP4_L4HASH | \
+ HV_UDP6_L4HASH)
+
+/* The context of the netvsc device */
+struct net_device_context {
+ /* point back to our device context */
+ struct hv_device *device_ctx;
+ /* netvsc_device */
+ struct netvsc_device __rcu *nvdev;
+ /* list of netvsc net_devices */
+ struct list_head list;
+ /* reconfigure work */
+ struct delayed_work dwork;
+ /* last reconfig time */
+ unsigned long last_reconfig;
+ /* reconfig events */
+ struct list_head reconfig_events;
+ /* list protection */
+ spinlock_t lock;
+
+ u32 msg_enable; /* debug level */
+
+ u32 tx_checksum_mask;
+
+ u32 tx_table[VRSS_SEND_TAB_SIZE];
+
+ u16 rx_table[ITAB_NUM];
+
+ /* Ethtool settings */
+ u8 duplex;
+ u32 speed;
+ u32 l4_hash; /* L4 hash settings */
+ struct netvsc_ethtool_stats eth_stats;
+
+ /* State to manage the associated VF interface. */
+ struct net_device __rcu *vf_netdev;
+ struct netvsc_vf_pcpu_stats __percpu *vf_stats;
+ struct delayed_work vf_takeover;
+
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 vf_alloc;
+ /* Serial number of the VF to team with */
+ u32 vf_serial;
+ /* completion variable to confirm vf association */
+ struct completion vf_add;
+ /* Is the current data path through the VF NIC? */
+ bool data_path_is_vf;
+
+ /* Used to temporarily save the config info across hibernation */
+ struct netvsc_device_info *saved_netvsc_dev_info;
+};
+
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+ * packets. We can use ethtool to change UDP hash level when necessary.
+ */
+static inline u32 netvsc_get_hash(struct sk_buff *skb,
+ const struct net_device_context *ndc)
+{
+ struct flow_keys flow;
+ u32 hash, pkt_proto = 0;
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ return 0;
+
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
+ return skb_get_hash(skb);
+ } else {
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+ return 0;
+
+ __skb_set_sw_hash(skb, hash, false);
+ }
+
+ return hash;
+}
+
+/* Per channel data */
+struct netvsc_channel {
+ struct vmbus_channel *channel;
+ struct netvsc_device *net_device;
+ void *recv_buf; /* buffer to copy packets out from the receive buffer */
+ const struct vmpacket_descriptor *desc;
+ struct napi_struct napi;
+ struct multi_send_data msd;
+ struct multi_recv_comp mrc;
+ atomic_t queue_sends;
+ struct nvsc_rsc rsc;
+
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+ bool xdp_flush;
+
+ struct netvsc_stats_tx tx_stats;
+ struct netvsc_stats_rx rx_stats;
+};
+
+/* Per netvsc device */
+struct netvsc_device {
+ u32 nvsp_version;
+
+ wait_queue_head_t wait_drain;
+ bool destroy;
+ bool tx_disable; /* if true, do not wake up queue again */
+
+	/* Receive buffer allocated by us but managed by NetVSP */
+ void *recv_buf;
+ void *recv_original_buf;
+ u32 recv_buf_size; /* allocated bytes */
+ struct vmbus_gpadl recv_buf_gpadl_handle;
+ u32 recv_section_cnt;
+ u32 recv_section_size;
+ u32 recv_completion_cnt;
+
+ /* Send buffer allocated by us */
+ void *send_buf;
+ void *send_original_buf;
+ u32 send_buf_size;
+ struct vmbus_gpadl send_buf_gpadl_handle;
+ u32 send_section_cnt;
+ u32 send_section_size;
+ unsigned long *send_section_map;
+
+ /* Used for NetVSP initialization protocol */
+ struct completion channel_init_wait;
+ struct nvsp_message channel_init_pkt;
+
+ struct nvsp_message revoke_packet;
+
+ u32 max_chn;
+ u32 num_chn;
+
+ atomic_t open_chn;
+ struct work_struct subchan_work;
+ wait_queue_head_t subchan_open;
+
+ struct rndis_device *extension;
+
+ u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
+ u32 pkt_align; /* alignment bytes, e.g. 8 */
+
+ struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
+
+ struct rcu_head rcu;
+};
+
+/* NdisInitialize message */
+struct rndis_initialize_request {
+ u32 req_id;
+ u32 major_ver;
+ u32 minor_ver;
+ u32 max_xfer_size;
+};
+
+/* Response to NdisInitialize */
+struct rndis_initialize_complete {
+ u32 req_id;
+ u32 status;
+ u32 major_ver;
+ u32 minor_ver;
+ u32 dev_flags;
+ u32 medium;
+ u32 max_pkt_per_msg;
+ u32 max_xfer_size;
+ u32 pkt_alignment_factor;
+ u32 af_list_offset;
+ u32 af_list_size;
+};
+
+/* Call manager devices only: Information about an address family */
+/* supported by the device is appended to the response to NdisInitialize. */
+struct rndis_co_address_family {
+ u32 address_family;
+ u32 major_ver;
+ u32 minor_ver;
+};
+
+/* NdisHalt message */
+struct rndis_halt_request {
+ u32 req_id;
+};
+
+/* NdisQueryRequest message */
+struct rndis_query_request {
+ u32 req_id;
+ u32 oid;
+ u32 info_buflen;
+ u32 info_buf_offset;
+ u32 dev_vc_handle;
+};
+
+/* Response to NdisQueryRequest */
+struct rndis_query_complete {
+ u32 req_id;
+ u32 status;
+ u32 info_buflen;
+ u32 info_buf_offset;
+};
+
+/* NdisSetRequest message */
+struct rndis_set_request {
+ u32 req_id;
+ u32 oid;
+ u32 info_buflen;
+ u32 info_buf_offset;
+ u32 dev_vc_handle;
+ u8 info_buf[];
+};
+
+/* Response to NdisSetRequest */
+struct rndis_set_complete {
+ u32 req_id;
+ u32 status;
+};
+
+/* NdisReset message */
+struct rndis_reset_request {
+ u32 reserved;
+};
+
+/* Response to NdisReset */
+struct rndis_reset_complete {
+ u32 status;
+ u32 addressing_reset;
+};
+
+/* NdisMIndicateStatus message */
+struct rndis_indicate_status {
+ u32 status;
+ u32 status_buflen;
+ u32 status_buf_offset;
+};
+
+/* Diagnostic information passed as the status buffer in */
+/* struct rndis_indicate_status messages signifying error conditions. */
+struct rndis_diagnostic_info {
+ u32 diag_status;
+ u32 error_offset;
+};
+
+/* NdisKeepAlive message */
+struct rndis_keepalive_request {
+ u32 req_id;
+};
+
+/* Response to NdisKeepAlive */
+struct rndis_keepalive_complete {
+ u32 req_id;
+ u32 status;
+};
+
+/*
+ * Data message. All Offset fields contain byte offsets from the beginning of
+ * struct rndis_packet. All Length fields are in bytes. VcHandle is set
+ * to 0 for connectionless data, otherwise it contains the VC handle.
+ */
+struct rndis_packet {
+ u32 data_offset;
+ u32 data_len;
+ u32 oob_data_offset;
+ u32 oob_data_len;
+ u32 num_oob_data_elements;
+ u32 per_pkt_info_offset;
+ u32 per_pkt_info_len;
+ u32 vc_handle;
+ u32 reserved;
+};
+
+/* Optional Out of Band data associated with a Data message. */
+struct rndis_oobd {
+ u32 size;
+ u32 type;
+ u32 class_info_offset;
+};
+
+/* Packet extension field contents associated with a Data message. */
+struct rndis_per_packet_info {
+ u32 size;
+ u32 type:31;
+ u32 internal:1;
+ u32 ppi_offset;
+};
+
+enum ndis_per_pkt_info_type {
+ TCPIP_CHKSUM_PKTINFO,
+ IPSEC_PKTINFO,
+ TCP_LARGESEND_PKTINFO,
+ CLASSIFICATION_HANDLE_PKTINFO,
+ NDIS_RESERVED,
+ SG_LIST_PKTINFO,
+ IEEE_8021Q_INFO,
+ ORIGINAL_PKTINFO,
+ PACKET_CANCEL_ID,
+ NBL_HASH_VALUE = PACKET_CANCEL_ID,
+ ORIGINAL_NET_BUFLIST,
+ CACHED_NET_BUFLIST,
+ SHORT_PKT_PADINFO,
+ MAX_PER_PKT_INFO
+};
+
+enum rndis_per_pkt_info_interal_type {
+ RNDIS_PKTINFO_ID = 1,
+ /* Add more members here */
+
+ RNDIS_PKTINFO_MAX
+};
+
+#define RNDIS_PKTINFO_SUBALLOC BIT(0)
+#define RNDIS_PKTINFO_1ST_FRAG BIT(1)
+#define RNDIS_PKTINFO_LAST_FRAG BIT(2)
+
+#define RNDIS_PKTINFO_ID_V1 1
+
+struct rndis_pktinfo_id {
+ u8 ver;
+ u8 flag;
+ u16 pkt_id;
+};
+
+struct ndis_object_header {
+ u8 type;
+ u8 revision;
+ u16 size;
+};
+
+#define NDIS_OBJECT_TYPE_DEFAULT 0x80
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1
+
+#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_LSOV1_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED 3
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED 4
+
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE 1
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
+
+#define VERSION_4_OFFLOAD_SIZE 22
+/*
+ * New offload OIDs for NDIS 6
+ */
+#define OID_TCP_OFFLOAD_CURRENT_CONFIG 0xFC01020B /* query only */
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C /* set only */
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D/* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_CURRENT_CONFIG 0xFC01020E /* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
+#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
+
+/*
+ * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_OFFLOAD
+ */
+
+#define NDIS_OFFLOAD_ENCAP_NONE 0x0000
+#define NDIS_OFFLOAD_ENCAP_NULL 0x0001
+#define NDIS_OFFLOAD_ENCAP_8023 0x0002
+#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004
+#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008
+#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010
+
+struct ndis_csum_offload {
+ u32 ip4_txenc;
+ u32 ip4_txcsum;
+#define NDIS_TXCSUM_CAP_IP4OPT 0x001
+#define NDIS_TXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP4 0x010
+#define NDIS_TXCSUM_CAP_UDP4 0x040
+#define NDIS_TXCSUM_CAP_IP4 0x100
+
+#define NDIS_TXCSUM_ALL_TCP4 (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
+
+ u32 ip4_rxenc;
+ u32 ip4_rxcsum;
+#define NDIS_RXCSUM_CAP_IP4OPT 0x001
+#define NDIS_RXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP4 0x010
+#define NDIS_RXCSUM_CAP_UDP4 0x040
+#define NDIS_RXCSUM_CAP_IP4 0x100
+ u32 ip6_txenc;
+ u32 ip6_txcsum;
+#define NDIS_TXCSUM_CAP_IP6EXT 0x001
+#define NDIS_TXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP6 0x010
+#define NDIS_TXCSUM_CAP_UDP6 0x040
+ u32 ip6_rxenc;
+ u32 ip6_rxcsum;
+#define NDIS_RXCSUM_CAP_IP6EXT 0x001
+#define NDIS_RXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP6 0x010
+#define NDIS_RXCSUM_CAP_UDP6 0x040
+
+#define NDIS_TXCSUM_ALL_TCP6 (NDIS_TXCSUM_CAP_TCP6 | \
+ NDIS_TXCSUM_CAP_TCP6OPT | \
+ NDIS_TXCSUM_CAP_IP6EXT)
+};
+
+struct ndis_lsov1_offload {
+ u32 encap;
+ u32 maxsize;
+ u32 minsegs;
+ u32 opts;
+};
+
+struct ndis_ipsecv1_offload {
+ u32 encap;
+ u32 ah_esp;
+ u32 xport_tun;
+ u32 ip4_opts;
+ u32 flags;
+ u32 ip4_ah;
+ u32 ip4_esp;
+};
+
+struct ndis_lsov2_offload {
+ u32 ip4_encap;
+ u32 ip4_maxsz;
+ u32 ip4_minsg;
+ u32 ip6_encap;
+ u32 ip6_maxsz;
+ u32 ip6_minsg;
+ u32 ip6_opts;
+#define NDIS_LSOV2_CAP_IP6EXT 0x001
+#define NDIS_LSOV2_CAP_TCP6OPT 0x004
+
+#define NDIS_LSOV2_CAP_IP6 (NDIS_LSOV2_CAP_IP6EXT | \
+ NDIS_LSOV2_CAP_TCP6OPT)
+};
+
+struct ndis_ipsecv2_offload {
+ u32 encap;
+ u8 ip6;
+ u8 ip4opt;
+ u8 ip6ext;
+ u8 ah;
+ u8 esp;
+ u8 ah_esp;
+ u8 xport;
+ u8 tun;
+ u8 xport_tun;
+ u8 lso;
+ u8 extseq;
+ u32 udp_esp;
+ u32 auth;
+ u32 crypto;
+ u32 sa_caps;
+};
+
+struct ndis_rsc_offload {
+ u8 ip4;
+ u8 ip6;
+};
+
+struct ndis_encap_offload {
+ u32 flags;
+ u32 maxhdr;
+};
+
+struct ndis_offload {
+ struct ndis_object_header header;
+ struct ndis_csum_offload csum;
+ struct ndis_lsov1_offload lsov1;
+ struct ndis_ipsecv1_offload ipsecv1;
+ struct ndis_lsov2_offload lsov2;
+ u32 flags;
+ /* NDIS >= 6.1 */
+ struct ndis_ipsecv2_offload ipsecv2;
+ /* NDIS >= 6.30 */
+ struct ndis_rsc_offload rsc;
+ struct ndis_encap_offload encap_gre;
+};
+
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload)
+#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ipsecv2)
+#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, rsc)
+
+struct ndis_offload_params {
+ struct ndis_object_header header;
+ u8 ip_v4_csum;
+ u8 tcp_ip_v4_csum;
+ u8 udp_ip_v4_csum;
+ u8 tcp_ip_v6_csum;
+ u8 udp_ip_v6_csum;
+ u8 lso_v1;
+ u8 ip_sec_v1;
+ u8 lso_v2_ipv4;
+ u8 lso_v2_ipv6;
+ u8 tcp_connection_ip_v4;
+ u8 tcp_connection_ip_v6;
+ u32 flags;
+ u8 ip_sec_v2;
+ u8 ip_sec_v2_ip_v4;
+ struct {
+ u8 rsc_ip_v4;
+ u8 rsc_ip_v6;
+ };
+ struct {
+ u8 encapsulated_packet_task_offload;
+ u8 encapsulation_types;
+ };
+};
+
+struct ndis_tcp_lso_info {
+ union {
+ struct {
+ u32 unused:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } transmit;
+ struct {
+ u32 mss:20;
+ u32 tcp_header_offset:10;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v1_transmit;
+ struct {
+ u32 tcp_payload:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v1_transmit_complete;
+ struct {
+ u32 mss:20;
+ u32 tcp_header_offset:10;
+ u32 type:1;
+ u32 ip_version:1;
+ } lso_v2_transmit;
+ struct {
+ u32 reserved:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v2_transmit_complete;
+ u32 value;
+ };
+};
+
+#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_pkt_8021q_info))
+
+#define NDIS_CSUM_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_tcp_ip_checksum_info))
+
+#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_tcp_lso_info))
+
+#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(u32))
+
+/* Total size of all PPI data */
+#define NDIS_ALL_PPI_SIZE (NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE + \
+ NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE)
+
+/* Format of Information buffer passed in a SetRequest for the OID */
+/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
+struct rndis_config_parameter_info {
+ u32 parameter_name_offset;
+ u32 parameter_name_length;
+ u32 parameter_type;
+ u32 parameter_value_offset;
+ u32 parameter_value_length;
+};
+
+/* Values for ParameterType in struct rndis_config_parameter_info */
+#define RNDIS_CONFIG_PARAM_TYPE_INTEGER 0
+#define RNDIS_CONFIG_PARAM_TYPE_STRING 2
+
+/* CONDIS Miniport messages for connection oriented devices */
+/* that do not implement a call manager. */
+
+/* CoNdisMiniportCreateVc message */
+struct rcondis_mp_create_vc {
+ u32 req_id;
+ u32 ndis_vc_handle;
+};
+
+/* Response to CoNdisMiniportCreateVc */
+struct rcondis_mp_create_vc_complete {
+ u32 req_id;
+ u32 dev_vc_handle;
+ u32 status;
+};
+
+/* CoNdisMiniportDeleteVc message */
+struct rcondis_mp_delete_vc {
+ u32 req_id;
+ u32 dev_vc_handle;
+};
+
+/* Response to CoNdisMiniportDeleteVc */
+struct rcondis_mp_delete_vc_complete {
+ u32 req_id;
+ u32 status;
+};
+
+/* CoNdisMiniportQueryRequest message */
+struct rcondis_mp_query_request {
+ u32 req_id;
+ u32 request_type;
+ u32 oid;
+ u32 dev_vc_handle;
+ u32 info_buflen;
+ u32 info_buf_offset;
+};
+
+/* CoNdisMiniportSetRequest message */
+struct rcondis_mp_set_request {
+ u32 req_id;
+ u32 request_type;
+ u32 oid;
+ u32 dev_vc_handle;
+ u32 info_buflen;
+ u32 info_buf_offset;
+};
+
+/* CoNdisIndicateStatus message */
+struct rcondis_indicate_status {
+ u32 ndis_vc_handle;
+ u32 status;
+ u32 status_buflen;
+ u32 status_buf_offset;
+};
+
+/* CONDIS Call/VC parameters */
+struct rcondis_specific_parameters {
+ u32 parameter_type;
+ u32 parameter_length;
+ u32 parameter_lffset;
+};
+
+struct rcondis_media_parameters {
+ u32 flags;
+ u32 reserved1;
+ u32 reserved2;
+ struct rcondis_specific_parameters media_specific;
+};
+
+struct rndis_flowspec {
+ u32 token_rate;
+ u32 token_bucket_size;
+ u32 peak_bandwidth;
+ u32 latency;
+ u32 delay_variation;
+ u32 service_type;
+ u32 max_sdu_size;
+ u32 minimum_policed_size;
+};
+
+struct rcondis_call_manager_parameters {
+ struct rndis_flowspec transmit;
+ struct rndis_flowspec receive;
+ struct rcondis_specific_parameters call_mgr_specific;
+};
+
+/* CoNdisMiniportActivateVc message */
+struct rcondis_mp_activate_vc_request {
+ u32 req_id;
+ u32 flags;
+ u32 dev_vc_handle;
+ u32 media_params_offset;
+ u32 media_params_length;
+ u32 call_mgr_params_offset;
+ u32 call_mgr_params_length;
+};
+
+/* Response to CoNdisMiniportActivateVc */
+struct rcondis_mp_activate_vc_complete {
+ u32 req_id;
+ u32 status;
+};
+
+/* CoNdisMiniportDeactivateVc message */
+struct rcondis_mp_deactivate_vc_request {
+ u32 req_id;
+ u32 flags;
+ u32 dev_vc_handle;
+};
+
+/* Response to CoNdisMiniportDeactivateVc */
+struct rcondis_mp_deactivate_vc_complete {
+ u32 req_id;
+ u32 status;
+};
+
+
+/* union with all of the RNDIS messages */
+union rndis_message_container {
+ struct rndis_packet pkt;
+ struct rndis_initialize_request init_req;
+ struct rndis_halt_request halt_req;
+ struct rndis_query_request query_req;
+ struct rndis_set_request set_req;
+ struct rndis_reset_request reset_req;
+ struct rndis_keepalive_request keep_alive_req;
+ struct rndis_indicate_status indicate_status;
+ struct rndis_initialize_complete init_complete;
+ struct rndis_query_complete query_complete;
+ struct rndis_set_complete set_complete;
+ struct rndis_reset_complete reset_complete;
+ struct rndis_keepalive_complete keep_alive_complete;
+ struct rcondis_mp_create_vc co_miniport_create_vc;
+ struct rcondis_mp_delete_vc co_miniport_delete_vc;
+ struct rcondis_indicate_status co_indicate_status;
+ struct rcondis_mp_activate_vc_request co_miniport_activate_vc;
+ struct rcondis_mp_deactivate_vc_request co_miniport_deactivate_vc;
+ struct rcondis_mp_create_vc_complete co_miniport_create_vc_complete;
+ struct rcondis_mp_delete_vc_complete co_miniport_delete_vc_complete;
+ struct rcondis_mp_activate_vc_complete co_miniport_activate_vc_complete;
+ struct rcondis_mp_deactivate_vc_complete
+ co_miniport_deactivate_vc_complete;
+};
+
+/* Remote NDIS message format */
+struct rndis_message {
+ u32 ndis_msg_type;
+
+ /* Total length of this message, from the beginning */
+ /* of the struct rndis_message, in bytes. */
+ u32 msg_len;
+
+ /* Actual message */
+ union rndis_message_container msg;
+};
+
+
+/* Handy macros */
+
+/* get the size of an RNDIS message. Pass in the message type, */
+/* struct rndis_set_request, struct rndis_packet for example */
+#define RNDIS_MESSAGE_SIZE(msg) \
+ (sizeof(msg) + (sizeof(struct rndis_message) - \
+ sizeof(union rndis_message_container)))
+
+#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
+ sizeof(union rndis_message_container))
+
+#define RNDIS_AND_PPI_SIZE (sizeof(struct rndis_message) + NDIS_ALL_PPI_SIZE)
+
+#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
+#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
+#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
+#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
+#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
+#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
+#define NDIS_PACKET_TYPE_SMT 0x00000040
+#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
+#define NDIS_PACKET_TYPE_GROUP 0x00000100
+#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00000200
+#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
+#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
+
+#define TRANSPORT_INFO_NOT_IP 0
+#define TRANSPORT_INFO_IPV4_TCP 0x01
+#define TRANSPORT_INFO_IPV4_UDP 0x02
+#define TRANSPORT_INFO_IPV6_TCP 0x10
+#define TRANSPORT_INFO_IPV6_UDP 0x20
+
+#define RETRY_US_LO 5000
+#define RETRY_US_HI 10000
+#define RETRY_MAX 2000 /* >10 sec */
+
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet);
+#endif /* _HYPERV_NET_H */
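
For orientation, the structures above are what netvsc.c assembles into on-the-wire NVSP messages. The sketch below is illustrative only and is not part of this patch (the helper name example_send_nvsp_init is hypothetical); assuming this header and <linux/hyperv.h>, it shows how the version-negotiation request defined above could be populated and handed to vmbus_sendpacket():

static int example_send_nvsp_init(struct hv_device *device,
				  struct netvsc_device *net_device)
{
	struct nvsp_message *init_packet = &net_device->channel_init_pkt;
	int ret;

	/* Offer the full range of NVSP versions this VSC understands */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = NVSP_PROTOCOL_VERSION_1;
	init_packet->msg.init_msg.init.max_protocol_ver = NVSP_PROTOCOL_VERSION_61;

	/* Request a completion so the VSP's INIT_COMPLETE wakes the waiter */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	/* Assumes the completion handler copied the reply into channel_init_pkt */
	return init_packet->msg.init_msg.init_complete.status == NVSP_STAT_SUCCESS
		? 0 : -EPROTO;
}

The real negotiation and buffer setup are performed in netvsc.c below; the sketch only illustrates how the nested unions in struct nvsp_message are addressed.
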
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
new file mode 100644
index 000000000..da737d959
--- /dev/null
+++ b/drivers/net/hyperv/netvsc.c
@@ -0,0 +1,1856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/vmalloc.h>
+#include <linux/rtnetlink.h>
+#include <linux/prefetch.h>
+#include <linux/filter.h>
+
+#include <asm/sync_bitops.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv_net.h"
+#include "netvsc_trace.h"
+
+/*
+ * Switch the data path from the synthetic interface to the VF
+ * interface.
+ */
+int netvsc_switch_datapath(struct net_device *ndev, bool vf)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct hv_device *dev = net_device_ctx->device_ctx;
+ struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
+ struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+ int ret, retry = 0;
+
+ /* Block sending traffic to VF if it's about to be gone */
+ if (!vf)
+ net_device_ctx->data_path_is_vf = vf;
+
+ memset(init_pkt, 0, sizeof(struct nvsp_message));
+ init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
+ if (vf)
+ init_pkt->msg.v4_msg.active_dp.active_datapath =
+ NVSP_DATAPATH_VF;
+ else
+ init_pkt->msg.v4_msg.active_dp.active_datapath =
+ NVSP_DATAPATH_SYNTHETIC;
+
+again:
+ trace_nvsp_send(ndev, init_pkt);
+
+ ret = vmbus_sendpacket(dev->channel, init_pkt,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+	/* If we failed to switch to/from the VF, let data_path_is_vf stay
+	 * false, so we use the synthetic path to send data.
+	 */
+ if (ret) {
+ if (ret != -EAGAIN) {
+ netdev_err(ndev,
+ "Unable to send sw datapath msg, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (retry++ < RETRY_MAX) {
+ usleep_range(RETRY_US_LO, RETRY_US_HI);
+ goto again;
+ } else {
+ netdev_err(
+ ndev,
+ "Retry failed to send sw datapath msg, err: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ wait_for_completion(&nv_dev->channel_init_wait);
+ net_device_ctx->data_path_is_vf = vf;
+
+ return 0;
+}
+
+/* Worker to set up sub-channels on initial setup.
+ * The initial hotplug event occurs in softirq context
+ * and can't wait for channels.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+ struct netvsc_device *nvdev =
+ container_of(w, struct netvsc_device, subchan_work);
+ struct rndis_device *rdev;
+ int i, ret;
+
+ /* Avoid deadlock with device removal already under RTNL */
+ if (!rtnl_trylock()) {
+ schedule_work(w);
+ return;
+ }
+
+ rdev = nvdev->extension;
+ if (rdev) {
+ ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
+ if (ret == 0) {
+ netif_device_attach(rdev->ndev);
+ } else {
+ /* fallback to only primary channel */
+ for (i = 1; i < nvdev->num_chn; i++)
+ netif_napi_del(&nvdev->chan_table[i].napi);
+
+ nvdev->max_chn = 1;
+ nvdev->num_chn = 1;
+ }
+ }
+
+ rtnl_unlock();
+}
+
+static struct netvsc_device *alloc_net_device(void)
+{
+ struct netvsc_device *net_device;
+
+ net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
+ if (!net_device)
+ return NULL;
+
+ init_waitqueue_head(&net_device->wait_drain);
+ net_device->destroy = false;
+ net_device->tx_disable = true;
+
+ net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+ net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+
+ init_completion(&net_device->channel_init_wait);
+ init_waitqueue_head(&net_device->subchan_open);
+ INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
+
+ return net_device;
+}
+
+static void free_netvsc_device(struct rcu_head *head)
+{
+ struct netvsc_device *nvdev
+ = container_of(head, struct netvsc_device, rcu);
+ int i;
+
+ kfree(nvdev->extension);
+
+ if (nvdev->recv_original_buf)
+ vfree(nvdev->recv_original_buf);
+ else
+ vfree(nvdev->recv_buf);
+
+ if (nvdev->send_original_buf)
+ vfree(nvdev->send_original_buf);
+ else
+ vfree(nvdev->send_buf);
+
+ bitmap_free(nvdev->send_section_map);
+
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
+ kfree(nvdev->chan_table[i].recv_buf);
+ vfree(nvdev->chan_table[i].mrc.slots);
+ }
+
+ kfree(nvdev);
+}
+
+static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
+{
+ call_rcu(&nvdev->rcu, free_netvsc_device);
+}
+
+static void netvsc_revoke_recv_buf(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ struct nvsp_message *revoke_packet;
+ int ret;
+
+	/*
+	 * If we got a section count, it means we received a
+	 * SendReceiveBufferComplete msg (i.e. we sent a
+	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
+	 * to send a revoke msg here.
+	 */
+ if (net_device->recv_section_cnt) {
+ /* Send the revoke receive buffer */
+ revoke_packet = &net_device->revoke_packet;
+ memset(revoke_packet, 0, sizeof(struct nvsp_message));
+
+ revoke_packet->hdr.msg_type =
+ NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
+ revoke_packet->msg.v1_msg.
+ revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
+
+ trace_nvsp_send(ndev, revoke_packet);
+
+ ret = vmbus_sendpacket(device->channel,
+ revoke_packet,
+ sizeof(struct nvsp_message),
+ VMBUS_RQST_ID_NO_RESPONSE,
+ VM_PKT_DATA_INBAND, 0);
+ /* If the failure is because the channel is rescinded;
+ * ignore the failure since we cannot send on a rescinded
+ * channel. This would allow us to properly cleanup
+ * even when the channel is rescinded.
+ */
+ if (device->channel->rescind)
+ ret = 0;
+ /*
+ * If we failed here, we might as well return and
+ * have a leak rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev, "unable to send "
+ "revoke receive buffer to netvsp\n");
+ return;
+ }
+ net_device->recv_section_cnt = 0;
+ }
+}
+
+static void netvsc_revoke_send_buf(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ struct nvsp_message *revoke_packet;
+ int ret;
+
+	/* Deal with the send buffer we may have set up.
+	 * If we got a send section size, it means we received a
+	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
+	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
+	 * to send a revoke msg here.
+	 */
+ if (net_device->send_section_cnt) {
+		/* Send the revoke send buffer */
+ revoke_packet = &net_device->revoke_packet;
+ memset(revoke_packet, 0, sizeof(struct nvsp_message));
+
+ revoke_packet->hdr.msg_type =
+ NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+ revoke_packet->msg.v1_msg.revoke_send_buf.id =
+ NETVSC_SEND_BUFFER_ID;
+
+ trace_nvsp_send(ndev, revoke_packet);
+
+ ret = vmbus_sendpacket(device->channel,
+ revoke_packet,
+ sizeof(struct nvsp_message),
+ VMBUS_RQST_ID_NO_RESPONSE,
+ VM_PKT_DATA_INBAND, 0);
+
+ /* If the failure is because the channel is rescinded;
+ * ignore the failure since we cannot send on a rescinded
+ * channel. This would allow us to properly cleanup
+ * even when the channel is rescinded.
+ */
+ if (device->channel->rescind)
+ ret = 0;
+
+ /* If we failed here, we might as well return and
+ * have a leak rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev, "unable to send "
+ "revoke send buffer to netvsp\n");
+ return;
+ }
+ net_device->send_section_cnt = 0;
+ }
+}
+
+static void netvsc_teardown_recv_gpadl(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ int ret;
+
+ if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
+ ret = vmbus_teardown_gpadl(device->channel,
+ &net_device->recv_buf_gpadl_handle);
+
+ /* If we failed here, we might as well return and have a leak
+ * rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to teardown receive buffer's gpadl\n");
+ return;
+ }
+ }
+}
+
+static void netvsc_teardown_send_gpadl(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ int ret;
+
+ if (net_device->send_buf_gpadl_handle.gpadl_handle) {
+ ret = vmbus_teardown_gpadl(device->channel,
+ &net_device->send_buf_gpadl_handle);
+
+ /* If we failed here, we might as well return and have a leak
+ * rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to teardown send buffer's gpadl\n");
+ return;
+ }
+ }
+}
+
+int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
+{
+ struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
+ int node = cpu_to_node(nvchan->channel->target_cpu);
+ size_t size;
+
+ size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
+ nvchan->mrc.slots = vzalloc_node(size, node);
+ if (!nvchan->mrc.slots)
+ nvchan->mrc.slots = vzalloc(size);
+
+ return nvchan->mrc.slots ? 0 : -ENOMEM;
+}
+
+static int netvsc_init_buf(struct hv_device *device,
+ struct netvsc_device *net_device,
+ const struct netvsc_device_info *device_info)
+{
+ struct nvsp_1_message_send_receive_buffer_complete *resp;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct nvsp_message *init_packet;
+ unsigned int buf_size;
+ int i, ret = 0;
+ void *vaddr;
+
+ /* Get receive buffer area. */
+ buf_size = device_info->recv_sections * device_info->recv_section_size;
+ buf_size = roundup(buf_size, PAGE_SIZE);
+
+ /* Legacy hosts only allow smaller receive buffer */
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+ buf_size = min_t(unsigned int, buf_size,
+ NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
+
+ net_device->recv_buf = vzalloc(buf_size);
+ if (!net_device->recv_buf) {
+ netdev_err(ndev,
+ "unable to allocate receive buffer of size %u\n",
+ buf_size);
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->recv_buf_size = buf_size;
+
+ /*
+ * Establish the gpadl handle for this buffer on this
+ * channel. Note: This call uses the vmbus connection rather
+ * than the channel to establish the gpadl handle.
+ */
+ ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
+ buf_size,
+ &net_device->recv_buf_gpadl_handle);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to establish receive buffer's gpadl\n");
+ goto cleanup;
+ }
+
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->recv_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->recv_original_buf = net_device->recv_buf;
+ net_device->recv_buf = vaddr;
+ }
+
+ /* Notify the NetVsp of the gpadl handle */
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
+ init_packet->msg.v1_msg.send_recv_buf.
+ gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
+ init_packet->msg.v1_msg.
+ send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
+
+ trace_nvsp_send(ndev, init_packet);
+
+ /* Send the gpadl notification request */
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to send receive buffer's gpadl to netvsp\n");
+ goto cleanup;
+ }
+
+ wait_for_completion(&net_device->channel_init_wait);
+
+ /* Check the response */
+ resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
+ if (resp->status != NVSP_STAT_SUCCESS) {
+ netdev_err(ndev,
+ "Unable to complete receive buffer initialization with NetVsp - status %d\n",
+ resp->status);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Parse the response */
+ netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
+ resp->num_sections, resp->sections[0].sub_alloc_size,
+ resp->sections[0].num_sub_allocs);
+
+ /* There should only be one section for the entire receive buffer */
+ if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ net_device->recv_section_size = resp->sections[0].sub_alloc_size;
+ net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
+
+ /* Ensure buffer will not overflow */
+ if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
+ (u64)net_device->recv_section_cnt > (u64)buf_size) {
+ netdev_err(ndev, "invalid recv_section_size %u\n",
+ net_device->recv_section_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ struct netvsc_channel *nvchan = &net_device->chan_table[i];
+
+ nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
+ if (nvchan->recv_buf == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
+
+ /* Set up the receive completion ring.
+ * Add 1 to the recv_section_cnt because at least one entry in a
+ * ring buffer has to be empty.
+ */
+ net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
+ ret = netvsc_alloc_recv_comp_ring(net_device, 0);
+ if (ret)
+ goto cleanup;
+
+ /* Now setup the send buffer. */
+ buf_size = device_info->send_sections * device_info->send_section_size;
+ buf_size = round_up(buf_size, PAGE_SIZE);
+
+ net_device->send_buf = vzalloc(buf_size);
+ if (!net_device->send_buf) {
+ netdev_err(ndev, "unable to allocate send buffer of size %u\n",
+ buf_size);
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ net_device->send_buf_size = buf_size;
+
+ /* Establish the gpadl handle for this buffer on this
+ * channel. Note: This call uses the vmbus connection rather
+ * than the channel to establish the gpadl handle.
+ */
+ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+ buf_size,
+ &net_device->send_buf_gpadl_handle);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to establish send buffer's gpadl\n");
+ goto cleanup;
+ }
+
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->send_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->send_original_buf = net_device->send_buf;
+ net_device->send_buf = vaddr;
+ }
+
+ /* Notify the NetVsp of the gpadl handle */
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+ init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
+ net_device->send_buf_gpadl_handle.gpadl_handle;
+ init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
+
+ trace_nvsp_send(ndev, init_packet);
+
+ /* Send the gpadl notification request */
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to send send buffer's gpadl to netvsp\n");
+ goto cleanup;
+ }
+
+ wait_for_completion(&net_device->channel_init_wait);
+
+ /* Check the response */
+ if (init_packet->msg.v1_msg.
+ send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
+ netdev_err(ndev, "Unable to complete send buffer "
+ "initialization with NetVsp - status %d\n",
+ init_packet->msg.v1_msg.
+ send_send_buf_complete.status);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Parse the response */
+ net_device->send_section_size = init_packet->msg.
+ v1_msg.send_send_buf_complete.section_size;
+ if (net_device->send_section_size < NETVSC_MTU_MIN) {
+ netdev_err(ndev, "invalid send_section_size %u\n",
+ net_device->send_section_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Section count is simply the size divided by the section size. */
+ net_device->send_section_cnt = buf_size / net_device->send_section_size;
+
+ netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
+ net_device->send_section_size, net_device->send_section_cnt);
+
+ /* Set up state for managing the send buffer. */
+ net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
+ GFP_KERNEL);
+ if (!net_device->send_section_map) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ goto exit;
+
+cleanup:
+ netvsc_revoke_recv_buf(device, net_device, ndev);
+ netvsc_revoke_send_buf(device, net_device, ndev);
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
+
+exit:
+ return ret;
+}
+
+/* Negotiate NVSP protocol version */
+static int negotiate_nvsp_ver(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct nvsp_message *init_packet,
+ u32 nvsp_ver)
+{
+ struct net_device *ndev = hv_get_drvdata(device);
+ int ret;
+
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
+ init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
+ init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
+ trace_nvsp_send(ndev, init_packet);
+
+ /* Send the init request */
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ return ret;
+
+ wait_for_completion(&net_device->channel_init_wait);
+
+ if (init_packet->msg.init_msg.init_complete.status !=
+ NVSP_STAT_SUCCESS)
+ return -EINVAL;
+
+ if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
+ return 0;
+
+ /* NVSPv2 or later: Send NDIS config */
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
+ init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
+
+ if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
+ if (hv_is_isolation_supported())
+ netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
+ else
+ init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
+ /* Teaming bit is needed to receive link speed updates */
+ init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
+ }
+
+ if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
+ init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
+
+ trace_nvsp_send(ndev, init_packet);
+
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ VMBUS_RQST_ID_NO_RESPONSE,
+ VM_PKT_DATA_INBAND, 0);
+
+ return ret;
+}
+
+static int netvsc_connect_vsp(struct hv_device *device,
+ struct netvsc_device *net_device,
+ const struct netvsc_device_info *device_info)
+{
+ struct net_device *ndev = hv_get_drvdata(device);
+ static const u32 ver_list[] = {
+ NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+ NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
+ NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
+ };
+ struct nvsp_message *init_packet;
+ int ndis_version, i, ret;
+
+ init_packet = &net_device->channel_init_pkt;
+
+ /* Negotiate the latest NVSP protocol supported */
+ for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ ver_list[i]) == 0) {
+ net_device->nvsp_version = ver_list[i];
+ break;
+ }
+
+ if (i < 0) {
+ ret = -EPROTO;
+ goto cleanup;
+ }
+
+ if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
+ netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
+ net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
+ ret = -EPROTO;
+ goto cleanup;
+ }
+
+ pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
+
+ /* Send the ndis version */
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+ ndis_version = 0x00060001;
+ else
+ ndis_version = 0x0006001e;
+
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
+ init_packet->msg.v1_msg.
+ send_ndis_ver.ndis_major_ver =
+ (ndis_version & 0xFFFF0000) >> 16;
+ init_packet->msg.v1_msg.
+ send_ndis_ver.ndis_minor_ver =
+ ndis_version & 0xFFFF;
+
+ trace_nvsp_send(ndev, init_packet);
+
+ /* Send the init request */
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ VMBUS_RQST_ID_NO_RESPONSE,
+ VM_PKT_DATA_INBAND, 0);
+ if (ret != 0)
+ goto cleanup;
+
+
+ ret = netvsc_init_buf(device, net_device, device_info);
+
+cleanup:
+ return ret;
+}
+
+/*
+ * netvsc_device_remove - Callback when the root bus device is removed
+ */
+void netvsc_device_remove(struct hv_device *device)
+{
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device
+ = rtnl_dereference(net_device_ctx->nvdev);
+ int i;
+
+ /*
+ * Revoke receive buffer. If host is pre-Win2016 then tear down
+ * receive buffer GPADL. Do the same for send buffer.
+ */
+ netvsc_revoke_recv_buf(device, net_device, ndev);
+ if (vmbus_proto_version < VERSION_WIN10)
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+
+ netvsc_revoke_send_buf(device, net_device, ndev);
+ if (vmbus_proto_version < VERSION_WIN10)
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
+
+ RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
+
+ /* Disable NAPI and disassociate its context from the device. */
+ for (i = 0; i < net_device->num_chn; i++) {
+ /* See also vmbus_reset_channel_cb(). */
+ napi_disable(&net_device->chan_table[i].napi);
+ netif_napi_del(&net_device->chan_table[i].napi);
+ }
+
+ /*
+ * At this point, no one should be accessing net_device
+ * except in here
+ */
+ netdev_dbg(ndev, "net device safe to remove\n");
+
+ /* Now, we can close the channel safely */
+ vmbus_close(device->channel);
+
+ /*
+ * If host is Win2016 or higher then we do the GPADL tear down
+ * here after VMBus is closed.
+ */
+ if (vmbus_proto_version >= VERSION_WIN10) {
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
+ }
+
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
+ /* Release all resources */
+ free_netvsc_device_rcu(net_device);
+}
+
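+/* Hysteresis thresholds, as a percentage of free ring-buffer space,
+ * used to stop and later wake the transmit queue.
+ */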
+#define RING_AVAIL_PERCENT_HIWATER 20
+#define RING_AVAIL_PERCENT_LOWATER 10
+
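+/* Return a send-buffer section to the free pool by clearing its bit in
+ * the section map; the bit was set by netvsc_get_next_send_section().
+ */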
+static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
+ u32 index)
+{
+ sync_change_bit(index, net_device->send_section_map);
+}
+
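+/* Completion handler for RNDIS data packets: free the send-buffer
+ * section, update per-queue statistics, release the skb, and wake the
+ * transmit queue once enough ring space is available again.
+ */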
+static void netvsc_send_tx_complete(struct net_device *ndev,
+ struct netvsc_device *net_device,
+ struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *desc,
+ int budget)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u16 q_idx = 0;
+ int queue_sends;
+ u64 cmd_rqst;
+
+ cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
+ return;
+ }
+
+ skb = (struct sk_buff *)(unsigned long)cmd_rqst;
+
+ /* Notify the layer above us */
+ if (likely(skb)) {
+ struct hv_netvsc_packet *packet
+ = (struct hv_netvsc_packet *)skb->cb;
+ u32 send_index = packet->send_buf_index;
+ struct netvsc_stats_tx *tx_stats;
+
+ if (send_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, send_index);
+ q_idx = packet->q_idx;
+
+ tx_stats = &net_device->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets += packet->total_packets;
+ tx_stats->bytes += packet->total_bytes;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
+ napi_consume_skb(skb, budget);
+ }
+
+ queue_sends =
+ atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
+
+ if (unlikely(net_device->destroy)) {
+ if (queue_sends == 0)
+ wake_up(&net_device->wait_drain);
+ } else {
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
+
+ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
+ (hv_get_avail_to_write_percent(&channel->outbound) >
+ RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
+ netif_tx_wake_queue(txq);
+ ndev_ctx->eth_stats.wake_queue++;
+ }
+ }
+}
+
+static void netvsc_send_completion(struct net_device *ndev,
+ struct netvsc_device *net_device,
+ struct vmbus_channel *incoming_channel,
+ const struct vmpacket_descriptor *desc,
+ int budget)
+{
+ const struct nvsp_message *nvsp_packet;
+ u32 msglen = hv_pkt_datalen(desc);
+ struct nvsp_message *pkt_rqst;
+ u64 cmd_rqst;
+ u32 status;
+
+ /* First check if this is a VMBUS completion without data payload */
+ if (!msglen) {
+ cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
+ desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
+ return;
+ }
+
+ pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
+ switch (pkt_rqst->hdr.msg_type) {
+ case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
+ complete(&net_device->channel_init_wait);
+ break;
+
+ default:
+ netdev_err(ndev, "Unexpected VMBUS completion!!\n");
+ }
+ return;
+ }
+
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
+ return;
+ }
+
+ nvsp_packet = hv_pkt_data(desc);
+ switch (nvsp_packet->hdr.msg_type) {
+ case NVSP_MSG_TYPE_INIT_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_message_init_complete)) {
+ netdev_err(ndev, "nvsp_msg length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
+ case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+ netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
+ case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
+ netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
+ case NVSP_MSG5_TYPE_SUBCHANNEL:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_5_subchannel_complete)) {
+ netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
+ msglen);
+ return;
+ }
+ /* Copy the response back */
+ memcpy(&net_device->channel_init_pkt, nvsp_packet,
+ sizeof(struct nvsp_message));
+ complete(&net_device->channel_init_wait);
+ break;
+
+ case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
+ msglen);
+ return;
+ }
+
+ /* If status indicates an error, output a message so we know
+ * there's a problem. But process the completion anyway so the
+ * resources are released.
+ */
+ status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
+ if (status != NVSP_STAT_SUCCESS && net_ratelimit())
+ netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
+ status);
+
+ netvsc_send_tx_complete(ndev, net_device, incoming_channel,
+ desc, budget);
+ break;
+
+ default:
+ netdev_err(ndev,
+ "Unknown send completion type %d received!!\n",
+ nvsp_packet->hdr.msg_type);
+ }
+}
+
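+/* Atomically claim a free section of the pre-registered send buffer;
+ * returns NETVSC_INVALID_INDEX when every section is in use.
+ */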
+static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
+{
+ unsigned long *map_addr = net_device->send_section_map;
+ unsigned int i;
+
+ for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
+ if (sync_test_and_set_bit(i, map_addr) == 0)
+ return i;
+ }
+
+ return NETVSC_INVALID_INDEX;
+}
+
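+/* Copy the RNDIS message and, when not doing a partial copy, the packet
+ * data into the chosen send-buffer section, padding the tail to the
+ * packet alignment when more packets are expected in this burst.
+ */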
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ unsigned int section_index,
+ u32 pend_size,
+ struct hv_netvsc_packet *packet,
+ struct rndis_message *rndis_msg,
+ struct hv_page_buffer *pb,
+ bool xmit_more)
+{
+ char *start = net_device->send_buf;
+ char *dest = start + (section_index * net_device->send_section_size)
+ + pend_size;
+ int i;
+ u32 padding = 0;
+ u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ u32 remain;
+
+ /* Add padding */
+ remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+ if (xmit_more && remain) {
+ padding = net_device->pkt_align - remain;
+ rndis_msg->msg_len += padding;
+ packet->total_data_buflen += padding;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
+ u32 offset = pb[i].offset;
+ u32 len = pb[i].len;
+
+ memcpy(dest, (src + offset), len);
+ dest += len;
+ }
+
+ if (padding)
+ memset(dest, 0, padding);
+}
+
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet)
+{
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return;
+
+ if (!packet->dma_range)
+ return;
+
+ for (i = 0; i < packet->page_buf_cnt; i++)
+ dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
+ packet->dma_range[i].mapping_size,
+ DMA_TO_DEVICE);
+
+ kfree(packet->dma_range);
+}
+
+/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
+ * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
+ * VM.
+ *
+ * In an isolation VM, the netvsc send buffer has been marked visible
+ * to the host, so data copied into the send buffer doesn't need a
+ * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
+ * may not be copied to the send buffer, so these pages need to be
+ * mapped with the swiotlb bounce buffer; netvsc_dma_map() does that.
+ * The pfns in the struct hv_page_buffer need to be converted to the
+ * bounce buffer's pfns. The loop here is necessary because the
+ * entries in the page buffer array are not necessarily full
+ * pages of data: each entry has its own offset and len, which may be
+ * non-zero even for entries in the middle of the array, and the
+ * entries are not physically contiguous. So each entry must be
+ * mapped individually rather than as one contiguous unit, which is
+ * why dma_map_sg() is not used here.
+ */
+static int netvsc_dma_map(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet,
+ struct hv_page_buffer *pb)
+{
+ u32 page_count = packet->page_buf_cnt;
+ dma_addr_t dma;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return 0;
+
+ packet->dma_range = kcalloc(page_count,
+ sizeof(*packet->dma_range),
+ GFP_ATOMIC);
+ if (!packet->dma_range)
+ return -ENOMEM;
+
+ for (i = 0; i < page_count; i++) {
+ char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+ + pb[i].offset);
+ u32 len = pb[i].len;
+
+ dma = dma_map_single(&hv_dev->device, src, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hv_dev->device, dma)) {
+ kfree(packet->dma_range);
+ return -ENOMEM;
+ }
+
+ /* pb[].offset and pb[].len are not changed during dma mapping,
+ * so they are not reassigned here.
+ */
+ packet->dma_range[i].dma = dma;
+ packet->dma_range[i].mapping_size = len;
+ pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
+static inline int netvsc_send_pkt(
+ struct hv_device *device,
+ struct hv_netvsc_packet *packet,
+ struct netvsc_device *net_device,
+ struct hv_page_buffer *pb,
+ struct sk_buff *skb)
+{
+ struct nvsp_message nvmsg;
+ struct nvsp_1_message_send_rndis_packet *rpkt =
+ &nvmsg.msg.v1_msg.send_rndis_pkt;
+ struct netvsc_channel * const nvchan =
+ &net_device->chan_table[packet->q_idx];
+ struct vmbus_channel *out_channel = nvchan->channel;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
+ u64 req_id;
+ int ret;
+ u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
+
+ memset(&nvmsg, 0, sizeof(struct nvsp_message));
+ nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
+ if (skb)
+ rpkt->channel_type = 0; /* 0 is RMC_DATA */
+ else
+ rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
+
+ rpkt->send_buf_section_index = packet->send_buf_index;
+ if (packet->send_buf_index == NETVSC_INVALID_INDEX)
+ rpkt->send_buf_section_size = 0;
+ else
+ rpkt->send_buf_section_size = packet->total_data_buflen;
+
+ req_id = (ulong)skb;
+
+ if (out_channel->rescind)
+ return -ENODEV;
+
+ trace_nvsp_send_pkt(ndev, out_channel, rpkt);
+
+ packet->dma_range = NULL;
+ if (packet->page_buf_cnt) {
+ if (packet->cp_partial)
+ pb += packet->rmsg_pgcnt;
+
+ ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
+ if (ret) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+ ret = vmbus_sendpacket_pagebuffer(out_channel,
+ pb, packet->page_buf_cnt,
+ &nvmsg, sizeof(nvmsg),
+ req_id);
+
+ if (ret)
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
+ } else {
+ ret = vmbus_sendpacket(out_channel,
+ &nvmsg, sizeof(nvmsg),
+ req_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ }
+
+exit:
+ if (ret == 0) {
+ atomic_inc_return(&nvchan->queue_sends);
+
+ if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+ netif_tx_stop_queue(txq);
+ ndev_ctx->eth_stats.stop_queue++;
+ }
+ } else if (ret == -EAGAIN) {
+ netif_tx_stop_queue(txq);
+ ndev_ctx->eth_stats.stop_queue++;
+ } else {
+ netdev_err(ndev,
+ "Unable to send packet pages %u len %u, ret %d\n",
+ packet->page_buf_cnt, packet->total_data_buflen,
+ ret);
+ }
+
+ if (netif_tx_queue_stopped(txq) &&
+ atomic_read(&nvchan->queue_sends) < 1 &&
+ !net_device->tx_disable) {
+ netif_tx_wake_queue(txq);
+ ndev_ctx->eth_stats.wake_queue++;
+ if (ret == -EAGAIN)
+ ret = -ENOSPC;
+ }
+
+ return ret;
+}
+
+/* Move packet out of multi send data (msd), and clear msd */
+static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
+ struct sk_buff **msd_skb,
+ struct multi_send_data *msdp)
+{
+ *msd_skb = msdp->skb;
+ *msd_send = msdp->pkt;
+ msdp->skb = NULL;
+ msdp->pkt = NULL;
+ msdp->count = 0;
+}
+
+/* RCU already held by caller */
+/* Batching/bouncing logic attempts to optimize
+ * performance.
+ *
+ * For small, non-LSO packets we copy the packet to a send buffer
+ * which is pre-registered with the Hyper-V side. This enables the
+ * hypervisor to avoid remapping the aperture to access the packet
+ * descriptor and data.
+ *
+ * If we already started using a buffer and the netdev is transmitting
+ * a burst of packets, keep on copying into the buffer until it is
+ * full or we are done collecting a burst. If there is an existing
+ * buffer with space for the RNDIS descriptor but not the packet, copy
+ * the RNDIS descriptor to the buffer, keeping the packet in place.
+ *
+ * If we do batching and send more than one packet using a single
+ * NetVSC message, free the SKBs of the packets copied, except for the
+ * last packet. This is done to streamline the handling of the case
+ * where the last packet only had the RNDIS descriptor copied to the
+ * send buffer, with the data pointers included in the NetVSC message.
+ */
+int netvsc_send(struct net_device *ndev,
+ struct hv_netvsc_packet *packet,
+ struct rndis_message *rndis_msg,
+ struct hv_page_buffer *pb,
+ struct sk_buff *skb,
+ bool xdp_tx)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device
+ = rcu_dereference_bh(ndev_ctx->nvdev);
+ struct hv_device *device = ndev_ctx->device_ctx;
+ int ret = 0;
+ struct netvsc_channel *nvchan;
+ u32 pktlen = packet->total_data_buflen, msd_len = 0;
+ unsigned int section_index = NETVSC_INVALID_INDEX;
+ struct multi_send_data *msdp;
+ struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+ struct sk_buff *msd_skb = NULL;
+ bool try_batch, xmit_more;
+
+ /* If device is rescinded, return error and packet will get dropped. */
+ if (unlikely(!net_device || net_device->destroy))
+ return -ENODEV;
+
+ nvchan = &net_device->chan_table[packet->q_idx];
+ packet->send_buf_index = NETVSC_INVALID_INDEX;
+ packet->cp_partial = false;
+
+ /* Send a control message or XDP packet directly without accessing
+ * the msd (Multi-Send Data) field, which may be changed during data
+ * packet processing.
+ */
+ if (!skb || xdp_tx)
+ return netvsc_send_pkt(device, packet, net_device, pb, skb);
+
+ /* batch packets in send buffer if possible */
+ msdp = &nvchan->msd;
+ if (msdp->pkt)
+ msd_len = msdp->pkt->total_data_buflen;
+
+ try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
+ if (try_batch && msd_len + pktlen + net_device->pkt_align <
+ net_device->send_section_size) {
+ section_index = msdp->pkt->send_buf_index;
+
+ } else if (try_batch && msd_len + packet->rmsg_size <
+ net_device->send_section_size) {
+ section_index = msdp->pkt->send_buf_index;
+ packet->cp_partial = true;
+
+ } else if (pktlen + net_device->pkt_align <
+ net_device->send_section_size) {
+ section_index = netvsc_get_next_send_section(net_device);
+ if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
+ ++ndev_ctx->eth_stats.tx_send_full;
+ } else {
+ move_pkt_msd(&msd_send, &msd_skb, msdp);
+ msd_len = 0;
+ }
+ }
+
+ /* Keep aggregating only if the stack says more data is coming,
+ * we are not doing a mixed-mode send, and the queue is not
+ * flow blocked.
+ */
+ xmit_more = netdev_xmit_more() &&
+ !packet->cp_partial &&
+ !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
+ if (section_index != NETVSC_INVALID_INDEX) {
+ netvsc_copy_to_send_buf(net_device,
+ section_index, msd_len,
+ packet, rndis_msg, pb, xmit_more);
+
+ packet->send_buf_index = section_index;
+
+ if (packet->cp_partial) {
+ packet->page_buf_cnt -= packet->rmsg_pgcnt;
+ packet->total_data_buflen = msd_len + packet->rmsg_size;
+ } else {
+ packet->page_buf_cnt = 0;
+ packet->total_data_buflen += msd_len;
+ }
+
+ if (msdp->pkt) {
+ packet->total_packets += msdp->pkt->total_packets;
+ packet->total_bytes += msdp->pkt->total_bytes;
+ }
+
+ if (msdp->skb)
+ dev_consume_skb_any(msdp->skb);
+
+ if (xmit_more) {
+ msdp->skb = skb;
+ msdp->pkt = packet;
+ msdp->count++;
+ } else {
+ cur_send = packet;
+ msdp->skb = NULL;
+ msdp->pkt = NULL;
+ msdp->count = 0;
+ }
+ } else {
+ move_pkt_msd(&msd_send, &msd_skb, msdp);
+ cur_send = packet;
+ }
+
+ if (msd_send) {
+ int m_ret = netvsc_send_pkt(device, msd_send, net_device,
+ NULL, msd_skb);
+
+ if (m_ret != 0) {
+ netvsc_free_send_slot(net_device,
+ msd_send->send_buf_index);
+ dev_kfree_skb_any(msd_skb);
+ }
+ }
+
+ if (cur_send)
+ ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
+
+ if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, section_index);
+
+ return ret;
+}
+
+/* Send pending recv completions */
+static int send_recv_completions(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_channel *nvchan)
+{
+ struct multi_recv_comp *mrc = &nvchan->mrc;
+ struct recv_comp_msg {
+ struct nvsp_message_header hdr;
+ u32 status;
+ } __packed;
+ struct recv_comp_msg msg = {
+ .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
+ };
+ int ret;
+
+ while (mrc->first != mrc->next) {
+ const struct recv_comp_data *rcd
+ = mrc->slots + mrc->first;
+
+ msg.status = rcd->status;
+ ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
+ rcd->tid, VM_PKT_COMP, 0);
+ if (unlikely(ret)) {
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+
+ ++ndev_ctx->eth_stats.rx_comp_busy;
+ return ret;
+ }
+
+ if (++mrc->first == nvdev->recv_completion_cnt)
+ mrc->first = 0;
+ }
+
+ /* receive completion ring has been emptied */
+ if (unlikely(nvdev->destroy))
+ wake_up(&nvdev->wait_drain);
+
+ return 0;
+}
+
+/* Count how many receive completions are outstanding */
+static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
+ const struct multi_recv_comp *mrc,
+ u32 *filled, u32 *avail)
+{
+ u32 count = nvdev->recv_completion_cnt;
+
+ if (mrc->next >= mrc->first)
+ *filled = mrc->next - mrc->first;
+ else
+ *filled = (count - mrc->first) + mrc->next;
+
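+ /* One slot is kept empty so a full ring can be told from an empty one */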
+ *avail = count - *filled - 1;
+}
+
+/* Add receive complete to ring to send to host. */
+static void enq_receive_complete(struct net_device *ndev,
+ struct netvsc_device *nvdev, u16 q_idx,
+ u64 tid, u32 status)
+{
+ struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
+ struct multi_recv_comp *mrc = &nvchan->mrc;
+ struct recv_comp_data *rcd;
+ u32 filled, avail;
+
+ recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
+
+ if (unlikely(filled > NAPI_POLL_WEIGHT)) {
+ send_recv_completions(ndev, nvdev, nvchan);
+ recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
+ }
+
+ if (unlikely(!avail)) {
+ netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
+ q_idx, tid);
+ return;
+ }
+
+ rcd = mrc->slots + mrc->next;
+ rcd->tid = tid;
+ rcd->status = status;
+
+ if (++mrc->next == nvdev->recv_completion_cnt)
+ mrc->next = 0;
+}
+
+static int netvsc_receive(struct net_device *ndev,
+ struct netvsc_device *net_device,
+ struct netvsc_channel *nvchan,
+ const struct vmpacket_descriptor *desc)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct vmbus_channel *channel = nvchan->channel;
+ const struct vmtransfer_page_packet_header *vmxferpage_packet
+ = container_of(desc, const struct vmtransfer_page_packet_header, d);
+ const struct nvsp_message *nvsp = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ char *recv_buf = net_device->recv_buf;
+ u32 status = NVSP_STAT_SUCCESS;
+ int i;
+ int count = 0;
+
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "invalid nvsp header, length too small: %u\n",
+ msglen);
+ return 0;
+ }
+
+ /* Make sure this is a valid nvsp packet */
+ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Unknown nvsp packet type received %u\n",
+ nvsp->hdr.msg_type);
+ return 0;
+ }
+
+ /* Validate xfer page pkt header */
+ if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Invalid xfer page pkt, offset too small: %u\n",
+ desc->offset8 << 3);
+ return 0;
+ }
+
+ if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Invalid xfer page set id - expecting %x got %x\n",
+ NETVSC_RECEIVE_BUFFER_ID,
+ vmxferpage_packet->xfer_pageset_id);
+ return 0;
+ }
+
+ count = vmxferpage_packet->range_cnt;
+
+ /* Check count for a valid value */
+ if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Range count is not valid: %d\n",
+ count);
+ return 0;
+ }
+
+ /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
+ for (i = 0; i < count; i++) {
+ u32 offset = vmxferpage_packet->ranges[i].byte_offset;
+ u32 buflen = vmxferpage_packet->ranges[i].byte_count;
+ void *data;
+ int ret;
+
+ if (unlikely(offset > net_device->recv_buf_size ||
+ buflen > net_device->recv_buf_size - offset)) {
+ nvchan->rsc.cnt = 0;
+ status = NVSP_STAT_FAIL;
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Packet offset:%u + len:%u too big\n",
+ offset, buflen);
+
+ continue;
+ }
+
+ /* We're going to copy (sections of) the packet into nvchan->recv_buf;
+ * make sure that nvchan->recv_buf is large enough to hold the packet.
+ */
+ if (unlikely(buflen > net_device->recv_section_size)) {
+ nvchan->rsc.cnt = 0;
+ status = NVSP_STAT_FAIL;
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Packet too big: buflen=%u recv_section_size=%u\n",
+ buflen, net_device->recv_section_size);
+
+ continue;
+ }
+
+ data = recv_buf + offset;
+
+ nvchan->rsc.is_last = (i == count - 1);
+
+ trace_rndis_recv(ndev, q_idx, data);
+
+ /* Pass it to the upper layer */
+ ret = rndis_filter_receive(ndev, net_device,
+ nvchan, data, buflen);
+
+ if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+ /* Drop incomplete packet */
+ nvchan->rsc.cnt = 0;
+ status = NVSP_STAT_FAIL;
+ }
+ }
+
+ enq_receive_complete(ndev, net_device, q_idx,
+ vmxferpage_packet->d.trans_id, status);
+
+ return count;
+}
+
+static void netvsc_send_table(struct net_device *ndev,
+ struct netvsc_device *nvscdev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ u32 count, offset, *tab;
+ int i;
+
+ /* Ensure packet is big enough to read send_table fields */
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_5_send_indirect_table)) {
+ netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
+ return;
+ }
+
+ count = nvmsg->msg.v5_msg.send_table.count;
+ offset = nvmsg->msg.v5_msg.send_table.offset;
+
+ if (count != VRSS_SEND_TAB_SIZE) {
+ netdev_err(ndev, "Received wrong send-table size:%u\n", count);
+ return;
+ }
+
+ /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
+ * wrong due to a host bug. So fix the offset here.
+ */
+ if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
+ msglen >= sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
+ offset = sizeof(struct nvsp_message_header) +
+ sizeof(union nvsp_6_message_uber);
+
+ /* Boundary check for all versions */
+ if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
+ netdev_err(ndev, "Received send-table offset too big:%u\n",
+ offset);
+ return;
+ }
+
+ tab = (void *)nvmsg + offset;
+
+ for (i = 0; i < count; i++)
+ net_device_ctx->tx_table[i] = tab[i];
+}
+
+static void netvsc_send_vf(struct net_device *ndev,
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+
+ /* Ensure packet is big enough to read its fields */
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_4_send_vf_association)) {
+ netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
+ return;
+ }
+
+ net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+ net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+ if (net_device_ctx->vf_alloc)
+ complete(&net_device_ctx->vf_add);
+
+ netdev_info(ndev, "VF slot %u %s\n",
+ net_device_ctx->vf_serial,
+ net_device_ctx->vf_alloc ? "added" : "removed");
+}
+
+static void netvsc_receive_inband(struct net_device *ndev,
+ struct netvsc_device *nvscdev,
+ const struct vmpacket_descriptor *desc)
+{
+ const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
+
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
+ return;
+ }
+
+ switch (nvmsg->hdr.msg_type) {
+ case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+ netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
+ break;
+
+ case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+ if (hv_is_isolation_supported())
+ netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
+ else
+ netvsc_send_vf(ndev, nvmsg, msglen);
+ break;
+ }
+}
+
+static int netvsc_process_raw_pkt(struct hv_device *device,
+ struct netvsc_channel *nvchan,
+ struct netvsc_device *net_device,
+ struct net_device *ndev,
+ const struct vmpacket_descriptor *desc,
+ int budget)
+{
+ struct vmbus_channel *channel = nvchan->channel;
+ const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+
+ trace_nvsp_recv(ndev, channel, nvmsg);
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+ netvsc_send_completion(ndev, net_device, channel, desc, budget);
+ break;
+
+ case VM_PKT_DATA_USING_XFER_PAGES:
+ return netvsc_receive(ndev, net_device, nvchan, desc);
+
+ case VM_PKT_DATA_INBAND:
+ netvsc_receive_inband(ndev, net_device, desc);
+ break;
+
+ default:
+ netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
+ desc->type, desc->trans_id);
+ break;
+ }
+
+ return 0;
+}
+
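+/* Sub-channels do not carry a device object of their own; resolve it
+ * through the primary channel when needed.
+ */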
+static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
+{
+ struct vmbus_channel *primary = channel->primary_channel;
+
+ return primary ? primary->device_obj : channel->device_obj;
+}
+
+/* Network processing softirq
+ * Process data in incoming ring buffer from host
+ * Stops when ring is empty or budget is met or exceeded.
+ */
+int netvsc_poll(struct napi_struct *napi, int budget)
+{
+ struct netvsc_channel *nvchan
+ = container_of(napi, struct netvsc_channel, napi);
+ struct netvsc_device *net_device = nvchan->net_device;
+ struct vmbus_channel *channel = nvchan->channel;
+ struct hv_device *device = netvsc_channel_to_device(channel);
+ struct net_device *ndev = hv_get_drvdata(device);
+ int work_done = 0;
+ int ret;
+
+ /* If starting a new interval */
+ if (!nvchan->desc)
+ nvchan->desc = hv_pkt_iter_first(channel);
+
+ nvchan->xdp_flush = false;
+
+ while (nvchan->desc && work_done < budget) {
+ work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
+ ndev, nvchan->desc, budget);
+ nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
+ }
+
+ if (nvchan->xdp_flush)
+ xdp_do_flush();
+
+ /* Send any pending receive completions */
+ ret = send_recv_completions(ndev, net_device, nvchan);
+
+ /* If we did not exhaust the NAPI budget this time and are not busy
+ * polling, re-enable host interrupts and reschedule if the ring is
+ * not empty or sending a receive completion failed.
+ */
+ if (work_done < budget &&
+ napi_complete_done(napi, work_done) &&
+ (ret || hv_end_read(&channel->inbound)) &&
+ napi_schedule_prep(napi)) {
+ hv_begin_read(&channel->inbound);
+ __napi_schedule(napi);
+ }
+
+ /* Driver may overshoot since multiple packets per descriptor */
+ return min(work_done, budget);
+}
+
+/* Call back when data is available in host ring buffer.
+ * Processing is deferred until network softirq (NAPI)
+ */
+void netvsc_channel_cb(void *context)
+{
+ struct netvsc_channel *nvchan = context;
+ struct vmbus_channel *channel = nvchan->channel;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ /* preload first vmpacket descriptor */
+ prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
+
+ if (napi_schedule_prep(&nvchan->napi)) {
+ /* disable interrupts from host */
+ hv_begin_read(rbi);
+
+ __napi_schedule_irqoff(&nvchan->napi);
+ }
+}
+
+/*
+ * netvsc_device_add - Callback when the device belonging to this
+ * driver is added
+ */
+struct netvsc_device *netvsc_device_add(struct hv_device *device,
+ const struct netvsc_device_info *device_info)
+{
+ int i, ret = 0;
+ struct netvsc_device *net_device;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+
+ net_device = alloc_net_device();
+ if (!net_device)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+ net_device_ctx->tx_table[i] = 0;
+
+ /* Because the device uses NAPI, all the interrupt batching and
+ * control is done via the NET softirq, not in the channel callback.
+ */
+ set_channel_read_mode(device->channel, HV_CALL_ISR);
+
+ /* If we're reopening the device, we may have multiple queues; fill
+ * the chn_table with the default channel so it can be used before the
+ * subchannels are opened.
+ * Initialize the channel state before we open: we can be interrupted
+ * as soon as the channel is opened.
+ */
+
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ struct netvsc_channel *nvchan = &net_device->chan_table[i];
+
+ nvchan->channel = device->channel;
+ nvchan->net_device = net_device;
+ u64_stats_init(&nvchan->tx_stats.syncp);
+ u64_stats_init(&nvchan->rx_stats.syncp);
+
+ ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
+
+ if (ret) {
+ netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
+ goto cleanup2;
+ }
+
+ ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (ret) {
+ netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
+ goto cleanup2;
+ }
+ }
+
+ /* Enable NAPI handler before init callbacks */
+ netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
+
+ /* Open the channel */
+ device->channel->next_request_id_callback = vmbus_next_request_id;
+ device->channel->request_addr_callback = vmbus_request_addr;
+ device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
+ device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+
+ ret = vmbus_open(device->channel, netvsc_ring_bytes,
+ netvsc_ring_bytes, NULL, 0,
+ netvsc_channel_cb, net_device->chan_table);
+
+ if (ret != 0) {
+ netdev_err(ndev, "unable to open channel: %d\n", ret);
+ goto cleanup;
+ }
+
+ /* Channel is opened */
+ netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
+
+ napi_enable(&net_device->chan_table[0].napi);
+
+ /* Connect with the NetVsp */
+ ret = netvsc_connect_vsp(device, net_device, device_info);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to connect to NetVSP - %d\n", ret);
+ goto close;
+ }
+
+ /* Writing the nvdev pointer unlocks netvsc_send(), so make sure
+ * chn_table is populated before this point.
+ */
+ rcu_assign_pointer(net_device_ctx->nvdev, net_device);
+
+ return net_device;
+
+close:
+ RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
+ napi_disable(&net_device->chan_table[0].napi);
+
+ /* Now, we can close the channel safely */
+ vmbus_close(device->channel);
+
+cleanup:
+ netif_napi_del(&net_device->chan_table[0].napi);
+
+cleanup2:
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
+ free_netvsc_device(&net_device->rcu);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
new file mode 100644
index 000000000..4a9522689
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/netpoll.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/kernel.h>
+#include <net/xdp.h>
+
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+
+#include "hyperv_net.h"
+
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
+{
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
+ void *data = nvchan->rsc.data[0];
+ u32 len = nvchan->rsc.len[0];
+ struct page *page = NULL;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+ bool drop = true;
+
+ xdp->data_hard_start = NULL;
+
+ rcu_read_lock();
+ prog = rcu_dereference(nvchan->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ /* Ensure that the below memcpy() won't overflow the page buffer. */
+ if (len > ndev->mtu + ETH_HLEN) {
+ act = XDP_DROP;
+ goto out;
+ }
+
+ /* allocate page buffer for data */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page) {
+ act = XDP_DROP;
+ goto out;
+ }
+
+ xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
+
+ memcpy(xdp->data, data, len);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ drop = false;
+ break;
+
+ case XDP_DROP:
+ break;
+
+ case XDP_REDIRECT:
+ if (!xdp_do_redirect(ndev, xdp, prog)) {
+ nvchan->xdp_flush = true;
+ drop = false;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+
+ rx_stats->xdp_redirect++;
+ rx_stats->packets++;
+ rx_stats->bytes += nvchan->rsc.pktlen;
+
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ } else {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+ }
+
+ fallthrough;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ if (page && drop) {
+ __free_page(page);
+ xdp->data_hard_start = NULL;
+ }
+
+ return act;
+}
+
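+/* Worst-case buffer length needed for an XDP frame of the given payload
+ * length, including the aligned skb_shared_info tail room.
+ */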
+unsigned int netvsc_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
+{
+ return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
+}
+
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev)
+{
+ struct bpf_prog *old_prog;
+ int buf_max, i;
+
+ old_prog = netvsc_xdp_get(nvdev);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
+ dev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && (dev->features & NETIF_F_LRO)) {
+ netdev_err(dev, "XDP: not support LRO\n");
+ NL_SET_ERR_MSG_MOD(extack, "XDP: not support LRO");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog)
+ bpf_prog_add(prog, nvdev->num_chn - 1);
+
+ for (i = 0; i < nvdev->num_chn; i++)
+ rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < nvdev->num_chn; i++)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
+{
+ struct netdev_bpf xdp;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (!vf_netdev)
+ return 0;
+
+ if (!vf_netdev->netdev_ops->ndo_bpf)
+ return 0;
+
+ memset(&xdp, 0, sizeof(xdp));
+
+ if (prog)
+ bpf_prog_inc(prog);
+
+ xdp.command = XDP_SETUP_PROG;
+ xdp.prog = prog;
+
+ ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
+
+ if (ret && prog)
+ bpf_prog_put(prog);
+
+ return ret;
+}
+
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct net_device_context *ndevctx = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
+ struct netlink_ext_ack *extack = bpf->extack;
+ int ret;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);
+
+ if (ret)
+ return ret;
+
+ ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);
+
+ if (ret) {
+ netdev_err(dev, "vf_setxdp failed:%d\n", ret);
+ NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");
+
+ netvsc_xdp_set(dev, NULL, extack, nvdev);
+ }
+
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
+ struct xdp_frame *frame, u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ netvsc_get_hash(skb, netdev_priv(ndev));
+
+ skb_record_rx_queue(skb, q_idx);
+
+ netvsc_xdp_xmit(skb, ndev);
+
+ return 0;
+}
+
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ const struct net_device_ops *vf_ops;
+ struct netvsc_stats_tx *tx_stats;
+ struct netvsc_device *nvsc_dev;
+ struct net_device *vf_netdev;
+ int i, count = 0;
+ u16 q_idx;
+
+ /* Don't transmit if netvsc_device is gone */
+ nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
+ if (unlikely(!nvsc_dev || nvsc_dev->destroy))
+ return 0;
+
+ /* If the VF is present and up, then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+ * If netpoll is in use, then the VF cannot be used either.
+ */
+ vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
+ vf_netdev->netdev_ops->ndo_xdp_xmit &&
+ ndev_ctx->data_path_is_vf) {
+ vf_ops = vf_netdev->netdev_ops;
+ return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
+ }
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
new file mode 100644
index 000000000..c1aac6ceb
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -0,0 +1,2823 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/atomic.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/netpoll.h>
+#include <linux/bpf.h>
+
+#include <net/arp.h>
+#include <net/route.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "hyperv_net.h"
+
+#define RING_SIZE_MIN 64
+
+#define LINKCHANGE_INT (2 * HZ)
+#define VF_TAKEOVER_INT (HZ / 10)
+
+static unsigned int ring_size __ro_after_init = 128;
+module_param(ring_size, uint, 0444);
+MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
+unsigned int netvsc_ring_bytes __ro_after_init;
+
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR;
+
+static int debug = -1;
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static LIST_HEAD(netvsc_dev_list);
+
+static void netvsc_change_rx_flags(struct net_device *net, int change)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ int inc;
+
+ if (!vf_netdev)
+ return;
+
+ if (change & IFF_PROMISC) {
+ inc = (net->flags & IFF_PROMISC) ? 1 : -1;
+ dev_set_promiscuity(vf_netdev, inc);
+ }
+
+ if (change & IFF_ALLMULTI) {
+ inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
+ dev_set_allmulti(vf_netdev, inc);
+ }
+}
+
+static void netvsc_set_rx_mode(struct net_device *net)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct net_device *vf_netdev;
+ struct netvsc_device *nvdev;
+
+ rcu_read_lock();
+ vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
+ if (vf_netdev) {
+ dev_uc_sync(vf_netdev, net);
+ dev_mc_sync(vf_netdev, net);
+ }
+
+ nvdev = rcu_dereference(ndev_ctx->nvdev);
+ if (nvdev)
+ rndis_filter_update(nvdev);
+ rcu_read_unlock();
+}
+
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ nvscdev->tx_disable = false;
+ virt_wmb(); /* ensure queue wake up mechanism is on */
+
+ netif_tx_wake_all_queues(ndev);
+}
+
+static int netvsc_open(struct net_device *net)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ struct rndis_device *rdev;
+ int ret = 0;
+
+ netif_carrier_off(net);
+
+ /* Open up the device */
+ ret = rndis_filter_open(nvdev);
+ if (ret != 0) {
+ netdev_err(net, "unable to open device (ret %d).\n", ret);
+ return ret;
+ }
+
+ rdev = nvdev->extension;
+ if (!rdev->link_state) {
+ netif_carrier_on(net);
+ netvsc_tx_enable(nvdev, net);
+ }
+
+ if (vf_netdev) {
+ /* Setting the synthetic device up transparently sets the
+ * slave as up. If open fails, then the slave will still
+ * be offline (and not used).
+ */
+ ret = dev_open(vf_netdev, NULL);
+ if (ret)
+ netdev_warn(net,
+ "unable to open slave: %s: %d\n",
+ vf_netdev->name, ret);
+ }
+ return 0;
+}
+
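+/* Poll every channel until both the inbound and outbound ring buffers
+ * are drained, or give up with -ETIMEDOUT after RETRY_MAX attempts.
+ */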
+static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
+{
+ unsigned int retry = 0;
+ int i;
+
+ /* Ensure pending bytes in ring are read */
+ for (;;) {
+ u32 aread = 0;
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ struct vmbus_channel *chn
+ = nvdev->chan_table[i].channel;
+
+ if (!chn)
+ continue;
+
+ /* make sure receive is not running now */
+ napi_synchronize(&nvdev->chan_table[i].napi);
+
+ aread = hv_get_bytes_to_read(&chn->inbound);
+ if (aread)
+ break;
+
+ aread = hv_get_bytes_to_read(&chn->outbound);
+ if (aread)
+ break;
+ }
+
+ if (aread == 0)
+ return 0;
+
+ if (++retry > RETRY_MAX)
+ return -ETIMEDOUT;
+
+ usleep_range(RETRY_US_LO, RETRY_US_HI);
+ }
+}
+
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ if (nvscdev) {
+ nvscdev->tx_disable = true;
+ virt_wmb(); /* ensure txq will not wake up after stop */
+ }
+
+ netif_tx_disable(ndev);
+}
+
+static int netvsc_close(struct net_device *net)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct net_device *vf_netdev
+ = rtnl_dereference(net_device_ctx->vf_netdev);
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+ int ret;
+
+ netvsc_tx_disable(nvdev, net);
+
+ /* No need to close rndis filter if it is removed already */
+ if (!nvdev)
+ return 0;
+
+ ret = rndis_filter_close(nvdev);
+ if (ret != 0) {
+ netdev_err(net, "unable to close device (ret %d).\n", ret);
+ return ret;
+ }
+
+ ret = netvsc_wait_until_empty(nvdev);
+ if (ret)
+ netdev_err(net, "Ring buffer not empty after closing rndis\n");
+
+ if (vf_netdev)
+ dev_close(vf_netdev);
+
+ return ret;
+}
+
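+/* Append a per-packet info (PPI) element to the RNDIS message and
+ * return a pointer to its payload area.
+ */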
+static inline void *init_ppi_data(struct rndis_message *msg,
+ u32 ppi_size, u32 pkt_type)
+{
+ struct rndis_packet *rndis_pkt = &msg->msg.pkt;
+ struct rndis_per_packet_info *ppi;
+
+ rndis_pkt->data_offset += ppi_size;
+ ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+ + rndis_pkt->per_pkt_info_len;
+
+ ppi->size = ppi_size;
+ ppi->type = pkt_type;
+ ppi->internal = 0;
+ ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
+
+ rndis_pkt->per_pkt_info_len += ppi_size;
+
+ return ppi + 1;
+}
+
+static inline int netvsc_get_tx_queue(struct net_device *ndev,
+ struct sk_buff *skb, int old_idx)
+{
+ const struct net_device_context *ndc = netdev_priv(ndev);
+ struct sock *sk = skb->sk;
+ int q_idx;
+
+ q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
+ (VRSS_SEND_TAB_SIZE - 1)];
+
+ /* If queue index changed record the new value */
+ if (q_idx != old_idx &&
+ sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, q_idx);
+
+ return q_idx;
+}
+
+/*
+ * Select queue for transmit.
+ *
+ * If a valid queue has already been assigned, then use that.
+ * Otherwise compute tx queue based on hash and the send table.
+ *
+ * This is basically similar to default (netdev_pick_tx) with the added step
+ * of using the host send_table when no other queue has been assigned.
+ *
+ * TODO support XPS - but get_xps_queue not exported
+ */
+static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
+{
+ int q_idx = sk_tx_queue_get(skb->sk);
+
+ if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
+ /* If forwarding a packet, we use the recorded queue when
+ * available for better cache locality.
+ */
+ if (skb_rx_queue_recorded(skb))
+ q_idx = skb_get_rx_queue(skb);
+ else
+ q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
+ }
+
+ return q_idx;
+}
+
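+/* ndo_select_queue: if a VF is bonded, delegate queue selection to it and
+ * remember its choice; otherwise use the send-table based pick. The result is
+ * folded into the range of real tx queues.
+ */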
+static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct net_device_context *ndc = netdev_priv(ndev);
+ struct net_device *vf_netdev;
+ u16 txq;
+
+ rcu_read_lock();
+ vf_netdev = rcu_dereference(ndc->vf_netdev);
+ if (vf_netdev) {
+ const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
+
+ if (vf_ops->ndo_select_queue)
+ txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
+ else
+ txq = netdev_pick_tx(vf_netdev, skb, NULL);
+
+		/* Record the queue selected by the VF so that it can be
+		 * used for the common case where the VF has more queues
+		 * than the synthetic device.
+		 */
+ qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
+ } else {
+ txq = netvsc_pick_tx(ndev, skb);
+ }
+ rcu_read_unlock();
+
+ while (txq >= ndev->real_num_tx_queues)
+ txq -= ndev->real_num_tx_queues;
+
+ return txq;
+}
+
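+/* Describe a buffer that may span several Hyper-V pages as a series of
+ * hv_page_buffer entries (pfn, offset, length); returns the number of
+ * entries written.
+ */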
+static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
+ struct hv_page_buffer *pb)
+{
+ int j = 0;
+
+ hvpfn += offset >> HV_HYP_PAGE_SHIFT;
+ offset = offset & ~HV_HYP_PAGE_MASK;
+
+ while (len > 0) {
+ unsigned long bytes;
+
+ bytes = HV_HYP_PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+ pb[j].pfn = hvpfn;
+ pb[j].offset = offset;
+ pb[j].len = bytes;
+
+ offset += bytes;
+ len -= bytes;
+
+ if (offset == HV_HYP_PAGE_SIZE && len) {
+ hvpfn++;
+ offset = 0;
+ j++;
+ }
+ }
+
+ return j + 1;
+}
+
+static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+ struct hv_netvsc_packet *packet,
+ struct hv_page_buffer *pb)
+{
+ u32 slots_used = 0;
+ char *data = skb->data;
+ int frags = skb_shinfo(skb)->nr_frags;
+ int i;
+
+ /* The packet is laid out thus:
+ * 1. hdr: RNDIS header and PPI
+ * 2. skb linear data
+ * 3. skb fragment data
+ */
+ slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
+ offset_in_hvpage(hdr),
+ len,
+ &pb[slots_used]);
+
+ packet->rmsg_size = len;
+ packet->rmsg_pgcnt = slots_used;
+
+ slots_used += fill_pg_buf(virt_to_hvpfn(data),
+ offset_in_hvpage(data),
+ skb_headlen(skb),
+ &pb[slots_used]);
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+ slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ &pb[slots_used]);
+ }
+ return slots_used;
+}
+
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+ int i, frags = skb_shinfo(skb)->nr_frags;
+ int pages = 0;
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ unsigned long size = skb_frag_size(frag);
+ unsigned long offset = skb_frag_off(frag);
+
+		/* Skip unused space at the start of the page */
+ offset &= ~HV_HYP_PAGE_MASK;
+ pages += HVPFN_UP(offset + size);
+ }
+ return pages;
+}
+
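+/* Total number of Hyper-V page buffer slots needed to describe the skb:
+ * the linear data plus all paged fragments.
+ */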
+static int netvsc_get_slots(struct sk_buff *skb)
+{
+ char *data = skb->data;
+ unsigned int offset = offset_in_hvpage(data);
+ unsigned int len = skb_headlen(skb);
+ int slots;
+ int frag_slots;
+
+ slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
+ frag_slots = count_skb_frag_slots(skb);
+ return slots + frag_slots;
+}
+
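+/* Classify the packet's L3/L4 protocols so the caller can check whether the
+ * host offers checksum offload for this kind of flow.
+ */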
+static u32 net_checksum_info(struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip = ip_hdr(skb);
+
+ if (ip->protocol == IPPROTO_TCP)
+ return TRANSPORT_INFO_IPV4_TCP;
+ else if (ip->protocol == IPPROTO_UDP)
+ return TRANSPORT_INFO_IPV4_UDP;
+ } else {
+ struct ipv6hdr *ip6 = ipv6_hdr(skb);
+
+ if (ip6->nexthdr == IPPROTO_TCP)
+ return TRANSPORT_INFO_IPV6_TCP;
+ else if (ip6->nexthdr == IPPROTO_UDP)
+ return TRANSPORT_INFO_IPV6_UDP;
+ }
+
+ return TRANSPORT_INFO_NOT_IP;
+}
+
+/* Send skb on the slave VF device. */
+static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
+ struct sk_buff *skb)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ unsigned int len = skb->len;
+ int rc;
+
+ skb->dev = vf_netdev;
+ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+ rc = dev_queue_xmit(skb);
+ if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
+ struct netvsc_vf_pcpu_stats *pcpu_stats
+ = this_cpu_ptr(ndev_ctx->vf_stats);
+
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
+ }
+
+ return rc;
+}
+
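+/* Main transmit path for the synthetic device. If the data path has been
+ * switched to a usable VF, hand the skb to it. Otherwise build the RNDIS
+ * packet (header plus optional hash/VLAN/LSO/checksum PPIs), describe it as a
+ * page buffer list and pass it to netvsc_send().
+ */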
+static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct hv_netvsc_packet *packet = NULL;
+ int ret;
+ unsigned int num_data_pgs;
+ struct rndis_message *rndis_msg;
+ struct net_device *vf_netdev;
+ u32 rndis_msg_size;
+ u32 hash;
+ struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+	 * If netpoll is in use, then the VF cannot be used either.
+ */
+ vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
+ net_device_ctx->data_path_is_vf)
+ return netvsc_vf_xmit(net, vf_netdev, skb);
+
+	/* We will need at most two pages to describe the rndis
+	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+	 * of pages in a single packet. If the skb is scattered across
+	 * more pages, we try linearizing it.
+	 */
+
+ num_data_pgs = netvsc_get_slots(skb) + 2;
+
+ if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
+ ++net_device_ctx->eth_stats.tx_scattered;
+
+ if (skb_linearize(skb))
+ goto no_memory;
+
+ num_data_pgs = netvsc_get_slots(skb) + 2;
+ if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+ ++net_device_ctx->eth_stats.tx_too_big;
+ goto drop;
+ }
+ }
+
+ /*
+ * Place the rndis header in the skb head room and
+ * the skb->cb will be used for hv_netvsc_packet
+ * structure.
+ */
+ ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
+ if (ret)
+ goto no_memory;
+
+ /* Use the skb control buffer for building up the packet */
+ BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
+ sizeof_field(struct sk_buff, cb));
+ packet = (struct hv_netvsc_packet *)skb->cb;
+
+ packet->q_idx = skb_get_queue_mapping(skb);
+
+ packet->total_data_buflen = skb->len;
+ packet->total_bytes = skb->len;
+ packet->total_packets = 1;
+
+ rndis_msg = (struct rndis_message *)skb->head;
+
+ /* Add the rndis header */
+ rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
+ rndis_msg->msg_len = packet->total_data_buflen;
+
+ rndis_msg->msg.pkt = (struct rndis_packet) {
+ .data_offset = sizeof(struct rndis_packet),
+ .data_len = packet->total_data_buflen,
+ .per_pkt_info_offset = sizeof(struct rndis_packet),
+ };
+
+ rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+
+ hash = skb_get_hash_raw(skb);
+ if (hash != 0 && net->real_num_tx_queues > 1) {
+ u32 *hash_info;
+
+ rndis_msg_size += NDIS_HASH_PPI_SIZE;
+ hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+ NBL_HASH_VALUE);
+ *hash_info = hash;
+ }
+
+	/* When using AF_PACKET we need to drop the VLAN header from
+	 * the frame and update the SKB to allow the host OS
+	 * to transmit the 802.1Q packet.
+	 */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ u16 vlan_tci;
+
+ skb_reset_mac_header(skb);
+ if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
+ if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
+ ++net_device_ctx->eth_stats.vlan_error;
+ goto drop;
+ }
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ /* Update the NDIS header pkt lengths */
+ packet->total_data_buflen -= VLAN_HLEN;
+ packet->total_bytes -= VLAN_HLEN;
+ rndis_msg->msg_len = packet->total_data_buflen;
+ rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
+ }
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ struct ndis_pkt_8021q_info *vlan;
+
+ rndis_msg_size += NDIS_VLAN_PPI_SIZE;
+ vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+ IEEE_8021Q_INFO);
+
+ vlan->value = 0;
+ vlan->vlanid = skb_vlan_tag_get_id(skb);
+ vlan->cfi = skb_vlan_tag_get_cfi(skb);
+ vlan->pri = skb_vlan_tag_get_prio(skb);
+ }
+
+ if (skb_is_gso(skb)) {
+ struct ndis_tcp_lso_info *lso_info;
+
+ rndis_msg_size += NDIS_LSO_PPI_SIZE;
+ lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+ TCP_LARGESEND_PKTINFO);
+
+ lso_info->value = 0;
+ lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ } else {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
+ tcp_v6_gso_csum_prep(skb);
+ }
+ lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
+ lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
+ struct ndis_tcp_ip_checksum_info *csum_info;
+
+ rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+ csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+ TCPIP_CHKSUM_PKTINFO);
+
+ csum_info->value = 0;
+ csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ csum_info->transmit.is_ipv4 = 1;
+
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ csum_info->transmit.tcp_checksum = 1;
+ else
+ csum_info->transmit.udp_checksum = 1;
+ } else {
+ csum_info->transmit.is_ipv6 = 1;
+
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ csum_info->transmit.tcp_checksum = 1;
+ else
+ csum_info->transmit.udp_checksum = 1;
+ }
+ } else {
+ /* Can't do offload of this type of checksum */
+ if (skb_checksum_help(skb))
+ goto drop;
+ }
+ }
+
+ /* Start filling in the page buffers with the rndis hdr */
+ rndis_msg->msg_len += rndis_msg_size;
+ packet->total_data_buflen = rndis_msg->msg_len;
+ packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
+ skb, packet, pb);
+
+ /* timestamp packet in software */
+ skb_tx_timestamp(skb);
+
+ ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
+ if (likely(ret == 0))
+ return NETDEV_TX_OK;
+
+ if (ret == -EAGAIN) {
+ ++net_device_ctx->eth_stats.tx_busy;
+ return NETDEV_TX_BUSY;
+ }
+
+ if (ret == -ENOSPC)
+ ++net_device_ctx->eth_stats.tx_no_space;
+
+drop:
+ dev_kfree_skb_any(skb);
+ net->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+
+no_memory:
+ ++net_device_ctx->eth_stats.tx_no_memory;
+ goto drop;
+}
+
+static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ return netvsc_xmit(skb, ndev, false);
+}
+
+/*
+ * netvsc_linkstatus_callback - Link up/down notification
+ */
+void netvsc_linkstatus_callback(struct net_device *net,
+ struct rndis_message *resp,
+ void *data, u32 data_buflen)
+{
+ struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct netvsc_reconfig *event;
+ unsigned long flags;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
+ netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
+ resp->msg_len);
+ return;
+ }
+
+ /* Copy the RNDIS indicate status into nvchan->recv_buf */
+ memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));
+
+ /* Update the physical link speed when changing to another vSwitch */
+ if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
+ u32 speed;
+
+ /* Validate status_buf_offset and status_buflen.
+ *
+ * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account
+ * for the status buffer field in resp->msg_len; perform the validation
+ * using data_buflen (>= resp->msg_len).
+ */
+ if (indicate->status_buflen < sizeof(speed) ||
+ indicate->status_buf_offset < sizeof(*indicate) ||
+ data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
+ data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset
+ < indicate->status_buflen) {
+ netdev_err(net, "invalid rndis_indicate_status packet\n");
+ return;
+ }
+
+ speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
+ ndev_ctx->speed = speed;
+ return;
+ }
+
+ /* Handle these link change statuses below */
+ if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
+ indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
+ indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
+ return;
+
+ if (net->reg_state != NETREG_REGISTERED)
+ return;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->event = indicate->status;
+
+ spin_lock_irqsave(&ndev_ctx->lock, flags);
+ list_add_tail(&event->list, &ndev_ctx->reconfig_events);
+ spin_unlock_irqrestore(&ndev_ctx->lock, flags);
+
+ schedule_delayed_work(&ndev_ctx->dwork, 0);
+}
+
+/* This function should only be called after skb_record_rx_queue() */
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+
+ skb->queue_mapping = skb_get_rx_queue(skb);
+ __skb_push(skb, ETH_HLEN);
+
+ rc = netvsc_xmit(skb, ndev, true);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
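+/* Recompute the IPv4 header checksum in software; used when the host has
+ * verified the checksum but left an invalid value in the coalesced header.
+ */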
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph->check = 0;
+ iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
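+/* Build an skb for a received packet: reuse the XDP buffer when one was set
+ * up, otherwise copy the receive sections into a newly allocated skb, then
+ * apply the checksum, hash and VLAN metadata carried in the RNDIS PPIs.
+ */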
+static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
+{
+ struct napi_struct *napi = &nvchan->napi;
+ const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
+ const struct ndis_tcp_ip_checksum_info *csum_info =
+ &nvchan->rsc.csum_info;
+ const u32 *hash_info = &nvchan->rsc.hash_info;
+ u8 ppi_flags = nvchan->rsc.ppi_flags;
+ struct sk_buff *skb;
+ void *xbuf = xdp->data_hard_start;
+ int i;
+
+ if (xbuf) {
+ unsigned int hdroom = xdp->data - xdp->data_hard_start;
+ unsigned int xlen = xdp->data_end - xdp->data;
+ unsigned int frag_size = xdp->frame_sz;
+
+ skb = build_skb(xbuf, frag_size);
+
+ if (!skb) {
+ __free_page(virt_to_page(xbuf));
+ return NULL;
+ }
+
+ skb_reserve(skb, hdroom);
+ skb_put(skb, xlen);
+ skb->dev = napi->dev;
+ } else {
+ skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
+
+ if (!skb)
+ return NULL;
+
+		/* Copy to skb. This copy is needed here since the memory
+		 * pointed to by hv_netvsc_packet cannot be deallocated.
+		 */
+ for (i = 0; i < nvchan->rsc.cnt; i++)
+ skb_put_data(skb, nvchan->rsc.data[i],
+ nvchan->rsc.len[i]);
+ }
+
+ skb->protocol = eth_type_trans(skb, net);
+
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+ /* Incoming packets may have IP header checksum verified by the host.
+ * They may not have IP header checksum computed after coalescing.
+ * We compute it here if the flags are set, because on Linux, the IP
+ * checksum is always checked.
+ */
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
+ csum_info->receive.ip_checksum_succeeded &&
+ skb->protocol == htons(ETH_P_IP)) {
+ /* Check that there is enough space to hold the IP header. */
+ if (skb_headlen(skb) < sizeof(struct iphdr)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ netvsc_comp_ipcsum(skb);
+ }
+
+ /* Do L4 checksum offload if enabled and present. */
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
+ skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
+
+ if (ppi_flags & NVSC_RSC_VLAN) {
+ u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
+ (vlan->cfi ? VLAN_CFI_MASK : 0);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tci);
+ }
+
+ return skb;
+}
+
+/*
+ * netvsc_recv_callback - Callback when we receive a packet from the
+ * "wire" on the specified device.
+ */
+int netvsc_recv_callback(struct net_device *net,
+ struct netvsc_device *net_device,
+ struct netvsc_channel *nvchan)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct vmbus_channel *channel = nvchan->channel;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ struct sk_buff *skb;
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
+ struct xdp_buff xdp;
+ u32 act;
+
+ if (net->reg_state != NETREG_REGISTERED)
+ return NVSP_STAT_FAIL;
+
+ act = netvsc_run_xdp(net, nvchan, &xdp);
+
+ if (act == XDP_REDIRECT)
+ return NVSP_STAT_SUCCESS;
+
+ if (act != XDP_PASS && act != XDP_TX) {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ return NVSP_STAT_SUCCESS; /* consumed by XDP */
+ }
+
+ /* Allocate a skb - TODO direct I/O to pages? */
+ skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
+
+ if (unlikely(!skb)) {
+ ++net_device_ctx->eth_stats.rx_no_memory;
+ return NVSP_STAT_FAIL;
+ }
+
+ skb_record_rx_queue(skb, q_idx);
+
+ /*
+ * Even if injecting the packet, record the statistics
+ * on the synthetic device because modifying the VF device
+ * statistics will not work correctly.
+ */
+ u64_stats_update_begin(&rx_stats->syncp);
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+
+ rx_stats->packets++;
+ rx_stats->bytes += nvchan->rsc.pktlen;
+
+ if (skb->pkt_type == PACKET_BROADCAST)
+ ++rx_stats->broadcast;
+ else if (skb->pkt_type == PACKET_MULTICAST)
+ ++rx_stats->multicast;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ if (act == XDP_TX) {
+ netvsc_xdp_xmit(skb, net);
+ return NVSP_STAT_SUCCESS;
+ }
+
+ napi_gro_receive(&nvchan->napi, skb);
+ return NVSP_STAT_SUCCESS;
+}
+
+static void netvsc_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
+}
+
+static void netvsc_get_channels(struct net_device *net,
+ struct ethtool_channels *channel)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+
+ if (nvdev) {
+ channel->max_combined = nvdev->max_chn;
+ channel->combined_count = nvdev->num_chn;
+ }
+}
+
+/* Allocate a struct netvsc_device_info and initialize it either from an
+ * existing struct netvsc_device or from default values.
+ */
+static
+struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
+{
+ struct netvsc_device_info *dev_info;
+ struct bpf_prog *prog;
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
+
+ if (!dev_info)
+ return NULL;
+
+ if (nvdev) {
+ ASSERT_RTNL();
+
+ dev_info->num_chn = nvdev->num_chn;
+ dev_info->send_sections = nvdev->send_section_cnt;
+ dev_info->send_section_size = nvdev->send_section_size;
+ dev_info->recv_sections = nvdev->recv_section_cnt;
+ dev_info->recv_section_size = nvdev->recv_section_size;
+
+ memcpy(dev_info->rss_key, nvdev->extension->rss_key,
+ NETVSC_HASH_KEYLEN);
+
+ prog = netvsc_xdp_get(nvdev);
+ if (prog) {
+ bpf_prog_inc(prog);
+ dev_info->bprog = prog;
+ }
+ } else {
+ dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+ dev_info->send_sections = NETVSC_DEFAULT_TX;
+ dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
+ dev_info->recv_sections = NETVSC_DEFAULT_RX;
+ dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
+ }
+
+ return dev_info;
+}
+
+/* Free struct netvsc_device_info */
+static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
+{
+ if (dev_info->bprog) {
+ ASSERT_RTNL();
+ bpf_prog_put(dev_info->bprog);
+ }
+
+ kfree(dev_info);
+}
+
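+/* Tear down the synthetic data path before reconfiguration: stop tx, close
+ * the RNDIS filter, wait for the ring buffers to drain and remove the
+ * netvsc device.
+ */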
+static int netvsc_detach(struct net_device *ndev,
+ struct netvsc_device *nvdev)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndev_ctx->device_ctx;
+ int ret;
+
+	/* Don't continue trying to set up sub-channels */
+ if (cancel_work_sync(&nvdev->subchan_work))
+ nvdev->num_chn = 1;
+
+ netvsc_xdp_set(ndev, NULL, NULL, nvdev);
+
+	/* If the device was up (receiving), then shut it down */
+ if (netif_running(ndev)) {
+ netvsc_tx_disable(nvdev, ndev);
+
+ ret = rndis_filter_close(nvdev);
+ if (ret) {
+ netdev_err(ndev,
+ "unable to close device (ret %d).\n", ret);
+ return ret;
+ }
+
+ ret = netvsc_wait_until_empty(nvdev);
+ if (ret) {
+ netdev_err(ndev,
+ "Ring buffer not empty after closing rndis\n");
+ return ret;
+ }
+ }
+
+ netif_device_detach(ndev);
+
+ rndis_filter_device_remove(hdev, nvdev);
+
+ return 0;
+}
+
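+/* Counterpart of netvsc_detach(): recreate the RNDIS/netvsc device from the
+ * saved netvsc_device_info, restore any XDP program and reopen the device if
+ * the interface was running.
+ */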
+static int netvsc_attach(struct net_device *ndev,
+ struct netvsc_device_info *dev_info)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndev_ctx->device_ctx;
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
+ struct bpf_prog *prog;
+ int ret = 0;
+
+ nvdev = rndis_filter_device_add(hdev, dev_info);
+ if (IS_ERR(nvdev))
+ return PTR_ERR(nvdev);
+
+ if (nvdev->num_chn > 1) {
+ ret = rndis_set_subchannel(ndev, nvdev, dev_info);
+
+ /* if unavailable, just proceed with one queue */
+ if (ret) {
+ nvdev->max_chn = 1;
+ nvdev->num_chn = 1;
+ }
+ }
+
+ prog = dev_info->bprog;
+ if (prog) {
+ bpf_prog_inc(prog);
+ ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
+ if (ret) {
+ bpf_prog_put(prog);
+ goto err1;
+ }
+ }
+
+ /* In any case device is now ready */
+ nvdev->tx_disable = false;
+ netif_device_attach(ndev);
+
+	/* Note: enable and attach happen when sub-channels are set up */
+ netif_carrier_off(ndev);
+
+ if (netif_running(ndev)) {
+ ret = rndis_filter_open(nvdev);
+ if (ret)
+ goto err2;
+
+ rdev = nvdev->extension;
+ if (!rdev->link_state)
+ netif_carrier_on(ndev);
+ }
+
+ return 0;
+
+err2:
+ netif_device_detach(ndev);
+
+err1:
+ rndis_filter_device_remove(hdev, nvdev);
+
+ return ret;
+}
+
+static int netvsc_set_channels(struct net_device *net,
+ struct ethtool_channels *channels)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+ unsigned int orig, count = channels->combined_count;
+ struct netvsc_device_info *device_info;
+ int ret;
+
+	/* We do not support separate counts for rx, tx, or other */
+ if (count == 0 ||
+ channels->rx_count || channels->tx_count || channels->other_count)
+ return -EINVAL;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
+ return -EINVAL;
+
+ if (count > nvdev->max_chn)
+ return -EINVAL;
+
+ orig = nvdev->num_chn;
+
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
+ device_info->num_chn = count;
+
+ ret = netvsc_detach(net, nvdev);
+ if (ret)
+ goto out;
+
+ ret = netvsc_attach(net, device_info);
+ if (ret) {
+ device_info->num_chn = orig;
+ if (netvsc_attach(net, device_info))
+ netdev_err(net, "restoring channel setting failed\n");
+ }
+
+out:
+ netvsc_devinfo_put(device_info);
+ return ret;
+}
+
+static void netvsc_init_settings(struct net_device *dev)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+
+ ndc->l4_hash = HV_DEFAULT_L4HASH;
+
+ ndc->speed = SPEED_UNKNOWN;
+ ndc->duplex = DUPLEX_FULL;
+
+ dev->features = NETIF_F_LRO;
+}
+
+static int netvsc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct net_device *vf_netdev;
+
+ vf_netdev = rtnl_dereference(ndc->vf_netdev);
+
+ if (vf_netdev)
+ return __ethtool_get_link_ksettings(vf_netdev, cmd);
+
+ cmd->base.speed = ndc->speed;
+ cmd->base.duplex = ndc->duplex;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
+
+static int netvsc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
+
+ if (vf_netdev) {
+ if (!vf_netdev->ethtool_ops->set_link_ksettings)
+ return -EOPNOTSUPP;
+
+ return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
+ cmd);
+ }
+
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &ndc->speed, &ndc->duplex);
+}
+
+static int netvsc_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ int orig_mtu = ndev->mtu;
+ struct netvsc_device_info *device_info;
+ int ret = 0;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
+ /* Change MTU of underlying VF netdev first. */
+ if (vf_netdev) {
+ ret = dev_set_mtu(vf_netdev, mtu);
+ if (ret)
+ goto out;
+ }
+
+ ret = netvsc_detach(ndev, nvdev);
+ if (ret)
+ goto rollback_vf;
+
+ ndev->mtu = mtu;
+
+ ret = netvsc_attach(ndev, device_info);
+ if (!ret)
+ goto out;
+
+ /* Attempt rollback to original MTU */
+ ndev->mtu = orig_mtu;
+
+ if (netvsc_attach(ndev, device_info))
+ netdev_err(ndev, "restoring mtu failed\n");
+rollback_vf:
+ if (vf_netdev)
+ dev_set_mtu(vf_netdev, orig_mtu);
+
+out:
+ netvsc_devinfo_put(device_info);
+ return ret;
+}
+
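+/* Sum the per-cpu VF counters into a single netvsc_vf_pcpu_stats struct */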
+static void netvsc_get_vf_stats(struct net_device *net,
+ struct netvsc_vf_pcpu_stats *tot)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ int i;
+
+ memset(tot, 0, sizeof(*tot));
+
+ for_each_possible_cpu(i) {
+ const struct netvsc_vf_pcpu_stats *stats
+ = per_cpu_ptr(ndev_ctx->vf_stats, i);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ rx_packets = stats->rx_packets;
+ tx_packets = stats->tx_packets;
+ rx_bytes = stats->rx_bytes;
+ tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ tot->tx_dropped += stats->tx_dropped;
+ }
+}
+
+static void netvsc_get_pcpu_stats(struct net_device *net,
+ struct netvsc_ethtool_pcpu_stats *pcpu_tot)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+ int i;
+
+ /* fetch percpu stats of vf */
+ for_each_possible_cpu(i) {
+ const struct netvsc_vf_pcpu_stats *stats =
+ per_cpu_ptr(ndev_ctx->vf_stats, i);
+ struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ this_tot->vf_rx_packets = stats->rx_packets;
+ this_tot->vf_tx_packets = stats->tx_packets;
+ this_tot->vf_rx_bytes = stats->rx_bytes;
+ this_tot->vf_tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ this_tot->rx_packets = this_tot->vf_rx_packets;
+ this_tot->tx_packets = this_tot->vf_tx_packets;
+ this_tot->rx_bytes = this_tot->vf_rx_bytes;
+ this_tot->tx_bytes = this_tot->vf_tx_bytes;
+ }
+
+ /* fetch percpu stats of netvsc */
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
+ struct netvsc_ethtool_pcpu_stats *this_tot =
+ &pcpu_tot[nvchan->channel->target_cpu];
+ u64 packets, bytes;
+ unsigned int start;
+
+ tx_stats = &nvchan->tx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+ this_tot->tx_bytes += bytes;
+ this_tot->tx_packets += packets;
+
+ rx_stats = &nvchan->rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+ this_tot->rx_bytes += bytes;
+ this_tot->rx_packets += packets;
+ }
+}
+
+static void netvsc_get_stats64(struct net_device *net,
+ struct rtnl_link_stats64 *t)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev;
+ struct netvsc_vf_pcpu_stats vf_tot;
+ int i;
+
+ rcu_read_lock();
+
+ nvdev = rcu_dereference(ndev_ctx->nvdev);
+ if (!nvdev)
+ goto out;
+
+ netdev_stats_to_stats64(t, &net->stats);
+
+ netvsc_get_vf_stats(net, &vf_tot);
+ t->rx_packets += vf_tot.rx_packets;
+ t->tx_packets += vf_tot.tx_packets;
+ t->rx_bytes += vf_tot.rx_bytes;
+ t->tx_bytes += vf_tot.tx_bytes;
+ t->tx_dropped += vf_tot.tx_dropped;
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
+ u64 packets, bytes, multicast;
+ unsigned int start;
+
+ tx_stats = &nvchan->tx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+ t->tx_bytes += bytes;
+ t->tx_packets += packets;
+
+ rx_stats = &nvchan->rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ multicast = rx_stats->multicast + rx_stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+ t->rx_bytes += bytes;
+ t->rx_packets += packets;
+ t->multicast += multicast;
+ }
+out:
+ rcu_read_unlock();
+}
+
+static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct net_device_context *ndc = netdev_priv(ndev);
+ struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
+ struct sockaddr *addr = p;
+ int err;
+
+ err = eth_prepare_mac_addr_change(ndev, p);
+ if (err)
+ return err;
+
+ if (!nvdev)
+ return -ENODEV;
+
+ if (vf_netdev) {
+ err = dev_set_mac_address(vf_netdev, addr, NULL);
+ if (err)
+ return err;
+ }
+
+ err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
+ if (!err) {
+ eth_commit_mac_addr_change(ndev, p);
+ } else if (vf_netdev) {
+ /* rollback change on VF */
+ memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
+ dev_set_mac_address(vf_netdev, addr, NULL);
+ }
+
+ return err;
+}
+
+static const struct {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} netvsc_stats[] = {
+ { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
+ { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
+ { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
+ { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
+ { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
+ { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
+ { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+ { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
+ { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
+ { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
+ { "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
+}, pcpu_stats[] = {
+ { "cpu%u_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
+ { "cpu%u_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
+ { "cpu%u_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
+ { "cpu%u_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
+ { "cpu%u_vf_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
+ { "cpu%u_vf_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
+ { "cpu%u_vf_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
+ { "cpu%u_vf_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
+}, vf_stats[] = {
+ { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
+ { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
+ { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
+ { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
+ { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
+};
+
+#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
+#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
+
+/* per-CPU statistics (rx/tx packets/bytes, including VF counters) */
+#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
+
+/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
+
+static int netvsc_get_sset_count(struct net_device *dev, int string_set)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
+
+ if (!nvdev)
+ return -ENODEV;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return NETVSC_GLOBAL_STATS_LEN
+ + NETVSC_VF_STATS_LEN
+ + NETVSC_QUEUE_STATS_LEN(nvdev)
+ + NETVSC_PCPU_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void netvsc_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
+ const void *nds = &ndc->eth_stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
+ struct netvsc_vf_pcpu_stats sum;
+ struct netvsc_ethtool_pcpu_stats *pcpu_sum;
+ unsigned int start;
+ u64 packets, bytes;
+ u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
+ u64 xdp_xmit;
+ int i, j, cpu;
+
+ if (!nvdev)
+ return;
+
+ for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
+ data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
+
+ netvsc_get_vf_stats(dev, &sum);
+ for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
+ data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
+
+ for (j = 0; j < nvdev->num_chn; j++) {
+ tx_stats = &nvdev->chan_table[j].tx_stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ data[i++] = packets;
+ data[i++] = bytes;
+ data[i++] = xdp_xmit;
+
+ rx_stats = &nvdev->chan_table[j].rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_redirect = rx_stats->xdp_redirect;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+ data[i++] = packets;
+ data[i++] = bytes;
+ data[i++] = xdp_drop;
+ data[i++] = xdp_redirect;
+ data[i++] = xdp_tx;
+ }
+
+ pcpu_sum = kvmalloc_array(num_possible_cpus(),
+ sizeof(struct netvsc_ethtool_pcpu_stats),
+ GFP_KERNEL);
+ if (!pcpu_sum)
+ return;
+
+ netvsc_get_pcpu_stats(dev, pcpu_sum);
+ for_each_present_cpu(cpu) {
+ struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
+
+ for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
+ data[i++] = *(u64 *)((void *)this_sum
+ + pcpu_stats[j].offset);
+ }
+ kvfree(pcpu_sum);
+}
+
+static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
+ u8 *p = data;
+ int i, cpu;
+
+ if (!nvdev)
+ return;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+ ethtool_sprintf(&p, netvsc_stats[i].name);
+
+ for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
+ ethtool_sprintf(&p, vf_stats[i].name);
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
+ }
+
+ for_each_present_cpu(cpu) {
+ for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++)
+ ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
+ }
+
+ break;
+ }
+}
+
+static int
+netvsc_get_rss_hash_opts(struct net_device_context *ndc,
+ struct ethtool_rxnfc *info)
+{
+ const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
+
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (ndc->l4_hash & HV_TCP4_L4HASH)
+ info->data |= l4_flag;
+
+ break;
+
+ case TCP_V6_FLOW:
+ if (ndc->l4_hash & HV_TCP6_L4HASH)
+ info->data |= l4_flag;
+
+ break;
+
+ case UDP_V4_FLOW:
+ if (ndc->l4_hash & HV_UDP4_L4HASH)
+ info->data |= l4_flag;
+
+ break;
+
+ case UDP_V6_FLOW:
+ if (ndc->l4_hash & HV_UDP6_L4HASH)
+ info->data |= l4_flag;
+
+ break;
+
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
+
+ if (!nvdev)
+ return -ENODEV;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nvdev->num_chn;
+ return 0;
+
+ case ETHTOOL_GRXFH:
+ return netvsc_get_rss_hash_opts(ndc, info);
+ }
+ return -EOPNOTSUPP;
+}
+
+static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
+ struct ethtool_rxnfc *info)
+{
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ ndc->l4_hash |= HV_TCP4_L4HASH;
+ break;
+
+ case TCP_V6_FLOW:
+ ndc->l4_hash |= HV_TCP6_L4HASH;
+ break;
+
+ case UDP_V4_FLOW:
+ ndc->l4_hash |= HV_UDP4_L4HASH;
+ break;
+
+ case UDP_V6_FLOW:
+ ndc->l4_hash |= HV_UDP6_L4HASH;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+ }
+
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ ndc->l4_hash &= ~HV_TCP4_L4HASH;
+ break;
+
+ case TCP_V6_FLOW:
+ ndc->l4_hash &= ~HV_TCP6_L4HASH;
+ break;
+
+ case UDP_V4_FLOW:
+ ndc->l4_hash &= ~HV_UDP4_L4HASH;
+ break;
+
+ case UDP_V6_FLOW:
+ ndc->l4_hash &= ~HV_UDP6_L4HASH;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
+{
+ struct net_device_context *ndc = netdev_priv(ndev);
+
+ if (info->cmd == ETHTOOL_SRXFH)
+ return netvsc_set_rss_hash_opts(ndc, info);
+
+ return -EOPNOTSUPP;
+}
+
+static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
+{
+ return NETVSC_HASH_KEYLEN;
+}
+
+static u32 netvsc_rss_indir_size(struct net_device *dev)
+{
+ return ITAB_NUM;
+}
+
+static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
+ struct rndis_device *rndis_dev;
+ int i;
+
+ if (!ndev)
+ return -ENODEV;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+
+ rndis_dev = ndev->extension;
+ if (indir) {
+ for (i = 0; i < ITAB_NUM; i++)
+ indir[i] = ndc->rx_table[i];
+ }
+
+ if (key)
+ memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
+
+ return 0;
+}
+
+static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
+ struct rndis_device *rndis_dev;
+ int i;
+
+ if (!ndev)
+ return -ENODEV;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rndis_dev = ndev->extension;
+ if (indir) {
+ for (i = 0; i < ITAB_NUM; i++)
+ if (indir[i] >= ndev->num_chn)
+ return -EINVAL;
+
+ for (i = 0; i < ITAB_NUM; i++)
+ ndc->rx_table[i] = indir[i];
+ }
+
+ if (!key) {
+ if (!indir)
+ return 0;
+
+ key = rndis_dev->rss_key;
+ }
+
+ return rndis_filter_set_rss_param(rndis_dev, key);
+}
+
+/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
+ * It does have a pre-allocated receive area which is divided into sections.
+ */
+static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
+ struct ethtool_ringparam *ring)
+{
+ u32 max_buf_size;
+
+ ring->rx_pending = nvdev->recv_section_cnt;
+ ring->tx_pending = nvdev->send_section_cnt;
+
+ if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+ max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
+ else
+ max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+
+ ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
+ ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
+ / nvdev->send_section_size;
+}
+
+static void netvsc_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+
+ if (!nvdev)
+ return;
+
+ __netvsc_get_ringparam(nvdev, ring);
+}
+
+static int netvsc_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ struct netvsc_device_info *device_info;
+ struct ethtool_ringparam orig;
+ u32 new_tx, new_rx;
+ int ret = 0;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ memset(&orig, 0, sizeof(orig));
+ __netvsc_get_ringparam(nvdev, &orig);
+
+ new_tx = clamp_t(u32, ring->tx_pending,
+ NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
+ new_rx = clamp_t(u32, ring->rx_pending,
+ NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
+
+ if (new_tx == orig.tx_pending &&
+ new_rx == orig.rx_pending)
+ return 0; /* no change */
+
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
+ device_info->send_sections = new_tx;
+ device_info->recv_sections = new_rx;
+
+ ret = netvsc_detach(ndev, nvdev);
+ if (ret)
+ goto out;
+
+ ret = netvsc_attach(ndev, device_info);
+ if (ret) {
+ device_info->send_sections = orig.tx_pending;
+ device_info->recv_sections = orig.rx_pending;
+
+ if (netvsc_attach(ndev, device_info))
+ netdev_err(ndev, "restoring ringparam failed");
+ }
+
+out:
+ netvsc_devinfo_put(device_info);
+ return ret;
+}
+
+static netdev_features_t netvsc_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+
+ if (!nvdev || nvdev->destroy)
+ return features;
+
+ if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
+ features ^= NETIF_F_LRO;
+ netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
+ }
+
+ return features;
+}
+
+static int netvsc_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t change = features ^ ndev->features;
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
+ struct ndis_offload_params offloads;
+ int ret = 0;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ if (!(change & NETIF_F_LRO))
+ goto syncvf;
+
+ memset(&offloads, 0, sizeof(struct ndis_offload_params));
+
+ if (features & NETIF_F_LRO) {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ } else {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ }
+
+ ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
+
+ if (ret) {
+ features ^= NETIF_F_LRO;
+ ndev->features = features;
+ }
+
+syncvf:
+ if (!vf_netdev)
+ return ret;
+
+ vf_netdev->wanted_features = features;
+ netdev_update_features(vf_netdev);
+
+ return ret;
+}
+
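+/* The ethtool register dump exposes the host-provided send indirection table */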
+static int netvsc_get_regs_len(struct net_device *netdev)
+{
+ return VRSS_SEND_TAB_SIZE * sizeof(u32);
+}
+
+static void netvsc_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct net_device_context *ndc = netdev_priv(netdev);
+ u32 *regs_buff = p;
+
+	/* Increase the version if the buffer format is changed. */
+ regs->version = 1;
+
+ memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
+}
+
+static u32 netvsc_get_msglevel(struct net_device *ndev)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+
+ return ndev_ctx->msg_enable;
+}
+
+static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+
+ ndev_ctx->msg_enable = val;
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = netvsc_get_drvinfo,
+ .get_regs_len = netvsc_get_regs_len,
+ .get_regs = netvsc_get_regs,
+ .get_msglevel = netvsc_get_msglevel,
+ .set_msglevel = netvsc_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ethtool_stats = netvsc_get_ethtool_stats,
+ .get_sset_count = netvsc_get_sset_count,
+ .get_strings = netvsc_get_strings,
+ .get_channels = netvsc_get_channels,
+ .set_channels = netvsc_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_rxnfc = netvsc_get_rxnfc,
+ .set_rxnfc = netvsc_set_rxnfc,
+ .get_rxfh_key_size = netvsc_get_rxfh_key_size,
+ .get_rxfh_indir_size = netvsc_rss_indir_size,
+ .get_rxfh = netvsc_get_rxfh,
+ .set_rxfh = netvsc_set_rxfh,
+ .get_link_ksettings = netvsc_get_link_ksettings,
+ .set_link_ksettings = netvsc_set_link_ksettings,
+ .get_ringparam = netvsc_get_ringparam,
+ .set_ringparam = netvsc_set_ringparam,
+};
+
+static const struct net_device_ops device_ops = {
+ .ndo_open = netvsc_open,
+ .ndo_stop = netvsc_close,
+ .ndo_start_xmit = netvsc_start_xmit,
+ .ndo_change_rx_flags = netvsc_change_rx_flags,
+ .ndo_set_rx_mode = netvsc_set_rx_mode,
+ .ndo_fix_features = netvsc_fix_features,
+ .ndo_set_features = netvsc_set_features,
+ .ndo_change_mtu = netvsc_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = netvsc_set_mac_addr,
+ .ndo_select_queue = netvsc_select_queue,
+ .ndo_get_stats64 = netvsc_get_stats64,
+ .ndo_bpf = netvsc_bpf,
+ .ndo_xdp_xmit = netvsc_ndoxdp_xmit,
+};
+
+/*
+ * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
+ * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT, when the carrier is
+ * already present, send a GARP packet to network peers with
+ * __netdev_notify_peers().
+ */
+static void netvsc_link_change(struct work_struct *w)
+{
+ struct net_device_context *ndev_ctx =
+ container_of(w, struct net_device_context, dwork.work);
+ struct hv_device *device_obj = ndev_ctx->device_ctx;
+ struct net_device *net = hv_get_drvdata(device_obj);
+ unsigned long flags, next_reconfig, delay;
+ struct netvsc_reconfig *event = NULL;
+ struct netvsc_device *net_device;
+ struct rndis_device *rdev;
+ bool reschedule = false;
+
+	/* if changes are happening, come back later */
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+ return;
+ }
+
+ net_device = rtnl_dereference(ndev_ctx->nvdev);
+ if (!net_device)
+ goto out_unlock;
+
+ rdev = net_device->extension;
+
+ next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
+ if (time_is_after_jiffies(next_reconfig)) {
+ /* link_watch only sends one notification with current state
+ * per second, avoid doing reconfig more frequently. Handle
+ * wrap around.
+ */
+ delay = next_reconfig - jiffies;
+ delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
+ schedule_delayed_work(&ndev_ctx->dwork, delay);
+ goto out_unlock;
+ }
+ ndev_ctx->last_reconfig = jiffies;
+
+ spin_lock_irqsave(&ndev_ctx->lock, flags);
+ if (!list_empty(&ndev_ctx->reconfig_events)) {
+ event = list_first_entry(&ndev_ctx->reconfig_events,
+ struct netvsc_reconfig, list);
+ list_del(&event->list);
+ reschedule = !list_empty(&ndev_ctx->reconfig_events);
+ }
+ spin_unlock_irqrestore(&ndev_ctx->lock, flags);
+
+ if (!event)
+ goto out_unlock;
+
+ switch (event->event) {
+ /* Only the following events are possible due to the check in
+ * netvsc_linkstatus_callback()
+ */
+ case RNDIS_STATUS_MEDIA_CONNECT:
+ if (rdev->link_state) {
+ rdev->link_state = false;
+ netif_carrier_on(net);
+ netvsc_tx_enable(net_device, net);
+ } else {
+ __netdev_notify_peers(net);
+ }
+ kfree(event);
+ break;
+ case RNDIS_STATUS_MEDIA_DISCONNECT:
+ if (!rdev->link_state) {
+ rdev->link_state = true;
+ netif_carrier_off(net);
+ netvsc_tx_disable(net_device, net);
+ }
+ kfree(event);
+ break;
+ case RNDIS_STATUS_NETWORK_CHANGE:
+ /* Only makes sense if carrier is present */
+ if (!rdev->link_state) {
+ rdev->link_state = true;
+ netif_carrier_off(net);
+ netvsc_tx_disable(net_device, net);
+ event->event = RNDIS_STATUS_MEDIA_CONNECT;
+ spin_lock_irqsave(&ndev_ctx->lock, flags);
+ list_add(&event->list, &ndev_ctx->reconfig_events);
+ spin_unlock_irqrestore(&ndev_ctx->lock, flags);
+ reschedule = true;
+ }
+ break;
+ }
+
+ rtnl_unlock();
+
+ /* link_watch only sends one notification with current state per
+ * second, handle next reconfig event in 2 seconds.
+ */
+ if (reschedule)
+ schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+
+ return;
+
+out_unlock:
+ rtnl_unlock();
+}
+
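+/* Given a VF netdev, return its netvsc master device, or NULL if the upper
+ * device is not a netvsc device or the netvsc device has been removed.
+ */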
+static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
+{
+ struct net_device_context *net_device_ctx;
+ struct net_device *dev;
+
+ dev = netdev_master_upper_dev_get(vf_netdev);
+ if (!dev || dev->netdev_ops != &device_ops)
+ return NULL; /* not a netvsc device */
+
+ net_device_ctx = netdev_priv(dev);
+ if (!rtnl_dereference(net_device_ctx->nvdev))
+ return NULL; /* device is removed */
+
+ return dev;
+}
+
+/* Called when VF is injecting data into network stack.
+ * Change the associated network device from VF to netvsc.
+ * note: already called with rcu_read_lock
+ */
+static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct netvsc_vf_pcpu_stats *pcpu_stats
+ = this_cpu_ptr(ndev_ctx->vf_stats);
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return RX_HANDLER_CONSUMED;
+
+ *pskb = skb;
+
+ skb->dev = ndev;
+
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ return RX_HANDLER_ANOTHER;
+}
+
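+/* Attach the VF to the synthetic device: register the rx handler, link the
+ * VF as a slave of ndev and schedule the delayed work that completes the
+ * VF setup.
+ */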
+static int netvsc_vf_join(struct net_device *vf_netdev,
+ struct net_device *ndev)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ int ret;
+
+ ret = netdev_rx_handler_register(vf_netdev,
+ netvsc_vf_handle_frame, ndev);
+ if (ret != 0) {
+ netdev_err(vf_netdev,
+ "can not register netvsc VF receive handler (err = %d)\n",
+ ret);
+ goto rx_handler_failed;
+ }
+
+ ret = netdev_master_upper_dev_link(vf_netdev, ndev,
+ NULL, NULL, NULL);
+ if (ret != 0) {
+ netdev_err(vf_netdev,
+ "can not set master device %s (err = %d)\n",
+ ndev->name, ret);
+ goto upper_link_failed;
+ }
+
+ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+
+ call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+
+ netdev_info(vf_netdev, "joined to %s\n", ndev->name);
+ return 0;
+
+upper_link_failed:
+ netdev_rx_handler_unregister(vf_netdev);
+rx_handler_failed:
+ return ret;
+}
+
+static void __netvsc_vf_setup(struct net_device *ndev,
+ struct net_device *vf_netdev)
+{
+ int ret;
+
+ /* Align MTU of VF with master */
+ ret = dev_set_mtu(vf_netdev, ndev->mtu);
+ if (ret)
+ netdev_warn(vf_netdev,
+ "unable to change mtu to %u\n", ndev->mtu);
+
+ /* set multicast etc flags on VF */
+ dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
+
+ /* sync address list from ndev to VF */
+ netif_addr_lock_bh(ndev);
+ dev_uc_sync(vf_netdev, ndev);
+ dev_mc_sync(vf_netdev, ndev);
+ netif_addr_unlock_bh(ndev);
+
+ if (netif_running(ndev)) {
+ ret = dev_open(vf_netdev, NULL);
+ if (ret)
+ netdev_warn(vf_netdev,
+ "unable to open: %d\n", ret);
+ }
+}
+
+/* Setup VF as slave of the synthetic device.
+ * Runs in workqueue to avoid recursion in netlink callbacks.
+ */
+static void netvsc_vf_setup(struct work_struct *w)
+{
+ struct net_device_context *ndev_ctx
+ = container_of(w, struct net_device_context, vf_takeover.work);
+ struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ struct net_device *vf_netdev;
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
+ return;
+ }
+
+ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ if (vf_netdev)
+ __netvsc_vf_setup(ndev, vf_netdev);
+
+ rtnl_unlock();
+}
+
+/* Find netvsc by VF serial number.
+ * The PCI hyperv controller records the serial number as the slot kobj name.
+ */
+static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+{
+ struct device *parent = vf_netdev->dev.parent;
+ struct net_device_context *ndev_ctx;
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+ u32 serial;
+
+ if (!parent || !dev_is_pci(parent))
+ return NULL; /* not a PCI device */
+
+ pdev = to_pci_dev(parent);
+ if (!pdev->slot) {
+ netdev_notice(vf_netdev, "no PCI slot information\n");
+ return NULL;
+ }
+
+ if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
+ netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
+ pci_slot_name(pdev->slot));
+ return NULL;
+ }
+
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ if (!ndev_ctx->vf_alloc)
+ continue;
+
+ if (ndev_ctx->vf_serial != serial)
+ continue;
+
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ndev->addr_len != vf_netdev->addr_len ||
+ memcmp(ndev->perm_addr, vf_netdev->perm_addr,
+ ndev->addr_len) != 0)
+ continue;
+
+ return ndev;
+
+ }
+
+	/* Fallback path: check for a matching synthetic VF with the help of
+	 * the MAC address. Because this function can be called before
+	 * vf_netdev is initialized (NETDEV_POST_INIT), when its perm_addr has
+	 * not yet been copied from dev_addr, also try to match its dev_addr.
+	 * Note: On Hyper-V and Azure, it's not possible to set a MAC address
+	 * on a VF that matches the MAC of an unrelated NETVSC device.
+	 */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
+ ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
+ return ndev;
+ }
+
+ netdev_notice(vf_netdev,
+ "no netdev found for vf serial:%u\n", serial);
+ return NULL;
+}
+
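+/* Mark a matching VF as IFF_SLAVE before it is opened so that IPv6 addrconf
+ * is not started on it.
+ */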
+static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+
+ ndev = get_netvsc_byslot(vf_netdev);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ /* set slave flag before open to prevent IPv6 addrconf */
+ vf_netdev->flags |= IFF_SLAVE;
+ return NOTIFY_DONE;
+}
+
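+/* Called when a VF netdev is registered: find the matching netvsc device,
+ * move the VF into the same namespace if needed, join it as a slave and
+ * propagate features and any attached XDP program to it.
+ */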
+static int netvsc_register_vf(struct net_device *vf_netdev)
+{
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device *netvsc_dev;
+ struct bpf_prog *prog;
+ struct net_device *ndev;
+ int ret;
+
+ if (vf_netdev->addr_len != ETH_ALEN)
+ return NOTIFY_DONE;
+
+ ndev = get_netvsc_byslot(vf_netdev);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
+ if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
+ return NOTIFY_DONE;
+
+	/* If the synthetic interface is in a different namespace,
+	 * then move the VF to that namespace; the join will be
+	 * done again in that context.
+	 */
+ if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
+ ret = dev_change_net_namespace(vf_netdev,
+ dev_net(ndev), "eth%d");
+ if (ret)
+ netdev_err(vf_netdev,
+ "could not move to same namespace as %s: %d\n",
+ ndev->name, ret);
+ else
+ netdev_info(vf_netdev,
+ "VF moved to namespace with: %s\n",
+ ndev->name);
+ return NOTIFY_DONE;
+ }
+
+ netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
+
+ if (netvsc_vf_join(vf_netdev, ndev) != 0)
+ return NOTIFY_DONE;
+
+ dev_hold(vf_netdev);
+ rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+
+ if (ndev->needed_headroom < vf_netdev->needed_headroom)
+ ndev->needed_headroom = vf_netdev->needed_headroom;
+
+ vf_netdev->wanted_features = ndev->features;
+ netdev_update_features(vf_netdev);
+
+ prog = netvsc_xdp_get(netvsc_dev);
+ netvsc_vf_setxdp(vf_netdev, prog);
+
+ return NOTIFY_OK;
+}
+
+/* Change the data path when VF UP/DOWN/CHANGE are detected.
+ *
+ * Typically an UP or DOWN event is followed by a CHANGE event, so
+ * net_device_ctx->data_path_is_vf is used to cache the current data path
+ * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
+ * message.
+ *
+ * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
+ * interface, there is only the CHANGE event and no UP or DOWN event.
+ */
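+/* Illustrative sequence (a sketch of the common case, derived from the code
+ * below rather than from any spec): when the VF is brought up, NETDEV_UP
+ * arrives with data_path_is_vf still false, the data path is switched to the
+ * VF and the new state is cached; the NETDEV_CHANGE event that usually
+ * follows then sees the cached value and returns early with NOTIFY_OK.
+ */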
+static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
+{
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device *netvsc_dev;
+ struct net_device *ndev;
+ bool vf_is_up = false;
+ int ret;
+
+ if (event != NETDEV_GOING_DOWN)
+ vf_is_up = netif_running(vf_netdev);
+
+ ndev = get_netvsc_byref(vf_netdev);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
+ if (!netvsc_dev)
+ return NOTIFY_DONE;
+
+ if (net_device_ctx->data_path_is_vf == vf_is_up)
+ return NOTIFY_OK;
+
+ if (vf_is_up && !net_device_ctx->vf_alloc) {
+ netdev_info(ndev, "Waiting for the VF association from host\n");
+ wait_for_completion(&net_device_ctx->vf_add);
+ }
+
+ ret = netvsc_switch_datapath(ndev, vf_is_up);
+
+ if (ret) {
+ netdev_err(ndev,
+ "Data path failed to switch %s VF: %s, err: %d\n",
+ vf_is_up ? "to" : "from", vf_netdev->name, ret);
+ return NOTIFY_DONE;
+ } else {
+ netdev_info(ndev, "Data path switched %s VF: %s\n",
+ vf_is_up ? "to" : "from", vf_netdev->name);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int netvsc_unregister_vf(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct net_device_context *net_device_ctx;
+
+ ndev = get_netvsc_byref(vf_netdev);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
+
+ netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+
+ netvsc_vf_setxdp(vf_netdev, NULL);
+
+ reinit_completion(&net_device_ctx->vf_add);
+ netdev_rx_handler_unregister(vf_netdev);
+ netdev_upper_dev_unlink(vf_netdev, ndev);
+ RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
+ dev_put(vf_netdev);
+
+ ndev->needed_headroom = RNDIS_AND_PPI_SIZE;
+
+ return NOTIFY_OK;
+}
+
+static int netvsc_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct net_device *net = NULL;
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device_info *device_info = NULL;
+ struct netvsc_device *nvdev;
+ int ret = -ENOMEM;
+
+ net = alloc_etherdev_mq(sizeof(struct net_device_context),
+ VRSS_CHANNEL_MAX);
+ if (!net)
+ goto no_net;
+
+ netif_carrier_off(net);
+
+ netvsc_init_settings(net);
+
+ net_device_ctx = netdev_priv(net);
+ net_device_ctx->device_ctx = dev;
+ net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
+ if (netif_msg_probe(net_device_ctx))
+ netdev_dbg(net, "netvsc msg_enable: %d\n",
+ net_device_ctx->msg_enable);
+
+ hv_set_drvdata(dev, net);
+
+ INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
+
+ init_completion(&net_device_ctx->vf_add);
+ spin_lock_init(&net_device_ctx->lock);
+ INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
+ INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
+
+ net_device_ctx->vf_stats
+ = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
+ if (!net_device_ctx->vf_stats)
+ goto no_stats;
+
+ net->netdev_ops = &device_ops;
+ net->ethtool_ops = &ethtool_ops;
+ SET_NETDEV_DEV(net, &dev->device);
+ dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
+
+ /* We always need headroom for rndis header */
+ net->needed_headroom = RNDIS_AND_PPI_SIZE;
+
+	/* Initialize the number of queues to 1; we may change it if more
+	 * channels are offered later.
+	 */
+ netif_set_real_num_tx_queues(net, 1);
+ netif_set_real_num_rx_queues(net, 1);
+
+ /* Notify the netvsc driver of the new device */
+ device_info = netvsc_devinfo_get(NULL);
+
+ if (!device_info) {
+ ret = -ENOMEM;
+ goto devinfo_failed;
+ }
+
+ /* We must get rtnl lock before scheduling nvdev->subchan_work,
+	 * otherwise netvsc_subchan_work() can get the rtnl lock first and wait
+	 * for all subchannels to show up, but that may not happen because
+ * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
+ * -> ... -> device_add() -> ... -> __device_attach() can't get
+ * the device lock, so all the subchannels can't be processed --
+ * finally netvsc_subchan_work() hangs forever.
+ *
+ * The rtnl lock also needs to be held before rndis_filter_device_add()
+ * which advertises nvsp_2_vsc_capability / sriov bit, and triggers
+	 * VF NIC offering and registering. If the VF NIC finishes register_netdev()
+	 * earlier, it may cause a name-based config failure.
+ */
+ rtnl_lock();
+
+ nvdev = rndis_filter_device_add(dev, device_info);
+ if (IS_ERR(nvdev)) {
+ ret = PTR_ERR(nvdev);
+ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+ goto rndis_failed;
+ }
+
+ eth_hw_addr_set(net, device_info->mac_adr);
+
+ if (nvdev->num_chn > 1)
+ schedule_work(&nvdev->subchan_work);
+
+ /* hw_features computed in rndis_netdev_set_hwcaps() */
+ net->features = net->hw_features |
+ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ net->vlan_features = net->features;
+
+ netdev_lockdep_set_classes(net);
+
+ /* MTU range: 68 - 1500 or 65521 */
+ net->min_mtu = NETVSC_MTU_MIN;
+ if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
+ net->max_mtu = NETVSC_MTU - ETH_HLEN;
+ else
+ net->max_mtu = ETH_DATA_LEN;
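+	/* Worked example (NETVSC_MTU is assumed to be 65535, as implied by the
+	 * "65521" in the range comment above): 65535 - ETH_HLEN (14) = 65521
+	 * for NVSP >= 2, while older hosts stay at ETH_DATA_LEN (1500).
+	 */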
+
+ nvdev->tx_disable = false;
+
+ ret = register_netdevice(net);
+ if (ret != 0) {
+ pr_err("Unable to register netdev.\n");
+ goto register_failed;
+ }
+
+ list_add(&net_device_ctx->list, &netvsc_dev_list);
+ rtnl_unlock();
+
+ netvsc_devinfo_put(device_info);
+ return 0;
+
+register_failed:
+ rndis_filter_device_remove(dev, nvdev);
+rndis_failed:
+ rtnl_unlock();
+ netvsc_devinfo_put(device_info);
+devinfo_failed:
+ free_percpu(net_device_ctx->vf_stats);
+no_stats:
+ hv_set_drvdata(dev, NULL);
+ free_netdev(net);
+no_net:
+ return ret;
+}
+
+static int netvsc_remove(struct hv_device *dev)
+{
+ struct net_device_context *ndev_ctx;
+ struct net_device *vf_netdev, *net;
+ struct netvsc_device *nvdev;
+
+ net = hv_get_drvdata(dev);
+ if (net == NULL) {
+ dev_err(&dev->device, "No net device to remove\n");
+ return 0;
+ }
+
+ ndev_ctx = netdev_priv(net);
+
+ cancel_delayed_work_sync(&ndev_ctx->dwork);
+
+ rtnl_lock();
+ nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ if (nvdev) {
+ cancel_work_sync(&nvdev->subchan_work);
+ netvsc_xdp_set(net, NULL, NULL, nvdev);
+ }
+
+ /*
+ * Call to the vsc driver to let it know that the device is being
+ * removed. Also blocks mtu and channel changes.
+ */
+ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ if (vf_netdev)
+ netvsc_unregister_vf(vf_netdev);
+
+ if (nvdev)
+ rndis_filter_device_remove(dev, nvdev);
+
+ unregister_netdevice(net);
+ list_del(&ndev_ctx->list);
+
+ rtnl_unlock();
+
+ hv_set_drvdata(dev, NULL);
+
+ free_percpu(ndev_ctx->vf_stats);
+ free_netdev(net);
+ return 0;
+}
+
+static int netvsc_suspend(struct hv_device *dev)
+{
+ struct net_device_context *ndev_ctx;
+ struct netvsc_device *nvdev;
+ struct net_device *net;
+ int ret;
+
+ net = hv_get_drvdata(dev);
+
+ ndev_ctx = netdev_priv(net);
+ cancel_delayed_work_sync(&ndev_ctx->dwork);
+
+ rtnl_lock();
+
+ nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ if (nvdev == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Save the current config info */
+ ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
+ if (!ndev_ctx->saved_netvsc_dev_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = netvsc_detach(net, nvdev);
+out:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static int netvsc_resume(struct hv_device *dev)
+{
+ struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device_info *device_info;
+ int ret;
+
+ rtnl_lock();
+
+ net_device_ctx = netdev_priv(net);
+
+ /* Reset the data path to the netvsc NIC before re-opening the vmbus
+ * channel. Later netvsc_netdev_event() will switch the data path to
+ * the VF upon the UP or CHANGE event.
+ */
+ net_device_ctx->data_path_is_vf = false;
+ device_info = net_device_ctx->saved_netvsc_dev_info;
+
+ ret = netvsc_attach(net, device_info);
+
+ netvsc_devinfo_put(device_info);
+ net_device_ctx->saved_netvsc_dev_info = NULL;
+
+ rtnl_unlock();
+
+ return ret;
+}
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Network guid */
+ { HV_NIC_GUID, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+/* The one and only one */
+static struct hv_driver netvsc_drv = {
+ .name = KBUILD_MODNAME,
+ .id_table = id_table,
+ .probe = netvsc_probe,
+ .remove = netvsc_remove,
+ .suspend = netvsc_suspend,
+ .resume = netvsc_resume,
+ .driver = {
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+};
+
+/*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+ * to the guest. When the corresponding VF instance is registered,
+ * we will take care of switching the data path.
+ */
+static int netvsc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+ /* Skip our own events */
+ if (event_dev->netdev_ops == &device_ops)
+ return NOTIFY_DONE;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_dev->type != ARPHRD_ETHER)
+ return NOTIFY_DONE;
+
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (is_vlan_dev(event_dev))
+ return NOTIFY_DONE;
+
+ /* Avoid Bonding master dev with same MAC registering as VF */
+ if (netif_is_bond_master(event_dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_POST_INIT:
+ return netvsc_prepare_bonding(event_dev);
+ case NETDEV_REGISTER:
+ return netvsc_register_vf(event_dev);
+ case NETDEV_UNREGISTER:
+ return netvsc_unregister_vf(event_dev);
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ case NETDEV_GOING_DOWN:
+ return netvsc_vf_changed(event_dev, event);
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block netvsc_netdev_notifier = {
+ .notifier_call = netvsc_netdev_event,
+};
+
+static void __exit netvsc_drv_exit(void)
+{
+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
+ vmbus_driver_unregister(&netvsc_drv);
+}
+
+static int __init netvsc_drv_init(void)
+{
+ int ret;
+
+ if (ring_size < RING_SIZE_MIN) {
+ ring_size = RING_SIZE_MIN;
+ pr_info("Increased ring_size to %u (min allowed)\n",
+ ring_size);
+ }
+ netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
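+	/* Sizing sketch (the default of 128 is an assumption about ring_size's
+	 * initializer elsewhere in this file): 128 pages * 4096 = 512 KiB of
+	 * payload per ring, which VMBUS_RING_SIZE() then pads with the ring
+	 * buffer header and rounds up to whole pages.
+	 */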
+
+ register_netdevice_notifier(&netvsc_netdev_notifier);
+
+ ret = vmbus_driver_register(&netvsc_drv);
+ if (ret)
+ goto err_vmbus_reg;
+
+ return 0;
+
+err_vmbus_reg:
+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
+ return ret;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
+
+module_init(netvsc_drv_init);
+module_exit(netvsc_drv_exit);
diff --git a/drivers/net/hyperv/netvsc_trace.c b/drivers/net/hyperv/netvsc_trace.c
new file mode 100644
index 000000000..bb0ce5a2b
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_trace.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/netdevice.h>
+
+#include "hyperv_net.h"
+
+#define CREATE_TRACE_POINTS
+#include "netvsc_trace.h"
diff --git a/drivers/net/hyperv/netvsc_trace.h b/drivers/net/hyperv/netvsc_trace.h
new file mode 100644
index 000000000..f7585563d
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_trace.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_NETVSC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _NETVSC_TRACE_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM netvsc
+#define TRACE_INCLUDE_FILE netvsc_trace
+
+TRACE_DEFINE_ENUM(RNDIS_MSG_PACKET);
+TRACE_DEFINE_ENUM(RNDIS_MSG_INDICATE);
+TRACE_DEFINE_ENUM(RNDIS_MSG_INIT);
+TRACE_DEFINE_ENUM(RNDIS_MSG_INIT_C);
+TRACE_DEFINE_ENUM(RNDIS_MSG_HALT);
+TRACE_DEFINE_ENUM(RNDIS_MSG_QUERY);
+TRACE_DEFINE_ENUM(RNDIS_MSG_QUERY_C);
+TRACE_DEFINE_ENUM(RNDIS_MSG_SET);
+TRACE_DEFINE_ENUM(RNDIS_MSG_SET_C);
+TRACE_DEFINE_ENUM(RNDIS_MSG_RESET);
+TRACE_DEFINE_ENUM(RNDIS_MSG_RESET_C);
+TRACE_DEFINE_ENUM(RNDIS_MSG_KEEPALIVE);
+TRACE_DEFINE_ENUM(RNDIS_MSG_KEEPALIVE_C);
+
+#define show_rndis_type(type) \
+ __print_symbolic(type, \
+ { RNDIS_MSG_PACKET, "PACKET" }, \
+ { RNDIS_MSG_INDICATE, "INDICATE", }, \
+ { RNDIS_MSG_INIT, "INIT", }, \
+ { RNDIS_MSG_INIT_C, "INIT_C", }, \
+ { RNDIS_MSG_HALT, "HALT", }, \
+ { RNDIS_MSG_QUERY, "QUERY", }, \
+ { RNDIS_MSG_QUERY_C, "QUERY_C", }, \
+ { RNDIS_MSG_SET, "SET", }, \
+ { RNDIS_MSG_SET_C, "SET_C", }, \
+ { RNDIS_MSG_RESET, "RESET", }, \
+ { RNDIS_MSG_RESET_C, "RESET_C", }, \
+ { RNDIS_MSG_KEEPALIVE, "KEEPALIVE", }, \
+ { RNDIS_MSG_KEEPALIVE_C, "KEEPALIVE_C", })
+
+DECLARE_EVENT_CLASS(rndis_msg_class,
+ TP_PROTO(const struct net_device *ndev, u16 q,
+ const struct rndis_message *msg),
+ TP_ARGS(ndev, q, msg),
+ TP_STRUCT__entry(
+ __string( name, ndev->name )
+ __field( u16, queue )
+ __field( u32, req_id )
+ __field( u32, msg_type )
+ __field( u32, msg_len )
+ ),
+ TP_fast_assign(
+ __assign_str(name, ndev->name);
+ __entry->queue = q;
+ __entry->req_id = msg->msg.init_req.req_id;
+ __entry->msg_type = msg->ndis_msg_type;
+ __entry->msg_len = msg->msg_len;
+ ),
+ TP_printk("dev=%s q=%u req=%#x type=%s msg_len=%u",
+ __get_str(name), __entry->queue, __entry->req_id,
+ show_rndis_type(__entry->msg_type), __entry->msg_len)
+);
+
+DEFINE_EVENT(rndis_msg_class, rndis_send,
+ TP_PROTO(const struct net_device *ndev, u16 q,
+ const struct rndis_message *msg),
+ TP_ARGS(ndev, q, msg)
+);
+
+DEFINE_EVENT(rndis_msg_class, rndis_recv,
+ TP_PROTO(const struct net_device *ndev, u16 q,
+ const struct rndis_message *msg),
+ TP_ARGS(ndev, q, msg)
+);
+
+TRACE_DEFINE_ENUM(NVSP_MSG_TYPE_INIT);
+TRACE_DEFINE_ENUM(NVSP_MSG_TYPE_INIT_COMPLETE);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_NDIS_VER);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RECV_BUF);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_REVOKE_RECV_BUF);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_SEND_BUF);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_REVOKE_SEND_BUF);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RNDIS_PKT);
+TRACE_DEFINE_ENUM(NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE);
+TRACE_DEFINE_ENUM(NVSP_MSG2_TYPE_SEND_NDIS_CONFIG);
+
+TRACE_DEFINE_ENUM(NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION);
+TRACE_DEFINE_ENUM(NVSP_MSG4_TYPE_SWITCH_DATA_PATH);
+
+TRACE_DEFINE_ENUM(NVSP_MSG5_TYPE_SUBCHANNEL);
+TRACE_DEFINE_ENUM(NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE);
+
+#define show_nvsp_type(type) \
+ __print_symbolic(type, \
+ { NVSP_MSG_TYPE_INIT, "INIT" }, \
+ { NVSP_MSG_TYPE_INIT_COMPLETE, "INIT_COMPLETE" }, \
+ { NVSP_MSG1_TYPE_SEND_NDIS_VER, "SEND_NDIS_VER" }, \
+ { NVSP_MSG1_TYPE_SEND_RECV_BUF, "SEND_RECV_BUF" }, \
+ { NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE, "SEND_RECV_BUF_COMPLETE" }, \
+ { NVSP_MSG1_TYPE_REVOKE_RECV_BUF, "REVOKE_RECV_BUF" }, \
+ { NVSP_MSG1_TYPE_SEND_SEND_BUF, "SEND_SEND_BUF" }, \
+ { NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE, "SEND_SEND_BUF_COMPLETE" }, \
+ { NVSP_MSG1_TYPE_REVOKE_SEND_BUF, "REVOKE_SEND_BUF" }, \
+ { NVSP_MSG1_TYPE_SEND_RNDIS_PKT, "SEND_RNDIS_PKT" }, \
+ { NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE, "SEND_RNDIS_PKT_COMPLETE" },\
+ { NVSP_MSG2_TYPE_SEND_NDIS_CONFIG, "SEND_NDIS_CONFIG" }, \
+ { NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION, "SEND_VF_ASSOCIATION" }, \
+ { NVSP_MSG4_TYPE_SWITCH_DATA_PATH, "SWITCH_DATA_PATH" }, \
+ { NVSP_MSG5_TYPE_SUBCHANNEL, "SUBCHANNEL" }, \
+ { NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE, "SEND_INDIRECTION_TABLE" })
+
+TRACE_EVENT(nvsp_send,
+ TP_PROTO(const struct net_device *ndev,
+ const struct nvsp_message *msg),
+ TP_ARGS(ndev, msg),
+ TP_STRUCT__entry(
+ __string( name, ndev->name )
+ __field( u32, msg_type )
+ ),
+ TP_fast_assign(
+ __assign_str(name, ndev->name);
+ __entry->msg_type = msg->hdr.msg_type;
+ ),
+ TP_printk("dev=%s type=%s",
+ __get_str(name),
+ show_nvsp_type(__entry->msg_type))
+);
+
+TRACE_EVENT(nvsp_send_pkt,
+ TP_PROTO(const struct net_device *ndev,
+ const struct vmbus_channel *chan,
+ const struct nvsp_1_message_send_rndis_packet *rpkt),
+ TP_ARGS(ndev, chan, rpkt),
+ TP_STRUCT__entry(
+ __string( name, ndev->name )
+ __field( u16, qid )
+ __field( u32, channel_type )
+ __field( u32, section_index )
+ __field( u32, section_size )
+ ),
+ TP_fast_assign(
+ __assign_str(name, ndev->name);
+ __entry->qid = chan->offermsg.offer.sub_channel_index;
+ __entry->channel_type = rpkt->channel_type;
+ __entry->section_index = rpkt->send_buf_section_index;
+ __entry->section_size = rpkt->send_buf_section_size;
+ ),
+ TP_printk("dev=%s qid=%u type=%s section=%u size=%d",
+ __get_str(name), __entry->qid,
+ __entry->channel_type ? "CONTROL" : "DATA",
+ __entry->section_index, __entry->section_size)
+);
+
+TRACE_EVENT(nvsp_recv,
+ TP_PROTO(const struct net_device *ndev,
+ const struct vmbus_channel *chan,
+ const struct nvsp_message *msg),
+ TP_ARGS(ndev, chan, msg),
+ TP_STRUCT__entry(
+ __string( name, ndev->name )
+ __field( u16, qid )
+ __field( u32, msg_type )
+ ),
+ TP_fast_assign(
+ __assign_str(name, ndev->name);
+ __entry->qid = chan->offermsg.offer.sub_channel_index;
+ __entry->msg_type = msg->hdr.msg_type;
+ ),
+ TP_printk("dev=%s qid=%u type=%s",
+ __get_str(name), __entry->qid,
+ show_nvsp_type(__entry->msg_type))
+);
+
+#endif /* _NETVSC_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/net/hyperv
+#include <trace/define_trace.h>
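+
+/* Usage sketch (the path is an assumption about a typical tracefs mount, not
+ * part of this header): once the module is loaded, the events declared above
+ * appear under the "netvsc" group and can be enabled at runtime, e.g.
+ *
+ *	echo 1 > /sys/kernel/tracing/events/netvsc/nvsp_send/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ */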
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
new file mode 100644
index 000000000..eea777ec2
--- /dev/null
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -0,0 +1,1620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/nls.h>
+#include <linux/vmalloc.h>
+#include <linux/rtnetlink.h>
+#include <linux/ucs2_string.h>
+#include <linux/string.h>
+
+#include "hyperv_net.h"
+#include "netvsc_trace.h"
+
+static void rndis_set_multicast(struct work_struct *w);
+
+#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
+struct rndis_request {
+ struct list_head list_ent;
+ struct completion wait_event;
+
+ struct rndis_message response_msg;
+ /*
+ * The buffer for extended info after the RNDIS response message. It's
+ * referenced based on the data offset in the RNDIS message. Its size
+ * is enough for current needs, and should be sufficient for the near
+ * future.
+ */
+ u8 response_ext[RNDIS_EXT_LEN];
+
+ /* Simplify allocation by having a netvsc packet inline */
+ struct hv_netvsc_packet pkt;
+
+ struct rndis_message request_msg;
+ /*
+ * The buffer for the extended info after the RNDIS request message.
+ * It is referenced and sized in a similar way as response_ext.
+ */
+ u8 request_ext[RNDIS_EXT_LEN];
+};
+
+static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
+
+static struct rndis_device *get_rndis_device(void)
+{
+ struct rndis_device *device;
+
+ device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ spin_lock_init(&device->request_lock);
+
+ INIT_LIST_HEAD(&device->req_list);
+ INIT_WORK(&device->mcast_work, rndis_set_multicast);
+
+ device->state = RNDIS_DEV_UNINITIALIZED;
+
+ return device;
+}
+
+static struct rndis_request *get_rndis_request(struct rndis_device *dev,
+ u32 msg_type,
+ u32 msg_len)
+{
+ struct rndis_request *request;
+ struct rndis_message *rndis_msg;
+ struct rndis_set_request *set;
+ unsigned long flags;
+
+ request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
+ if (!request)
+ return NULL;
+
+ init_completion(&request->wait_event);
+
+ rndis_msg = &request->request_msg;
+ rndis_msg->ndis_msg_type = msg_type;
+ rndis_msg->msg_len = msg_len;
+
+ request->pkt.q_idx = 0;
+
+	/*
+	 * Set the request id. This field is always right after the rndis
+	 * header for request/response packet types, so we just use the
+	 * SetRequest as a template.
+	 */
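+	/*
+	 * Layout sketch this relies on (based on how req_id is read back in
+	 * rndis_filter_receive_response(), not on NDIS documentation): the
+	 * req_id of msg.init_req, msg.query_req and msg.set_req all sit at
+	 * the same offset within the msg union, so writing set_req.req_id
+	 * here tags every request type correctly.
+	 */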
+ set = &rndis_msg->msg.set_req;
+ set->req_id = atomic_inc_return(&dev->new_req_id);
+
+ /* Add to the request list */
+ spin_lock_irqsave(&dev->request_lock, flags);
+ list_add_tail(&request->list_ent, &dev->req_list);
+ spin_unlock_irqrestore(&dev->request_lock, flags);
+
+ return request;
+}
+
+static void put_rndis_request(struct rndis_device *dev,
+ struct rndis_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->request_lock, flags);
+ list_del(&req->list_ent);
+ spin_unlock_irqrestore(&dev->request_lock, flags);
+
+ kfree(req);
+}
+
+static void dump_rndis_message(struct net_device *netdev,
+ const struct rndis_message *rndis_msg,
+ const void *data)
+{
+ switch (rndis_msg->ndis_msg_type) {
+ case RNDIS_MSG_PACKET:
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
+ const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
+ "data offset %u data len %u, # oob %u, "
+ "oob offset %u, oob len %u, pkt offset %u, "
+				   "pkt len %u)\n",
+ rndis_msg->msg_len,
+ pkt->data_offset,
+ pkt->data_len,
+ pkt->num_oob_data_elements,
+ pkt->oob_data_offset,
+ pkt->oob_data_len,
+ pkt->per_pkt_info_offset,
+ pkt->per_pkt_info_len);
+ }
+ break;
+
+ case RNDIS_MSG_INIT_C:
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_initialize_complete)) {
+ const struct rndis_initialize_complete *init_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
+ "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
+ "device flags %d, max xfer size 0x%x, max pkts %u, "
+ "pkt aligned %u)\n",
+ rndis_msg->msg_len,
+ init_complete->req_id,
+ init_complete->status,
+ init_complete->major_ver,
+ init_complete->minor_ver,
+ init_complete->dev_flags,
+ init_complete->max_xfer_size,
+ init_complete->max_pkt_per_msg,
+ init_complete->pkt_alignment_factor);
+ }
+ break;
+
+ case RNDIS_MSG_QUERY_C:
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_query_complete)) {
+ const struct rndis_query_complete *query_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
+ "(len %u, id 0x%x, status 0x%x, buf len %u, "
+ "buf offset %u)\n",
+ rndis_msg->msg_len,
+ query_complete->req_id,
+ query_complete->status,
+ query_complete->info_buflen,
+ query_complete->info_buf_offset);
+ }
+ break;
+
+ case RNDIS_MSG_SET_C:
+		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
+ const struct rndis_set_complete *set_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->msg_len,
+ set_complete->req_id,
+ set_complete->status);
+ }
+ break;
+
+ case RNDIS_MSG_INDICATE:
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_indicate_status)) {
+ const struct rndis_indicate_status *indicate_status =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
+ "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
+ rndis_msg->msg_len,
+ indicate_status->status,
+ indicate_status->status_buflen,
+ indicate_status->status_buf_offset);
+ }
+ break;
+
+ default:
+ netdev_dbg(netdev, "0x%x (len %u)\n",
+ rndis_msg->ndis_msg_type,
+ rndis_msg->msg_len);
+ break;
+ }
+}
+
+static int rndis_filter_send_request(struct rndis_device *dev,
+ struct rndis_request *req)
+{
+ struct hv_netvsc_packet *packet;
+ struct hv_page_buffer page_buf[2];
+ struct hv_page_buffer *pb = page_buf;
+ int ret;
+
+ /* Setup the packet to send it */
+ packet = &req->pkt;
+
+ packet->total_data_buflen = req->request_msg.msg_len;
+ packet->page_buf_cnt = 1;
+
+ pb[0].pfn = virt_to_phys(&req->request_msg) >>
+ HV_HYP_PAGE_SHIFT;
+ pb[0].len = req->request_msg.msg_len;
+ pb[0].offset = offset_in_hvpage(&req->request_msg);
+
+	/* Add one page_buf when request_msg crosses the page boundary */
+ if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
+ packet->page_buf_cnt++;
+ pb[0].len = HV_HYP_PAGE_SIZE -
+ pb[0].offset;
+ pb[1].pfn = virt_to_phys((void *)&req->request_msg
+ + pb[0].len) >> HV_HYP_PAGE_SHIFT;
+ pb[1].offset = 0;
+ pb[1].len = req->request_msg.msg_len -
+ pb[0].len;
+ }
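+	/* Worked example of the split above (HV_HYP_PAGE_SIZE assumed to be
+	 * 4096): a request_msg at page offset 0xF80 with msg_len 0x100 becomes
+	 * pb[0] = { offset 0xF80, len 0x80 } and pb[1] = { offset 0, len 0x80 }
+	 * on the next hypervisor page.
+	 */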
+
+ trace_rndis_send(dev->ndev, 0, &req->request_msg);
+
+ rcu_read_lock_bh();
+ ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
+ rcu_read_unlock_bh();
+
+ return ret;
+}
+
+static void rndis_set_link_state(struct rndis_device *rdev,
+ struct rndis_request *request)
+{
+ u32 link_status;
+ struct rndis_query_complete *query_complete;
+ u32 msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
+ return;
+
+ query_complete = &request->response_msg.msg.query_complete;
+
+ if (query_complete->status == RNDIS_STATUS_SUCCESS &&
+ query_complete->info_buflen >= sizeof(u32) &&
+ query_complete->info_buf_offset >= sizeof(*query_complete) &&
+ msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ >= query_complete->info_buflen) {
+ memcpy(&link_status, (void *)((unsigned long)query_complete +
+ query_complete->info_buf_offset), sizeof(u32));
+ rdev->link_state = link_status != 0;
+ }
+}
+
+static void rndis_filter_receive_response(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct rndis_message *resp,
+ void *data)
+{
+ u32 *req_id = &resp->msg.init_complete.req_id;
+ struct rndis_device *dev = nvdev->extension;
+ struct rndis_request *request = NULL;
+ bool found = false;
+ unsigned long flags;
+
+	/* This should never happen; it means a control message
+	 * response was received after the device was removed.
+	 */
+ if (dev->state == RNDIS_DEV_UNINITIALIZED) {
+ netdev_err(ndev,
+ "got rndis message uninitialized\n");
+ return;
+ }
+
+ /* Ensure the packet is big enough to read req_id. Req_id is the 1st
+ * field in any request/response message, so the payload should have at
+ * least sizeof(u32) bytes
+ */
+ if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
+ netdev_err(ndev, "rndis msg_len too small: %u\n",
+ resp->msg_len);
+ return;
+ }
+
+ /* Copy the request ID into nvchan->recv_buf */
+ *req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
+
+ spin_lock_irqsave(&dev->request_lock, flags);
+ list_for_each_entry(request, &dev->req_list, list_ent) {
+		/*
+		 * All request/response messages contain RequestId as the 1st
+		 * field.
+		 */
+ if (request->request_msg.msg.init_req.req_id == *req_id) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->request_lock, flags);
+
+ if (found) {
+ if (resp->msg_len <=
+ sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
+ memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
+ unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ data + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
+ "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
+ if (request->request_msg.ndis_msg_type ==
+ RNDIS_MSG_QUERY && request->request_msg.msg.
+ query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
+ rndis_set_link_state(dev, request);
+ } else {
+ netdev_err(ndev,
+ "rndis response buffer overflow "
+ "detected (size %u max %zu)\n",
+ resp->msg_len,
+ sizeof(struct rndis_message));
+
+ if (resp->ndis_msg_type ==
+ RNDIS_MSG_RESET_C) {
+ /* does not have a request id field */
+ request->response_msg.msg.reset_complete.
+ status = RNDIS_STATUS_BUFFER_OVERFLOW;
+ } else {
+ request->response_msg.msg.
+ init_complete.status =
+ RNDIS_STATUS_BUFFER_OVERFLOW;
+ }
+ }
+
+ netvsc_dma_unmap(((struct net_device_context *)
+ netdev_priv(ndev))->device_ctx, &request->pkt);
+ complete(&request->wait_event);
+ } else {
+ netdev_err(ndev,
+ "no rndis request found for this response "
+ "(id 0x%x res type 0x%x)\n",
+ *req_id,
+ resp->ndis_msg_type);
+ }
+}
+
+/*
+ * Get the Per-Packet-Info with the specified type;
+ * return NULL if not found.
+ */
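+/*
+ * Rough layout walked below (a sketch inferred from the fields used here,
+ * not a spec quote): per_pkt_info_offset points past the rndis_packet header
+ * at a chain of entries, each beginning with { size, type, internal,
+ * ppi_offset } and carrying its payload at (entry + ppi_offset); the next
+ * entry starts at (entry + size), until per_pkt_info_len bytes are consumed.
+ */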
+static inline void *rndis_get_ppi(struct net_device *ndev,
+ struct rndis_packet *rpkt,
+ u32 rpkt_len, u32 type, u8 internal,
+ u32 ppi_size, void *data)
+{
+ struct rndis_per_packet_info *ppi;
+ int len;
+
+ if (rpkt->per_pkt_info_offset == 0)
+ return NULL;
+
+ /* Validate info_offset and info_len */
+ if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
+ rpkt->per_pkt_info_offset > rpkt_len) {
+ netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
+ rpkt->per_pkt_info_offset);
+ return NULL;
+ }
+
+ if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
+ rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
+ netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
+ rpkt->per_pkt_info_len);
+ return NULL;
+ }
+
+ ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
+ rpkt->per_pkt_info_offset);
+ /* Copy the PPIs into nvchan->recv_buf */
+ memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
+ len = rpkt->per_pkt_info_len;
+
+ while (len > 0) {
+ /* Validate ppi_offset and ppi_size */
+ if (ppi->size > len) {
+ netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
+			break;
+ }
+
+ if (ppi->ppi_offset >= ppi->size) {
+ netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
+			break;
+ }
+
+ if (ppi->type == type && ppi->internal == internal) {
+ /* ppi->size should be big enough to hold the returned object. */
+ if (ppi->size - ppi->ppi_offset < ppi_size) {
+ netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
+ ppi->size, ppi->ppi_offset);
+				break;
+ }
+ return (void *)((ulong)ppi + ppi->ppi_offset);
+ }
+ len -= ppi->size;
+ ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
+ }
+
+ return NULL;
+}
+
+static inline
+void rsc_add_data(struct netvsc_channel *nvchan,
+ const struct ndis_pkt_8021q_info *vlan,
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ const u32 *hash_info,
+ void *data, u32 len)
+{
+ u32 cnt = nvchan->rsc.cnt;
+
+ if (cnt) {
+ nvchan->rsc.pktlen += len;
+ } else {
+		/* The data/values pointed to by vlan, csum_info and hash_info are
+		 * shared across the different 'fragments' of the RSC packet; store
+		 * them in the packet itself.
+		 */
+ if (vlan != NULL) {
+ memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
+ }
+ if (csum_info != NULL) {
+ memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
+ }
+ nvchan->rsc.pktlen = len;
+ if (hash_info != NULL) {
+ nvchan->rsc.hash_info = *hash_info;
+ nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
+ }
+ }
+
+ nvchan->rsc.data[cnt] = data;
+ nvchan->rsc.len[cnt] = len;
+ nvchan->rsc.cnt++;
+}
+
+static int rndis_filter_receive_data(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_channel *nvchan,
+ struct rndis_message *msg,
+ void *data, u32 data_buflen)
+{
+ struct rndis_packet *rndis_pkt = &msg->msg.pkt;
+ const struct ndis_tcp_ip_checksum_info *csum_info;
+ const struct ndis_pkt_8021q_info *vlan;
+ const struct rndis_pktinfo_id *pktinfo_id;
+ const u32 *hash_info;
+ u32 data_offset, rpkt_len;
+ bool rsc_more = false;
+ int ret;
+
+ /* Ensure data_buflen is big enough to read header fields */
+ if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
+ netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
+ data_buflen);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Copy the RNDIS packet into nvchan->recv_buf */
+ memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
+
+ /* Validate rndis_pkt offset */
+ if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
+ netdev_err(ndev, "invalid rndis packet offset: %u\n",
+ rndis_pkt->data_offset);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Remove the rndis header and pass it back up the stack */
+ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
+
+ rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
+ data_buflen -= data_offset;
+
+	/*
+	 * Make sure we got a valid RNDIS message; now data_buflen
+	 * should be the data packet size plus the trailer padding size.
+	 */
+ if (unlikely(data_buflen < rndis_pkt->data_len)) {
+ netdev_err(ndev, "rndis message buffer "
+ "overflow detected (got %u, min %u)"
+ "...dropping this message!\n",
+ data_buflen, rndis_pkt->data_len);
+ return NVSP_STAT_FAIL;
+ }
+
+ vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
+ data);
+
+ csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
+ sizeof(*csum_info), data);
+
+ hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
+ sizeof(*hash_info), data);
+
+ pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
+ sizeof(*pktinfo_id), data);
+
+ /* Identify RSC frags, drop erroneous packets */
+ if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
+ if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
+ nvchan->rsc.cnt = 0;
+ else if (nvchan->rsc.cnt == 0)
+ goto drop;
+
+ rsc_more = true;
+
+ if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
+ rsc_more = false;
+
+ if (rsc_more && nvchan->rsc.is_last)
+ goto drop;
+ } else {
+ nvchan->rsc.cnt = 0;
+ }
+
+ if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
+ goto drop;
+
+	/* Put data into the per-channel structure.
+	 * Also, remove the rndis trailer padding from the rndis packet message:
+	 * rndis_pkt->data_len tells us the real data length, so we only copy
+	 * the data packet to the stack, without the rndis trailer padding.
+	 */
+ rsc_add_data(nvchan, vlan, csum_info, hash_info,
+ data + data_offset, rndis_pkt->data_len);
+
+ if (rsc_more)
+ return NVSP_STAT_SUCCESS;
+
+ ret = netvsc_recv_callback(ndev, nvdev, nvchan);
+ nvchan->rsc.cnt = 0;
+
+ return ret;
+
+drop:
+ return NVSP_STAT_FAIL;
+}
+
+int rndis_filter_receive(struct net_device *ndev,
+ struct netvsc_device *net_dev,
+ struct netvsc_channel *nvchan,
+ void *data, u32 buflen)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct rndis_message *rndis_msg = nvchan->recv_buf;
+
+ if (buflen < RNDIS_HEADER_SIZE) {
+ netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Copy the RNDIS msg header into nvchan->recv_buf */
+ memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
+
+ /* Validate incoming rndis_message packet */
+ if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
+ buflen < rndis_msg->msg_len) {
+ netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
+ buflen, rndis_msg->msg_len);
+ return NVSP_STAT_FAIL;
+ }
+
+ if (netif_msg_rx_status(net_device_ctx))
+ dump_rndis_message(ndev, rndis_msg, data);
+
+ switch (rndis_msg->ndis_msg_type) {
+ case RNDIS_MSG_PACKET:
+ return rndis_filter_receive_data(ndev, net_dev, nvchan,
+ rndis_msg, data, buflen);
+ case RNDIS_MSG_INIT_C:
+ case RNDIS_MSG_QUERY_C:
+ case RNDIS_MSG_SET_C:
+ /* completion msgs */
+ rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
+ break;
+
+ case RNDIS_MSG_INDICATE:
+ /* notification msgs */
+ netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
+ break;
+ default:
+ netdev_err(ndev,
+ "unhandled rndis message (type %u len %u)\n",
+ rndis_msg->ndis_msg_type,
+ rndis_msg->msg_len);
+ return NVSP_STAT_FAIL;
+ }
+
+ return NVSP_STAT_SUCCESS;
+}
+
+static int rndis_filter_query_device(struct rndis_device *dev,
+ struct netvsc_device *nvdev,
+ u32 oid, void *result, u32 *result_size)
+{
+ struct rndis_request *request;
+ u32 inresult_size = *result_size;
+ struct rndis_query_request *query;
+ struct rndis_query_complete *query_complete;
+ u32 msg_len;
+ int ret = 0;
+
+ if (!result)
+ return -EINVAL;
+
+ *result_size = 0;
+ request = get_rndis_request(dev, RNDIS_MSG_QUERY,
+ RNDIS_MESSAGE_SIZE(struct rndis_query_request));
+ if (!request) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Setup the rndis query */
+ query = &request->request_msg.msg.query_req;
+ query->oid = oid;
+ query->info_buf_offset = sizeof(struct rndis_query_request);
+ query->info_buflen = 0;
+ query->dev_vc_handle = 0;
+
+ if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
+ struct ndis_offload *hwcaps;
+ u32 nvsp_version = nvdev->nvsp_version;
+ u8 ndis_rev;
+ size_t size;
+
+ if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+ size = NDIS_OFFLOAD_SIZE;
+ } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
+ size = NDIS_OFFLOAD_SIZE_6_1;
+ } else {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
+ size = NDIS_OFFLOAD_SIZE_6_0;
+ }
+
+ request->request_msg.msg_len += size;
+ query->info_buflen = size;
+ hwcaps = (struct ndis_offload *)
+ ((unsigned long)query + query->info_buf_offset);
+
+ hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
+ hwcaps->header.revision = ndis_rev;
+ hwcaps->header.size = size;
+
+ } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
+ struct ndis_recv_scale_cap *cap;
+
+ request->request_msg.msg_len +=
+ sizeof(struct ndis_recv_scale_cap);
+ query->info_buflen = sizeof(struct ndis_recv_scale_cap);
+ cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
+ query->info_buf_offset);
+ cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
+ cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
+ cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
+ }
+
+ ret = rndis_filter_send_request(dev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ wait_for_completion(&request->wait_event);
+
+ /* Copy the response back */
+ query_complete = &request->response_msg.msg.query_complete;
+ msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
+ ret = -1;
+ goto cleanup;
+ }
+
+ if (query_complete->info_buflen > inresult_size ||
+ query_complete->info_buf_offset < sizeof(*query_complete) ||
+ msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ < query_complete->info_buflen) {
+ ret = -1;
+ goto cleanup;
+ }
+
+ memcpy(result,
+ (void *)((unsigned long)query_complete +
+ query_complete->info_buf_offset),
+ query_complete->info_buflen);
+
+ *result_size = query_complete->info_buflen;
+
+cleanup:
+ if (request)
+ put_rndis_request(dev, request);
+
+ return ret;
+}
+
+/* Get the hardware offload capabilities */
+static int
+rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
+ struct ndis_offload *caps)
+{
+ u32 caps_len = sizeof(*caps);
+ int ret;
+
+ memset(caps, 0, sizeof(*caps));
+
+ ret = rndis_filter_query_device(dev, net_device,
+ OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
+ caps, &caps_len);
+ if (ret)
+ return ret;
+
+ if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
+ netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
+ caps->header.type);
+ return -EINVAL;
+ }
+
+ if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
+ netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
+ caps->header.revision);
+ return -EINVAL;
+ }
+
+ if (caps->header.size > caps_len ||
+ caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
+ netdev_warn(dev->ndev,
+ "invalid NDIS objsize %u, data size %u\n",
+ caps->header.size, caps_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rndis_filter_query_device_mac(struct rndis_device *dev,
+ struct netvsc_device *net_device)
+{
+ u32 size = ETH_ALEN;
+
+ return rndis_filter_query_device(dev, net_device,
+ RNDIS_OID_802_3_PERMANENT_ADDRESS,
+ dev->hw_mac_adr, &size);
+}
+
+#define NWADR_STR "NetworkAddress"
+#define NWADR_STRLEN 14
+
+int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
+ const char *mac)
+{
+ struct rndis_device *rdev = nvdev->extension;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct rndis_config_parameter_info *cpi;
+ wchar_t *cfg_nwadr, *cfg_mac;
+ struct rndis_set_complete *set_complete;
+ char macstr[2*ETH_ALEN+1];
+ u32 extlen = sizeof(struct rndis_config_parameter_info) +
+ 2*NWADR_STRLEN + 4*ETH_ALEN;
+ int ret;
+
+ request = get_rndis_request(rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ cpi = (struct rndis_config_parameter_info *)((ulong)set +
+ set->info_buf_offset);
+ cpi->parameter_name_offset =
+ sizeof(struct rndis_config_parameter_info);
+ /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
+ cpi->parameter_name_length = 2*NWADR_STRLEN;
+ cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
+ cpi->parameter_value_offset =
+ cpi->parameter_name_offset + cpi->parameter_name_length;
+	/* Multiply by 4 because each MAC byte is displayed as 2 utf16 chars */
+ cpi->parameter_value_length = 4*ETH_ALEN;
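+	/* Worked sizes (ETH_ALEN = 6, NWADR_STRLEN = 14): the UTF-16 name
+	 * "NetworkAddress" takes 2 * 14 = 28 bytes and the MAC rendered by
+	 * "%pm" as 12 hex characters takes 4 * 6 = 24 bytes, which together
+	 * with the rndis_config_parameter_info header make up extlen above.
+	 */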
+
+ cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
+ cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
+ ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
+ cfg_nwadr, NWADR_STRLEN);
+ if (ret < 0)
+ goto cleanup;
+ snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
+ ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
+ cfg_mac, 2*ETH_ALEN);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ wait_for_completion(&request->wait_event);
+
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS)
+ ret = -EIO;
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
+
+int
+rndis_filter_set_offload_params(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct ndis_offload_params *req_offloads)
+{
+ struct rndis_device *rdev = nvdev->extension;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct ndis_offload_params *offload_params;
+ struct rndis_set_complete *set_complete;
+ u32 extlen = sizeof(struct ndis_offload_params);
+ int ret;
+ u32 vsp_version = nvdev->nvsp_version;
+
+ if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
+ extlen = VERSION_4_OFFLOAD_SIZE;
+ /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
+ * UDP checksum offload.
+ */
+ req_offloads->udp_ip_v4_csum = 0;
+ req_offloads->udp_ip_v6_csum = 0;
+ }
+
+ request = get_rndis_request(rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = OID_TCP_OFFLOAD_PARAMETERS;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ offload_params = (struct ndis_offload_params *)((ulong)set +
+ set->info_buf_offset);
+ *offload_params = *req_offloads;
+ offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
+ offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+ offload_params->header.size = extlen;
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
+
+static int rndis_set_rss_param_msg(struct rndis_device *rdev,
+ const u8 *rss_key, u16 flag)
+{
+ struct net_device *ndev = rdev->ndev;
+ struct net_device_context *ndc = netdev_priv(ndev);
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct rndis_set_complete *set_complete;
+ u32 extlen = sizeof(struct ndis_recv_scale_param) +
+ 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
+ struct ndis_recv_scale_param *rssp;
+ u32 *itab;
+ u8 *keyp;
+ int i, ret;
+
+ request = get_rndis_request(
+ rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ rssp = (struct ndis_recv_scale_param *)(set + 1);
+ rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
+ rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
+ rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
+ rssp->flag = flag;
+ rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
+ NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
+ NDIS_HASH_TCP_IPV6;
+ rssp->indirect_tabsize = 4*ITAB_NUM;
+ rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
+ rssp->hashkey_size = NETVSC_HASH_KEYLEN;
+ rssp->hashkey_offset = rssp->indirect_taboffset +
+ rssp->indirect_tabsize;
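+	/* Worked layout (ITAB_NUM is assumed to be 128, per hyperv_net.h): the
+	 * ndis_recv_scale_param header is followed by a 128-entry u32
+	 * indirection table (512 bytes) and then the 40-byte Toeplitz hash key
+	 * (NETVSC_HASH_KEYLEN), which is exactly what extlen accounts for.
+	 */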
+
+ /* Set indirection table entries */
+ itab = (u32 *)(rssp + 1);
+ for (i = 0; i < ITAB_NUM; i++)
+ itab[i] = ndc->rx_table[i];
+
+	/* Set hash key values */
+ keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
+ memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status == RNDIS_STATUS_SUCCESS) {
+ if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
+ !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
+ memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+
+ } else {
+ netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
+
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *rss_key)
+{
+ /* Disable RSS before change */
+ rndis_set_rss_param_msg(rdev, rss_key,
+ NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
+
+ return rndis_set_rss_param_msg(rdev, rss_key, 0);
+}
+
+static int rndis_filter_query_device_link_status(struct rndis_device *dev,
+ struct netvsc_device *net_device)
+{
+ u32 size = sizeof(u32);
+ u32 link_status;
+
+ return rndis_filter_query_device(dev, net_device,
+ RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
+ &link_status, &size);
+}
+
+static int rndis_filter_query_link_speed(struct rndis_device *dev,
+ struct netvsc_device *net_device)
+{
+ u32 size = sizeof(u32);
+ u32 link_speed;
+ struct net_device_context *ndc;
+ int ret;
+
+ ret = rndis_filter_query_device(dev, net_device,
+ RNDIS_OID_GEN_LINK_SPEED,
+ &link_speed, &size);
+
+ if (!ret) {
+ ndc = netdev_priv(dev->ndev);
+
+		/* The link speed reported by the host is in units of 100 bps,
+		 * so we convert it to Mbps here.
+		 */
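+		/* e.g. an assumed host report of 100000000 (units of 100 bps)
+		 * works out to 10000 Mbps, i.e. a 10 Gbps link.
+		 */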
+ ndc->speed = link_speed / 10000;
+ }
+
+ return ret;
+}
+
+static int rndis_filter_set_packet_filter(struct rndis_device *dev,
+ u32 new_filter)
+{
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ int ret;
+
+ if (dev->filter == new_filter)
+ return 0;
+
+ request = get_rndis_request(dev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
+ sizeof(u32));
+ if (!request)
+ return -ENOMEM;
+
+ /* Setup the rndis set */
+ set = &request->request_msg.msg.set_req;
+ set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
+ set->info_buflen = sizeof(u32);
+ set->info_buf_offset = offsetof(typeof(*set), info_buf);
+ memcpy(set->info_buf, &new_filter, sizeof(u32));
+
+ ret = rndis_filter_send_request(dev, request);
+ if (ret == 0) {
+ wait_for_completion(&request->wait_event);
+ dev->filter = new_filter;
+ }
+
+ put_rndis_request(dev, request);
+
+ return ret;
+}
+
+static void rndis_set_multicast(struct work_struct *w)
+{
+ struct rndis_device *rdev
+ = container_of(w, struct rndis_device, mcast_work);
+ u32 filter = NDIS_PACKET_TYPE_DIRECTED;
+ unsigned int flags = rdev->ndev->flags;
+
+ if (flags & IFF_PROMISC) {
+ filter = NDIS_PACKET_TYPE_PROMISCUOUS;
+ } else {
+ if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
+ filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+ if (flags & IFF_BROADCAST)
+ filter |= NDIS_PACKET_TYPE_BROADCAST;
+ }
+
+ rndis_filter_set_packet_filter(rdev, filter);
+}
+
+void rndis_filter_update(struct netvsc_device *nvdev)
+{
+ struct rndis_device *rdev = nvdev->extension;
+
+ schedule_work(&rdev->mcast_work);
+}
+
+static int rndis_filter_init_device(struct rndis_device *dev,
+ struct netvsc_device *nvdev)
+{
+ struct rndis_request *request;
+ struct rndis_initialize_request *init;
+ struct rndis_initialize_complete *init_complete;
+ u32 status;
+ int ret;
+
+ request = get_rndis_request(dev, RNDIS_MSG_INIT,
+ RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
+ if (!request) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+	/* Setup the rndis init request */
+ init = &request->request_msg.msg.init_req;
+ init->major_ver = RNDIS_MAJOR_VERSION;
+ init->minor_ver = RNDIS_MINOR_VERSION;
+ init->max_xfer_size = 0x4000;
+
+ dev->state = RNDIS_DEV_INITIALIZING;
+
+ ret = rndis_filter_send_request(dev, request);
+ if (ret != 0) {
+ dev->state = RNDIS_DEV_UNINITIALIZED;
+ goto cleanup;
+ }
+
+ wait_for_completion(&request->wait_event);
+
+ init_complete = &request->response_msg.msg.init_complete;
+ status = init_complete->status;
+ if (status == RNDIS_STATUS_SUCCESS) {
+ dev->state = RNDIS_DEV_INITIALIZED;
+ nvdev->max_pkt = init_complete->max_pkt_per_msg;
+ nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
+ ret = 0;
+ } else {
+ dev->state = RNDIS_DEV_UNINITIALIZED;
+ ret = -EINVAL;
+ }
+
+cleanup:
+ if (request)
+ put_rndis_request(dev, request);
+
+ return ret;
+}
+
+static bool netvsc_device_idle(const struct netvsc_device *nvdev)
+{
+ int i;
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+
+ if (nvchan->mrc.first != nvchan->mrc.next)
+ return false;
+
+ if (atomic_read(&nvchan->queue_sends) > 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void rndis_filter_halt_device(struct netvsc_device *nvdev,
+ struct rndis_device *dev)
+{
+ struct rndis_request *request;
+ struct rndis_halt_request *halt;
+
+ /* Attempt to do a rndis device halt */
+ request = get_rndis_request(dev, RNDIS_MSG_HALT,
+ RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
+ if (!request)
+ goto cleanup;
+
+	/* Setup the rndis halt request */
+ halt = &request->request_msg.msg.halt_req;
+ halt->req_id = atomic_inc_return(&dev->new_req_id);
+
+ /* Ignore return since this msg is optional. */
+ rndis_filter_send_request(dev, request);
+
+ dev->state = RNDIS_DEV_UNINITIALIZED;
+
+cleanup:
+ nvdev->destroy = true;
+
+ /* Force flag to be ordered before waiting */
+ wmb();
+
+ /* Wait for all send completions */
+ wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
+
+ if (request)
+ put_rndis_request(dev, request);
+}
+
+static int rndis_filter_open_device(struct rndis_device *dev)
+{
+ int ret;
+
+ if (dev->state != RNDIS_DEV_INITIALIZED)
+ return 0;
+
+ ret = rndis_filter_set_packet_filter(dev,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+ if (ret == 0)
+ dev->state = RNDIS_DEV_DATAINITIALIZED;
+
+ return ret;
+}
+
+static int rndis_filter_close_device(struct rndis_device *dev)
+{
+ int ret;
+
+ if (dev->state != RNDIS_DEV_DATAINITIALIZED)
+ return 0;
+
+ /* Make sure rndis_set_multicast doesn't re-enable filter! */
+ cancel_work_sync(&dev->mcast_work);
+
+ ret = rndis_filter_set_packet_filter(dev, 0);
+ if (ret == -ENODEV)
+ ret = 0;
+
+ if (ret == 0)
+ dev->state = RNDIS_DEV_INITIALIZED;
+
+ return ret;
+}
+
+static void netvsc_sc_open(struct vmbus_channel *new_sc)
+{
+ struct net_device *ndev =
+ hv_get_drvdata(new_sc->primary_channel->device_obj);
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvscdev;
+ u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
+ struct netvsc_channel *nvchan;
+ int ret;
+
+	/* This is safe because this callback only happens when the
+	 * new device is being set up and is waiting on channel_init_wait.
+	 */
+ nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
+ if (!nvscdev || chn_index >= nvscdev->num_chn)
+ return;
+
+ nvchan = nvscdev->chan_table + chn_index;
+
+	/* Because the device uses NAPI, all the interrupt batching and
+	 * control is done via the NET softirq, not the channel handling.
+	 */
+ set_channel_read_mode(new_sc, HV_CALL_ISR);
+
+	/* Set the channel before opening. */
+ nvchan->channel = new_sc;
+
+ new_sc->next_request_id_callback = vmbus_next_request_id;
+ new_sc->request_addr_callback = vmbus_request_addr;
+ new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
+ new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+
+ ret = vmbus_open(new_sc, netvsc_ring_bytes,
+ netvsc_ring_bytes, NULL, 0,
+ netvsc_channel_cb, nvchan);
+ if (ret == 0)
+ napi_enable(&nvchan->napi);
+ else
+ netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+
+ if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
+ wake_up(&nvscdev->subchan_open);
+}
+
+/* Open sub-channels after completing the handling of the device probe.
+ * This avoids overlapping the processing of the host message for the
+ * new primary channel with the initialization of the sub-channels.
+ */
+int rndis_set_subchannel(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_device_info *dev_info)
+{
+ struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct hv_device *hv_dev = ndev_ctx->device_ctx;
+ struct rndis_device *rdev = nvdev->extension;
+ int i, ret;
+
+ ASSERT_RTNL();
+
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
+ init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
+ init_packet->msg.v5_msg.subchn_req.num_subchannels =
+ nvdev->num_chn - 1;
+ trace_nvsp_send(ndev, init_packet);
+
+ ret = vmbus_sendpacket(hv_dev->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret) {
+ netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
+ return ret;
+ }
+
+ wait_for_completion(&nvdev->channel_init_wait);
+ if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
+ netdev_err(ndev, "sub channel request failed\n");
+ return -EIO;
+ }
+
+	/* Check that the number of allocated sub channels is within the expected range */
+ if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
+ netdev_err(ndev, "invalid number of allocated sub channel\n");
+ return -EINVAL;
+ }
+ nvdev->num_chn = 1 +
+ init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+
+ /* wait for all sub channels to open */
+ wait_event(nvdev->subchan_open,
+ atomic_read(&nvdev->open_chn) == nvdev->num_chn);
+
+ for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+ ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+
+ /* ignore failures from setting rss parameters, still have channels */
+ if (dev_info)
+ rndis_filter_set_rss_param(rdev, dev_info->rss_key);
+ else
+ rndis_filter_set_rss_param(rdev, netvsc_hash_key);
+
+ netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
+ netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
+
+ return 0;
+}
+
+static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
+ struct netvsc_device *nvdev)
+{
+ struct net_device *net = rndis_device->ndev;
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct ndis_offload hwcaps;
+ struct ndis_offload_params offloads;
+ unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
+ int ret;
+
+ /* Find HW offload capabilities */
+ ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
+ if (ret != 0)
+ return ret;
+
+ /* A value of zero means "no change"; now turn on what we want. */
+ memset(&offloads, 0, sizeof(struct ndis_offload_params));
+
+ /* Linux always computes the IP header checksum in the kernel, so do not offload it */
+ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
+
+ /* Reset previously set hw_features flags */
+ net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
+ net_device_ctx->tx_checksum_mask = 0;
+
+ /* Compute tx offload settings based on hw capabilities */
+ net->hw_features |= NETIF_F_RXCSUM;
+ net->hw_features |= NETIF_F_SG;
+ net->hw_features |= NETIF_F_RXHASH;
+
+ if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
+ /* Can checksum TCP */
+ net->hw_features |= NETIF_F_IP_CSUM;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
+
+ offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+
+ if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
+ offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+ net->hw_features |= NETIF_F_TSO;
+
+ if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
+ gso_max_size = hwcaps.lsov2.ip4_maxsz;
+ }
+
+ if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
+ offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
+ }
+ }
+
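+ /* Same for IPv6: TCP checksum, TSO and UDP checksum offload when supported */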
+ if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
+ net->hw_features |= NETIF_F_IPV6_CSUM;
+
+ offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
+
+ if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+ (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
+ offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+ net->hw_features |= NETIF_F_TSO6;
+
+ if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
+ gso_max_size = hwcaps.lsov2.ip6_maxsz;
+ }
+
+ if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
+ offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
+ }
+ }
+
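+ /* Offer LRO (RSC) only if the host can coalesce both IPv4 and IPv6 */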
+ if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
+ net->hw_features |= NETIF_F_LRO;
+
+ if (net->features & NETIF_F_LRO) {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ } else {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ }
+ }
+
+ /* If some hw_features have disappeared, remove them from net->features
+ * as well, since they are no longer supported.
+ */
+ net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
+
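+ /* Cap TSO to the smallest LSOv2 size limit reported by the host */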
+ netif_set_tso_max_size(net, gso_max_size);
+
+ ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
+
+ return ret;
+}
+
+static void rndis_get_friendly_name(struct net_device *net,
+ struct rndis_device *rndis_device,
+ struct netvsc_device *net_device)
+{
+ ucs2_char_t wname[256];
+ unsigned long len;
+ u8 ifalias[256];
+ u32 size;
+
+ size = sizeof(wname);
+ if (rndis_filter_query_device(rndis_device, net_device,
+ RNDIS_OID_GEN_FRIENDLY_NAME,
+ wname, &size) != 0)
+ return; /* ignore if host does not support */
+
+ if (size == 0)
+ return; /* name not set */
+
+ /* Convert Windows Unicode string to UTF-8 */
+ len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
+
+ /* Ignore the default value from the host */
+ if (strcmp(ifalias, "Network Adapter") != 0)
+ dev_set_alias(net, ifalias, len);
+}
+
+struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+ struct netvsc_device_info *device_info)
+{
+ struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *ndc = netdev_priv(net);
+ struct netvsc_device *net_device;
+ struct rndis_device *rndis_device;
+ struct ndis_recv_scale_cap rsscap;
+ u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+ u32 mtu, size;
+ u32 num_possible_rss_qs;
+ int i, ret;
+
+ rndis_device = get_rndis_device();
+ if (!rndis_device)
+ return ERR_PTR(-ENODEV);
+
+ /* Let the inner driver handle this first so it can create the netvsc channel.
+ * NOTE! Once the channel is created, we may get a receive callback
+ * (RndisFilterOnReceive()) before this call completes.
+ */
+ net_device = netvsc_device_add(dev, device_info);
+ if (IS_ERR(net_device)) {
+ kfree(rndis_device);
+ return net_device;
+ }
+
+ /* Initialize the rndis device */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
+
+ net_device->extension = rndis_device;
+ rndis_device->ndev = net;
+
+ /* Send the rndis initialization message */
+ ret = rndis_filter_init_device(rndis_device, net_device);
+ if (ret != 0)
+ goto err_dev_remv;
+
+ /* Get the MTU from the host */
+ size = sizeof(u32);
+ ret = rndis_filter_query_device(rndis_device, net_device,
+ RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+ &mtu, &size);
+ if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+ net->mtu = mtu;
+
+ /* Get the mac address */
+ ret = rndis_filter_query_device_mac(rndis_device, net_device);
+ if (ret != 0)
+ goto err_dev_remv;
+
+ memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+
+ /* Get the friendly name and use it as the ifalias */
+ if (!net->ifalias)
+ rndis_get_friendly_name(net, rndis_device, net_device);
+
+ /* Query and set hardware capabilities */
+ ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
+ if (ret != 0)
+ goto err_dev_remv;
+
+ rndis_filter_query_device_link_status(rndis_device, net_device);
+
+ netdev_dbg(net, "Device MAC %pM link state %s\n",
+ rndis_device->hw_mac_adr,
+ rndis_device->link_state ? "down" : "up");
+
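+ /* Multi-channel (vRSS) support requires NVSP protocol version 5 or later */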
+ if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
+ goto out;
+
+ rndis_filter_query_link_speed(rndis_device, net_device);
+
+ /* vRSS setup */
+ memset(&rsscap, 0, rsscap_size);
+ ret = rndis_filter_query_device(rndis_device, net_device,
+ OID_GEN_RECEIVE_SCALE_CAPABILITIES,
+ &rsscap, &rsscap_size);
+ if (ret || rsscap.num_recv_que < 2)
+ goto out;
+
+ /* This guarantees that num_possible_rss_qs <= num_online_cpus */
+ num_possible_rss_qs = min_t(u32, num_online_cpus(),
+ rsscap.num_recv_que);
+
+ net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
+
+ /* We will use the given number of channels if available. */
+ net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
+
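+ /* Use a default RSS indirection table unless userspace has configured one */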
+ if (!netif_is_rxfh_configured(net)) {
+ for (i = 0; i < ITAB_NUM; i++)
+ ndc->rx_table[i] = ethtool_rxfh_indir_default(
+ i, net_device->num_chn);
+ }
+
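+ /* The primary channel is already open; sub-channels are opened via netvsc_sc_open() */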
+ atomic_set(&net_device->open_chn, 1);
+ vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
+
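+ /* Pre-allocate the receive completion rings for each sub-channel */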
+ for (i = 1; i < net_device->num_chn; i++) {
+ ret = netvsc_alloc_recv_comp_ring(net_device, i);
+ if (ret) {
+ while (--i != 0)
+ vfree(net_device->chan_table[i].mrc.slots);
+ goto out;
+ }
+ }
+
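+ /* Add a NAPI instance for each sub-channel */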
+ for (i = 1; i < net_device->num_chn; i++)
+ netif_napi_add(net, &net_device->chan_table[i].napi,
+ netvsc_poll);
+
+ return net_device;
+
+out:
+ /* setting up multiple channels failed */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
+ return net_device;
+
+err_dev_remv:
+ rndis_filter_device_remove(dev, net_device);
+ return ERR_PTR(ret);
+}
+
+void rndis_filter_device_remove(struct hv_device *dev,
+ struct netvsc_device *net_dev)
+{
+ struct rndis_device *rndis_dev = net_dev->extension;
+
+ /* Halt and release the rndis device */
+ rndis_filter_halt_device(net_dev, rndis_dev);
+
+ netvsc_device_remove(dev);
+}
+
+int rndis_filter_open(struct netvsc_device *nvdev)
+{
+ if (!nvdev)
+ return -EINVAL;
+
+ return rndis_filter_open_device(nvdev->extension);
+}
+
+int rndis_filter_close(struct netvsc_device *nvdev)
+{
+ if (!nvdev)
+ return -EINVAL;
+
+ return rndis_filter_close_device(nvdev->extension);
+}