author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
commit | ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree | b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/google/gve
parent | Initial commit. (diff)
download | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz, linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/google/gve')
-rw-r--r-- | drivers/net/ethernet/google/gve/Makefile | 4
-rw-r--r-- | drivers/net/ethernet/google/gve/gve.h | 1072
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_adminq.c | 1030
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_adminq.h | 442
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_desc.h | 139
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_desc_dqo.h | 261
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_dqo.h | 93
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_ethtool.c | 676
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_main.c | 2429
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_register.h | 28
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_rx.c | 1014
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_rx_dqo.c | 849
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_tx.c | 979
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_tx_dqo.c | 1274
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_utils.c | 83
-rw-r--r-- | drivers/net/ethernet/google/gve/gve_utils.h | 27
16 files changed, 10400 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile new file mode 100644 index 0000000000..b9a6be7653 --- /dev/null +++ b/drivers/net/ethernet/google/gve/Makefile @@ -0,0 +1,4 @@ +# Makefile for the Google virtual Ethernet (gve) driver + +obj-$(CONFIG_GVE) += gve.o +gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h new file mode 100644 index 0000000000..0d1e681be2 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve.h @@ -0,0 +1,1072 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#ifndef _GVE_H_ +#define _GVE_H_ + +#include <linux/dma-mapping.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/u64_stats_sync.h> +#include <net/xdp.h> + +#include "gve_desc.h" +#include "gve_desc_dqo.h" + +#ifndef PCI_VENDOR_ID_GOOGLE +#define PCI_VENDOR_ID_GOOGLE 0x1ae0 +#endif + +#define PCI_DEV_ID_GVNIC 0x0042 + +#define GVE_REGISTER_BAR 0 +#define GVE_DOORBELL_BAR 2 + +/* Driver can alloc up to 2 segments for the header and 2 for the payload. */ +#define GVE_TX_MAX_IOVEC 4 +/* 1 for management, 1 for rx, 1 for tx */ +#define GVE_MIN_MSIX 3 + +/* Numbers of gve tx/rx stats in stats report. */ +#define GVE_TX_STATS_REPORT_NUM 6 +#define GVE_RX_STATS_REPORT_NUM 2 + +/* Interval to schedule a stats report update, 20000ms. */ +#define GVE_STATS_REPORT_TIMER_PERIOD 20000 + +/* Numbers of NIC tx/rx stats in stats report. */ +#define NIC_TX_STATS_REPORT_NUM 0 +#define NIC_RX_STATS_REPORT_NUM 4 + +#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) + +/* PTYPEs are always 10 bits. */ +#define GVE_NUM_PTYPES 1024 + +#define GVE_RX_BUFFER_SIZE_DQO 2048 + +#define GVE_XDP_ACTIONS 5 + +#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 + +#define DQO_QPL_DEFAULT_TX_PAGES 512 +#define DQO_QPL_DEFAULT_RX_PAGES 2048 + +/* Maximum TSO size supported on DQO */ +#define GVE_DQO_TX_MAX 0x3FFFF + +#define GVE_TX_BUF_SHIFT_DQO 11 + +/* 2K buffers for DQO-QPL */ +#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO) +#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO) +#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO)) + +/* If number of free/recyclable buffers are less than this threshold; driver + * allocs and uses a non-qpl page on the receive path of DQO QPL to free + * up buffers. + * Value is set big enough to post at least 3 64K LRO packet via 2K buffer to NIC. 
+ */ +#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96 + +/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ +struct gve_rx_desc_queue { + struct gve_rx_desc *desc_ring; /* the descriptor ring */ + dma_addr_t bus; /* the bus for the desc_ring */ + u8 seqno; /* the next expected seqno for this desc*/ +}; + +/* The page info for a single slot in the RX data queue */ +struct gve_rx_slot_page_info { + struct page *page; + void *page_address; + u32 page_offset; /* offset to write to in page */ + int pagecnt_bias; /* expected pagecnt if only the driver has a ref */ + u16 pad; /* adjustment for rx padding */ + u8 can_flip; /* tracks if the networking stack is using the page */ +}; + +/* A list of pages registered with the device during setup and used by a queue + * as buffers + */ +struct gve_queue_page_list { + u32 id; /* unique id */ + u32 num_entries; + struct page **pages; /* list of num_entries pages */ + dma_addr_t *page_buses; /* the dma addrs of the pages */ +}; + +/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */ +struct gve_rx_data_queue { + union gve_rx_data_slot *data_ring; /* read by NIC */ + dma_addr_t data_bus; /* dma mapping of the slots */ + struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ + struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ + u8 raw_addressing; /* use raw_addressing? */ +}; + +struct gve_priv; + +/* RX buffer queue for posting buffers to HW. + * Each RX (completion) queue has a corresponding buffer queue. + */ +struct gve_rx_buf_queue_dqo { + struct gve_rx_desc_dqo *desc_ring; + dma_addr_t bus; + u32 head; /* Pointer to start cleaning buffers at. */ + u32 tail; /* Last posted buffer index + 1 */ + u32 mask; /* Mask for indices to the size of the ring */ +}; + +/* RX completion queue to receive packets from HW. */ +struct gve_rx_compl_queue_dqo { + struct gve_rx_compl_desc_dqo *desc_ring; + dma_addr_t bus; + + /* Number of slots which did not have a buffer posted yet. We should not + * post more buffers than the queue size to avoid HW overrunning the + * queue. + */ + int num_free_slots; + + /* HW uses a "generation bit" to notify SW of new descriptors. When a + * descriptor's generation bit is different from the current generation, + * that descriptor is ready to be consumed by SW. + */ + u8 cur_gen_bit; + + /* Pointer into desc_ring where the next completion descriptor will be + * received. + */ + u32 head; + u32 mask; /* Mask for indices to the size of the ring */ +}; + +/* Stores state for tracking buffers posted to HW */ +struct gve_rx_buf_state_dqo { + /* The page posted to HW. */ + struct gve_rx_slot_page_info page_info; + + /* The DMA address corresponding to `page_info`. */ + dma_addr_t addr; + + /* Last offset into the page when it only had a single reference, at + * which point every other offset is free to be reused. + */ + u32 last_single_ref_offset; + + /* Linked list index to next element in the list, or -1 if none */ + s16 next; +}; + +/* `head` and `tail` are indices into an array, or -1 if empty. */ +struct gve_index_list { + s16 head; + s16 tail; +}; + +/* A single received packet split across multiple buffers may be + * reconstructed using the information in this structure. 
+ */ +struct gve_rx_ctx { + /* head and tail of skb chain for the current packet or NULL if none */ + struct sk_buff *skb_head; + struct sk_buff *skb_tail; + u32 total_size; + u8 frag_cnt; + bool drop_pkt; +}; + +struct gve_rx_cnts { + u32 ok_pkt_bytes; + u16 ok_pkt_cnt; + u16 total_pkt_cnt; + u16 cont_pkt_cnt; + u16 desc_err_pkt_cnt; +}; + +/* Contains datapath state used to represent an RX queue. */ +struct gve_rx_ring { + struct gve_priv *gve; + union { + /* GQI fields */ + struct { + struct gve_rx_desc_queue desc; + struct gve_rx_data_queue data; + + /* threshold for posting new buffs and descs */ + u32 db_threshold; + u16 packet_buffer_size; + + u32 qpl_copy_pool_mask; + u32 qpl_copy_pool_head; + struct gve_rx_slot_page_info *qpl_copy_pool; + }; + + /* DQO fields. */ + struct { + struct gve_rx_buf_queue_dqo bufq; + struct gve_rx_compl_queue_dqo complq; + + struct gve_rx_buf_state_dqo *buf_states; + u16 num_buf_states; + + /* Linked list of gve_rx_buf_state_dqo. Index into + * buf_states, or -1 if empty. + */ + s16 free_buf_states; + + /* Linked list of gve_rx_buf_state_dqo. Indexes into + * buf_states, or -1 if empty. + * + * This list contains buf_states which are pointing to + * valid buffers. + * + * We use a FIFO here in order to increase the + * probability that buffers can be reused by increasing + * the time between usages. + */ + struct gve_index_list recycled_buf_states; + + /* Linked list of gve_rx_buf_state_dqo. Indexes into + * buf_states, or -1 if empty. + * + * This list contains buf_states which have buffers + * which cannot be reused yet. + */ + struct gve_index_list used_buf_states; + + /* qpl assigned to this queue */ + struct gve_queue_page_list *qpl; + + /* index into queue page list */ + u32 next_qpl_page_idx; + + /* track number of used buffers */ + u16 used_buf_states_cnt; + } dqo; + }; + + u64 rbytes; /* free-running bytes received */ + u64 rpackets; /* free-running packets received */ + u32 cnt; /* free-running total number of completed packets */ + u32 fill_cnt; /* free-running total number of descs and buffs posted */ + u32 mask; /* masks the cnt and fill_cnt to the size of the ring */ + u64 rx_copybreak_pkt; /* free-running count of copybreak packets */ + u64 rx_copied_pkt; /* free-running total number of copied packets */ + u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ + u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */ + u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */ + u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */ + u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ + u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */ + u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */ + u64 xdp_tx_errors; + u64 xdp_redirect_errors; + u64 xdp_alloc_fails; + u64 xdp_actions[GVE_XDP_ACTIONS]; + u32 q_num; /* queue index */ + u32 ntfy_id; /* notification block index */ + struct gve_queue_resources *q_resources; /* head and tail pointer idx */ + dma_addr_t q_resources_bus; /* dma address for the queue resources */ + struct u64_stats_sync statss; /* sync stats for 32bit archs */ + + struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. 
*/ + + /* XDP stuff */ + struct xdp_rxq_info xdp_rxq; + struct xdp_rxq_info xsk_rxq; + struct xsk_buff_pool *xsk_pool; + struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */ +}; + +/* A TX desc ring entry */ +union gve_tx_desc { + struct gve_tx_pkt_desc pkt; /* first desc for a packet */ + struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */ + struct gve_tx_seg_desc seg; /* subsequent descs for a packet */ +}; + +/* Tracks the memory in the fifo occupied by a segment of a packet */ +struct gve_tx_iovec { + u32 iov_offset; /* offset into this segment */ + u32 iov_len; /* length */ + u32 iov_padding; /* padding associated with this segment */ +}; + +/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc + * ring entry but only used for a pkt_desc not a seg_desc + */ +struct gve_tx_buffer_state { + union { + struct sk_buff *skb; /* skb for this pkt */ + struct xdp_frame *xdp_frame; /* xdp_frame */ + }; + struct { + u16 size; /* size of xmitted xdp pkt */ + u8 is_xsk; /* xsk buff */ + } xdp; + union { + struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ + struct { + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + }; + }; +}; + +/* A TX buffer - each queue has one */ +struct gve_tx_fifo { + void *base; /* address of base of FIFO */ + u32 size; /* total size */ + atomic_t available; /* how much space is still available */ + u32 head; /* offset to write at */ + struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */ +}; + +/* TX descriptor for DQO format */ +union gve_tx_desc_dqo { + struct gve_tx_pkt_desc_dqo pkt; + struct gve_tx_tso_context_desc_dqo tso_ctx; + struct gve_tx_general_context_desc_dqo general_ctx; +}; + +enum gve_packet_state { + /* Packet is in free list, available to be allocated. + * This should always be zero since state is not explicitly initialized. + */ + GVE_PACKET_STATE_UNALLOCATED, + /* Packet is expecting a regular data completion or miss completion */ + GVE_PACKET_STATE_PENDING_DATA_COMPL, + /* Packet has received a miss completion and is expecting a + * re-injection completion. + */ + GVE_PACKET_STATE_PENDING_REINJECT_COMPL, + /* No valid completion received within the specified timeout. */ + GVE_PACKET_STATE_TIMED_OUT_COMPL, +}; + +struct gve_tx_pending_packet_dqo { + struct sk_buff *skb; /* skb for this packet */ + + /* 0th element corresponds to the linear portion of `skb`, should be + * unmapped with `dma_unmap_single`. + * + * All others correspond to `skb`'s frags and should be unmapped with + * `dma_unmap_page`. + */ + union { + struct { + DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); + DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); + }; + s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT]; + }; + + u16 num_bufs; + + /* Linked list index to next element in the list, or -1 if none */ + s16 next; + + /* Linked list index to prev element in the list, or -1 if none. + * Used for tracking either outstanding miss completions or prematurely + * freed packets. + */ + s16 prev; + + /* Identifies the current state of the packet as defined in + * `enum gve_packet_state`. + */ + u8 state; + + /* If packet is an outstanding miss completion, then the packet is + * freed if the corresponding re-injection completion is not received + * before kernel jiffies exceeds timeout_jiffies. + */ + unsigned long timeout_jiffies; +}; + +/* Contains datapath state used to represent a TX queue. 
*/ +struct gve_tx_ring { + /* Cacheline 0 -- Accessed & dirtied during transmit */ + union { + /* GQI fields */ + struct { + struct gve_tx_fifo tx_fifo; + u32 req; /* driver tracked head pointer */ + u32 done; /* driver tracked tail pointer */ + }; + + /* DQO fields. */ + struct { + /* Linked list of gve_tx_pending_packet_dqo. Index into + * pending_packets, or -1 if empty. + * + * This is a consumer list owned by the TX path. When it + * runs out, the producer list is stolen from the + * completion handling path + * (dqo_compl.free_pending_packets). + */ + s16 free_pending_packets; + + /* Cached value of `dqo_compl.hw_tx_head` */ + u32 head; + u32 tail; /* Last posted buffer index + 1 */ + + /* Index of the last descriptor with "report event" bit + * set. + */ + u32 last_re_idx; + + /* free running number of packet buf descriptors posted */ + u16 posted_packet_desc_cnt; + /* free running number of packet buf descriptors completed */ + u16 completed_packet_desc_cnt; + + /* QPL fields */ + struct { + /* Linked list of gve_tx_buf_dqo. Index into + * tx_qpl_buf_next, or -1 if empty. + * + * This is a consumer list owned by the TX path. When it + * runs out, the producer list is stolen from the + * completion handling path + * (dqo_compl.free_tx_qpl_buf_head). + */ + s16 free_tx_qpl_buf_head; + + /* Free running count of the number of QPL tx buffers + * allocated + */ + u32 alloc_tx_qpl_buf_cnt; + + /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */ + u32 free_tx_qpl_buf_cnt; + }; + } dqo_tx; + }; + + /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */ + union { + /* GQI fields */ + struct { + /* Spinlock for when cleanup in progress */ + spinlock_t clean_lock; + /* Spinlock for XDP tx traffic */ + spinlock_t xdp_lock; + }; + + /* DQO fields. */ + struct { + u32 head; /* Last read on compl_desc */ + + /* Tracks the current gen bit of compl_q */ + u8 cur_gen_bit; + + /* Linked list of gve_tx_pending_packet_dqo. Index into + * pending_packets, or -1 if empty. + * + * This is the producer list, owned by the completion + * handling path. When the consumer list + * (dqo_tx.free_pending_packets) is runs out, this list + * will be stolen. + */ + atomic_t free_pending_packets; + + /* Last TX ring index fetched by HW */ + atomic_t hw_tx_head; + + /* List to track pending packets which received a miss + * completion but not a corresponding reinjection. + */ + struct gve_index_list miss_completions; + + /* List to track pending packets that were completed + * before receiving a valid completion because they + * reached a specified timeout. + */ + struct gve_index_list timed_out_completions; + + /* QPL fields */ + struct { + /* Linked list of gve_tx_buf_dqo. Index into + * tx_qpl_buf_next, or -1 if empty. + * + * This is the producer list, owned by the completion + * handling path. When the consumer list + * (dqo_tx.free_tx_qpl_buf_head) is runs out, this list + * will be stolen. 
+ */ + atomic_t free_tx_qpl_buf_head; + + /* Free running count of the number of tx buffers + * freed + */ + atomic_t free_tx_qpl_buf_cnt; + }; + } dqo_compl; + } ____cacheline_aligned; + u64 pkt_done; /* free-running - total packets completed */ + u64 bytes_done; /* free-running - total bytes completed */ + u64 dropped_pkt; /* free-running - total packets dropped */ + u64 dma_mapping_error; /* count of dma mapping errors */ + + /* Cacheline 2 -- Read-mostly fields */ + union { + /* GQI fields */ + struct { + union gve_tx_desc *desc; + + /* Maps 1:1 to a desc */ + struct gve_tx_buffer_state *info; + }; + + /* DQO fields. */ + struct { + union gve_tx_desc_dqo *tx_ring; + struct gve_tx_compl_desc *compl_ring; + + struct gve_tx_pending_packet_dqo *pending_packets; + s16 num_pending_packets; + + u32 complq_mask; /* complq size is complq_mask + 1 */ + + /* QPL fields */ + struct { + /* qpl assigned to this queue */ + struct gve_queue_page_list *qpl; + + /* Each QPL page is divided into TX bounce buffers + * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is + * an array to manage linked lists of TX buffers. + * An entry j at index i implies that j'th buffer + * is next on the list after i + */ + s16 *tx_qpl_buf_next; + u32 num_tx_qpl_bufs; + }; + } dqo; + } ____cacheline_aligned; + struct netdev_queue *netdev_txq; + struct gve_queue_resources *q_resources; /* head and tail pointer idx */ + struct device *dev; + u32 mask; /* masks req and done down to queue size */ + u8 raw_addressing; /* use raw_addressing? */ + + /* Slow-path fields */ + u32 q_num ____cacheline_aligned; /* queue idx */ + u32 stop_queue; /* count of queue stops */ + u32 wake_queue; /* count of queue wakes */ + u32 queue_timeout; /* count of queue timeouts */ + u32 ntfy_id; /* notification block index */ + u32 last_kick_msec; /* Last time the queue was kicked */ + dma_addr_t bus; /* dma address of the descr ring */ + dma_addr_t q_resources_bus; /* dma address of the queue resources */ + dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ + struct u64_stats_sync statss; /* sync stats for 32bit archs */ + struct xsk_buff_pool *xsk_pool; + u32 xdp_xsk_wakeup; + u32 xdp_xsk_done; + u64 xdp_xsk_sent; + u64 xdp_xmit; + u64 xdp_xmit_errors; +} ____cacheline_aligned; + +/* Wraps the info for one irq including the napi struct and the queues + * associated with that irq. 
+ */ +struct gve_notify_block { + __be32 *irq_db_index; /* pointer to idx into Bar2 */ + char name[IFNAMSIZ + 16]; /* name registered with the kernel */ + struct napi_struct napi; /* kernel napi struct for this block */ + struct gve_priv *priv; + struct gve_tx_ring *tx; /* tx rings on this block */ + struct gve_rx_ring *rx; /* rx rings on this block */ +}; + +/* Tracks allowed and current queue settings */ +struct gve_queue_config { + u16 max_queues; + u16 num_queues; /* current */ +}; + +/* Tracks the available and used qpl IDs */ +struct gve_qpl_config { + u32 qpl_map_size; /* map memory size */ + unsigned long *qpl_id_map; /* bitmap of used qpl ids */ +}; + +struct gve_options_dqo_rda { + u16 tx_comp_ring_entries; /* number of tx_comp descriptors */ + u16 rx_buff_ring_entries; /* number of rx_buff descriptors */ +}; + +struct gve_irq_db { + __be32 index; +} ____cacheline_aligned; + +struct gve_ptype { + u8 l3_type; /* `gve_l3_type` in gve_adminq.h */ + u8 l4_type; /* `gve_l4_type` in gve_adminq.h */ +}; + +struct gve_ptype_lut { + struct gve_ptype ptypes[GVE_NUM_PTYPES]; +}; + +/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value + * when the entire configure_device_resources command is zeroed out and the + * queue_format is not specified. + */ +enum gve_queue_format { + GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0, + GVE_GQI_RDA_FORMAT = 0x1, + GVE_GQI_QPL_FORMAT = 0x2, + GVE_DQO_RDA_FORMAT = 0x3, + GVE_DQO_QPL_FORMAT = 0x4, +}; + +struct gve_priv { + struct net_device *dev; + struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ + struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */ + struct gve_queue_page_list *qpls; /* array of num qpls */ + struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */ + struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */ + dma_addr_t irq_db_indices_bus; + struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */ + char mgmt_msix_name[IFNAMSIZ + 16]; + u32 mgmt_msix_idx; + __be32 *counter_array; /* array of num_event_counters */ + dma_addr_t counter_array_bus; + + u16 num_event_counters; + u16 tx_desc_cnt; /* num desc per ring */ + u16 rx_desc_cnt; /* num desc per ring */ + u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */ + u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */ + u16 rx_data_slot_cnt; /* rx buffer length */ + u64 max_registered_pages; + u64 num_registered_pages; /* num pages registered with NIC */ + struct bpf_prog *xdp_prog; /* XDP BPF program */ + u32 rx_copybreak; /* copy packets smaller than this */ + u16 default_num_queues; /* default num queues to set up */ + + u16 num_xdp_queues; + struct gve_queue_config tx_cfg; + struct gve_queue_config rx_cfg; + struct gve_qpl_config qpl_cfg; /* map used QPL ids */ + u32 num_ntfy_blks; /* spilt between TX and RX so must be even */ + + struct gve_registers __iomem *reg_bar0; /* see gve_register.h */ + __be32 __iomem *db_bar2; /* "array" of doorbells */ + u32 msg_enable; /* level for netif* netdev print macros */ + struct pci_dev *pdev; + + /* metrics */ + u32 tx_timeo_cnt; + + /* Admin queue - see gve_adminq.h*/ + union gve_adminq_command *adminq; + dma_addr_t adminq_bus_addr; + u32 adminq_mask; /* masks prod_cnt to adminq size */ + u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */ + u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */ + u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */ + /* free-running count of per AQ cmd executed */ + u32 
adminq_describe_device_cnt; + u32 adminq_cfg_device_resources_cnt; + u32 adminq_register_page_list_cnt; + u32 adminq_unregister_page_list_cnt; + u32 adminq_create_tx_queue_cnt; + u32 adminq_create_rx_queue_cnt; + u32 adminq_destroy_tx_queue_cnt; + u32 adminq_destroy_rx_queue_cnt; + u32 adminq_dcfg_device_resources_cnt; + u32 adminq_set_driver_parameter_cnt; + u32 adminq_report_stats_cnt; + u32 adminq_report_link_speed_cnt; + u32 adminq_get_ptype_map_cnt; + u32 adminq_verify_driver_compatibility_cnt; + + /* Global stats */ + u32 interface_up_cnt; /* count of times interface turned up since last reset */ + u32 interface_down_cnt; /* count of times interface turned down since last reset */ + u32 reset_cnt; /* count of reset */ + u32 page_alloc_fail; /* count of page alloc fails */ + u32 dma_mapping_error; /* count of dma mapping errors */ + u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */ + u32 suspend_cnt; /* count of times suspended */ + u32 resume_cnt; /* count of times resumed */ + struct workqueue_struct *gve_wq; + struct work_struct service_task; + struct work_struct stats_report_task; + unsigned long service_task_flags; + unsigned long state_flags; + + struct gve_stats_report *stats_report; + u64 stats_report_len; + dma_addr_t stats_report_bus; /* dma address for the stats report */ + unsigned long ethtool_flags; + + unsigned long stats_report_timer_period; + struct timer_list stats_report_timer; + + /* Gvnic device link speed from hypervisor. */ + u64 link_speed; + bool up_before_suspend; /* True if dev was up before suspend */ + + struct gve_options_dqo_rda options_dqo_rda; + struct gve_ptype_lut *ptype_lut_dqo; + + /* Must be a power of two. */ + int data_buffer_size_dqo; + + enum gve_queue_format queue_format; + + /* Interrupt coalescing settings */ + u32 tx_coalesce_usecs; + u32 rx_coalesce_usecs; +}; + +enum gve_service_task_flags_bit { + GVE_PRIV_FLAGS_DO_RESET = 1, + GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2, + GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3, + GVE_PRIV_FLAGS_DO_REPORT_STATS = 4, +}; + +enum gve_state_flags_bit { + GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1, + GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2, + GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3, + GVE_PRIV_FLAGS_NAPI_ENABLED = 4, +}; + +enum gve_ethtool_flags_bit { + GVE_PRIV_FLAGS_REPORT_STATS = 0, +}; + +static inline bool gve_get_do_reset(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); +} + +static inline void gve_set_do_reset(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); +} + +static inline void gve_clear_do_reset(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); +} + +static inline bool gve_get_reset_in_progress(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, + &priv->service_task_flags); +} + +static inline void gve_set_reset_in_progress(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); +} + +static inline void gve_clear_reset_in_progress(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); +} + +static inline bool gve_get_probe_in_progress(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, + &priv->service_task_flags); +} + +static inline void gve_set_probe_in_progress(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); +} + +static inline void 
gve_clear_probe_in_progress(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); +} + +static inline bool gve_get_do_report_stats(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, + &priv->service_task_flags); +} + +static inline void gve_set_do_report_stats(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); +} + +static inline void gve_clear_do_report_stats(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); +} + +static inline bool gve_get_admin_queue_ok(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); +} + +static inline void gve_set_admin_queue_ok(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); +} + +static inline void gve_clear_admin_queue_ok(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); +} + +static inline bool gve_get_device_resources_ok(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); +} + +static inline void gve_set_device_resources_ok(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); +} + +static inline void gve_clear_device_resources_ok(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); +} + +static inline bool gve_get_device_rings_ok(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); +} + +static inline void gve_set_device_rings_ok(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); +} + +static inline void gve_clear_device_rings_ok(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); +} + +static inline bool gve_get_napi_enabled(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); +} + +static inline void gve_set_napi_enabled(struct gve_priv *priv) +{ + set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); +} + +static inline void gve_clear_napi_enabled(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); +} + +static inline bool gve_get_report_stats(struct gve_priv *priv) +{ + return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); +} + +static inline void gve_clear_report_stats(struct gve_priv *priv) +{ + clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); +} + +/* Returns the address of the ntfy_blocks irq doorbell + */ +static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv, + struct gve_notify_block *block) +{ + return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)]; +} + +/* Returns the index into ntfy_blocks of the given tx ring's block + */ +static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) +{ + return queue_idx; +} + +/* Returns the index into ntfy_blocks of the given rx ring's block + */ +static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) +{ + return (priv->num_ntfy_blks / 2) + queue_idx; +} + +static inline bool gve_is_qpl(struct gve_priv *priv) +{ + return priv->queue_format == GVE_GQI_QPL_FORMAT || + priv->queue_format == GVE_DQO_QPL_FORMAT; +} + +/* Returns the number of tx queue page lists + */ +static inline u32 gve_num_tx_qpls(struct gve_priv *priv) +{ + if (!gve_is_qpl(priv)) + return 0; + + return priv->tx_cfg.num_queues + priv->num_xdp_queues; +} + 
+/* Returns the number of XDP tx queue page lists + */ +static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) +{ + if (priv->queue_format != GVE_GQI_QPL_FORMAT) + return 0; + + return priv->num_xdp_queues; +} + +/* Returns the number of rx queue page lists + */ +static inline u32 gve_num_rx_qpls(struct gve_priv *priv) +{ + if (!gve_is_qpl(priv)) + return 0; + + return priv->rx_cfg.num_queues; +} + +static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid) +{ + return tx_qid; +} + +static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid) +{ + return priv->tx_cfg.max_queues + rx_qid; +} + +static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv) +{ + return gve_tx_qpl_id(priv, 0); +} + +static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv) +{ + return gve_rx_qpl_id(priv, 0); +} + +/* Returns a pointer to the next available tx qpl in the list of qpls + */ +static inline +struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid) +{ + int id = gve_tx_qpl_id(priv, tx_qid); + + /* QPL already in use */ + if (test_bit(id, priv->qpl_cfg.qpl_id_map)) + return NULL; + + set_bit(id, priv->qpl_cfg.qpl_id_map); + return &priv->qpls[id]; +} + +/* Returns a pointer to the next available rx qpl in the list of qpls + */ +static inline +struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid) +{ + int id = gve_rx_qpl_id(priv, rx_qid); + + /* QPL already in use */ + if (test_bit(id, priv->qpl_cfg.qpl_id_map)) + return NULL; + + set_bit(id, priv->qpl_cfg.qpl_id_map); + return &priv->qpls[id]; +} + +/* Unassigns the qpl with the given id + */ +static inline void gve_unassign_qpl(struct gve_priv *priv, int id) +{ + clear_bit(id, priv->qpl_cfg.qpl_id_map); +} + +/* Returns the correct dma direction for tx and rx qpls + */ +static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, + int id) +{ + if (id < gve_rx_start_qpl_id(priv)) + return DMA_TO_DEVICE; + else + return DMA_FROM_DEVICE; +} + +static inline bool gve_is_gqi(struct gve_priv *priv) +{ + return priv->queue_format == GVE_GQI_RDA_FORMAT || + priv->queue_format == GVE_GQI_QPL_FORMAT; +} + +static inline u32 gve_num_tx_queues(struct gve_priv *priv) +{ + return priv->tx_cfg.num_queues + priv->num_xdp_queues; +} + +static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id) +{ + return priv->tx_cfg.num_queues + queue_id; +} + +static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv) +{ + return gve_xdp_tx_queue_id(priv, 0); +} + +/* buffers */ +int gve_alloc_page(struct gve_priv *priv, struct device *dev, + struct page **page, dma_addr_t *dma, + enum dma_data_direction, gfp_t gfp_flags); +void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, + enum dma_data_direction); +/* tx handling */ +netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); +int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); +int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p); +void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid); +bool gve_tx_poll(struct gve_notify_block *block, int budget); +bool gve_xdp_poll(struct gve_notify_block *block, int budget); +int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings); +void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings); +u32 gve_tx_load_event_counter(struct gve_priv *priv, + struct gve_tx_ring *tx); +bool gve_tx_clean_pending(struct gve_priv *priv, 
struct gve_tx_ring *tx); +/* rx handling */ +void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx); +int gve_rx_poll(struct gve_notify_block *block, int budget); +bool gve_rx_work_pending(struct gve_rx_ring *rx); +int gve_rx_alloc_rings(struct gve_priv *priv); +void gve_rx_free_rings_gqi(struct gve_priv *priv); +/* Reset */ +void gve_schedule_reset(struct gve_priv *priv); +int gve_reset(struct gve_priv *priv, bool attempt_teardown); +int gve_adjust_queues(struct gve_priv *priv, + struct gve_queue_config new_rx_config, + struct gve_queue_config new_tx_config); +/* report stats handling */ +void gve_handle_report_stats(struct gve_priv *priv); +/* exported by ethtool.c */ +extern const struct ethtool_ops gve_ethtool_ops; +/* needed by ethtool */ +extern char gve_driver_name[]; +extern const char gve_version_str[]; +#endif /* _GVE_H_ */ diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c new file mode 100644 index 0000000000..79db7a6d42 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -0,0 +1,1030 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#include <linux/etherdevice.h> +#include <linux/pci.h> +#include "gve.h" +#include "gve_adminq.h" +#include "gve_register.h" + +#define GVE_MAX_ADMINQ_RELEASE_CHECK 500 +#define GVE_ADMINQ_SLEEP_LEN 20 +#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100 + +#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \ +"Expected: length=%d, feature_mask=%x.\n" \ +"Actual: length=%d, feature_mask=%x.\n" + +#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n" + +static +struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor, + struct gve_device_option *option) +{ + void *option_end, *descriptor_end; + + option_end = (void *)(option + 1) + be16_to_cpu(option->option_length); + descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length); + + return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end; +} + +static +void gve_parse_device_option(struct gve_priv *priv, + struct gve_device_descriptor *device_descriptor, + struct gve_device_option *option, + struct gve_device_option_gqi_rda **dev_op_gqi_rda, + struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, + struct gve_device_option_dqo_rda **dev_op_dqo_rda, + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, + struct gve_device_option_dqo_qpl **dev_op_dqo_qpl) +{ + u32 req_feat_mask = be32_to_cpu(option->required_features_mask); + u16 option_length = be16_to_cpu(option->option_length); + u16 option_id = be16_to_cpu(option->option_id); + + /* If the length or feature mask doesn't match, continue without + * enabling the feature. 
+ */ + switch (option_id) { + case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING: + if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Raw Addressing", + GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING, + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING, + option_length, req_feat_mask); + break; + } + + dev_info(&priv->pdev->dev, + "Gqi raw addressing device option enabled.\n"); + priv->queue_format = GVE_GQI_RDA_FORMAT; + break; + case GVE_DEV_OPT_ID_GQI_RDA: + if (option_length < sizeof(**dev_op_gqi_rda) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "GQI RDA", (int)sizeof(**dev_op_gqi_rda), + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_gqi_rda)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA"); + } + *dev_op_gqi_rda = (void *)(option + 1); + break; + case GVE_DEV_OPT_ID_GQI_QPL: + if (option_length < sizeof(**dev_op_gqi_qpl) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "GQI QPL", (int)sizeof(**dev_op_gqi_qpl), + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_gqi_qpl)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL"); + } + *dev_op_gqi_qpl = (void *)(option + 1); + break; + case GVE_DEV_OPT_ID_DQO_RDA: + if (option_length < sizeof(**dev_op_dqo_rda) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "DQO RDA", (int)sizeof(**dev_op_dqo_rda), + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_dqo_rda)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA"); + } + *dev_op_dqo_rda = (void *)(option + 1); + break; + case GVE_DEV_OPT_ID_DQO_QPL: + if (option_length < sizeof(**dev_op_dqo_qpl) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "DQO QPL", (int)sizeof(**dev_op_dqo_qpl), + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_dqo_qpl)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL"); + } + *dev_op_dqo_qpl = (void *)(option + 1); + break; + case GVE_DEV_OPT_ID_JUMBO_FRAMES: + if (option_length < sizeof(**dev_op_jumbo_frames) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Jumbo Frames", + (int)sizeof(**dev_op_jumbo_frames), + GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_jumbo_frames)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "Jumbo Frames"); + } + *dev_op_jumbo_frames = (void *)(option + 1); + break; + default: + /* If we don't recognize the option just continue + * without doing anything. + */ + dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n", + option_id); + } +} + +/* Process all device options for a given describe device call. 
*/ +static int +gve_process_device_options(struct gve_priv *priv, + struct gve_device_descriptor *descriptor, + struct gve_device_option_gqi_rda **dev_op_gqi_rda, + struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, + struct gve_device_option_dqo_rda **dev_op_dqo_rda, + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, + struct gve_device_option_dqo_qpl **dev_op_dqo_qpl) +{ + const int num_options = be16_to_cpu(descriptor->num_device_options); + struct gve_device_option *dev_opt; + int i; + + /* The options struct directly follows the device descriptor. */ + dev_opt = (void *)(descriptor + 1); + for (i = 0; i < num_options; i++) { + struct gve_device_option *next_opt; + + next_opt = gve_get_next_option(descriptor, dev_opt); + if (!next_opt) { + dev_err(&priv->dev->dev, + "options exceed device_descriptor's total length.\n"); + return -EINVAL; + } + + gve_parse_device_option(priv, descriptor, dev_opt, + dev_op_gqi_rda, dev_op_gqi_qpl, + dev_op_dqo_rda, dev_op_jumbo_frames, + dev_op_dqo_qpl); + dev_opt = next_opt; + } + + return 0; +} + +int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) +{ + priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE, + &priv->adminq_bus_addr, GFP_KERNEL); + if (unlikely(!priv->adminq)) + return -ENOMEM; + + priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1; + priv->adminq_prod_cnt = 0; + priv->adminq_cmd_fail = 0; + priv->adminq_timeouts = 0; + priv->adminq_describe_device_cnt = 0; + priv->adminq_cfg_device_resources_cnt = 0; + priv->adminq_register_page_list_cnt = 0; + priv->adminq_unregister_page_list_cnt = 0; + priv->adminq_create_tx_queue_cnt = 0; + priv->adminq_create_rx_queue_cnt = 0; + priv->adminq_destroy_tx_queue_cnt = 0; + priv->adminq_destroy_rx_queue_cnt = 0; + priv->adminq_dcfg_device_resources_cnt = 0; + priv->adminq_set_driver_parameter_cnt = 0; + priv->adminq_report_stats_cnt = 0; + priv->adminq_report_link_speed_cnt = 0; + priv->adminq_get_ptype_map_cnt = 0; + + /* Setup Admin queue with the device */ + iowrite32be(priv->adminq_bus_addr / PAGE_SIZE, + &priv->reg_bar0->adminq_pfn); + + gve_set_admin_queue_ok(priv); + return 0; +} + +void gve_adminq_release(struct gve_priv *priv) +{ + int i = 0; + + /* Tell the device the adminq is leaving */ + iowrite32be(0x0, &priv->reg_bar0->adminq_pfn); + while (ioread32be(&priv->reg_bar0->adminq_pfn)) { + /* If this is reached the device is unrecoverable and still + * holding memory. Continue looping to avoid memory corruption, + * but WARN so it is visible what is going on. 
+ */ + if (i == GVE_MAX_ADMINQ_RELEASE_CHECK) + WARN(1, "Unrecoverable platform error!"); + i++; + msleep(GVE_ADMINQ_SLEEP_LEN); + } + gve_clear_device_rings_ok(priv); + gve_clear_device_resources_ok(priv); + gve_clear_admin_queue_ok(priv); +} + +void gve_adminq_free(struct device *dev, struct gve_priv *priv) +{ + if (!gve_get_admin_queue_ok(priv)) + return; + gve_adminq_release(priv); + dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr); + gve_clear_admin_queue_ok(priv); +} + +static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt) +{ + iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell); +} + +static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt) +{ + int i; + + for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) { + if (ioread32be(&priv->reg_bar0->adminq_event_counter) + == prod_cnt) + return true; + msleep(GVE_ADMINQ_SLEEP_LEN); + } + + return false; +} + +static int gve_adminq_parse_err(struct gve_priv *priv, u32 status) +{ + if (status != GVE_ADMINQ_COMMAND_PASSED && + status != GVE_ADMINQ_COMMAND_UNSET) { + dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status); + priv->adminq_cmd_fail++; + } + switch (status) { + case GVE_ADMINQ_COMMAND_PASSED: + return 0; + case GVE_ADMINQ_COMMAND_UNSET: + dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n"); + return -EINVAL; + case GVE_ADMINQ_COMMAND_ERROR_ABORTED: + case GVE_ADMINQ_COMMAND_ERROR_CANCELLED: + case GVE_ADMINQ_COMMAND_ERROR_DATALOSS: + case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION: + case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE: + return -EAGAIN; + case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS: + case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR: + case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT: + case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND: + case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE: + case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR: + return -EINVAL; + case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED: + return -ETIME; + case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED: + case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED: + return -EACCES; + case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED: + return -ENOMEM; + case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED: + return -EOPNOTSUPP; + default: + dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status); + return -EINVAL; + } +} + +/* Flushes all AQ commands currently queued and waits for them to complete. + * If there are failures, it will return the first error. + */ +static int gve_adminq_kick_and_wait(struct gve_priv *priv) +{ + int tail, head; + int i; + + tail = ioread32be(&priv->reg_bar0->adminq_event_counter); + head = priv->adminq_prod_cnt; + + gve_adminq_kick_cmd(priv, head); + if (!gve_adminq_wait_for_cmd(priv, head)) { + dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n"); + priv->adminq_timeouts++; + return -ENOTRECOVERABLE; + } + + for (i = tail; i < head; i++) { + union gve_adminq_command *cmd; + u32 status, err; + + cmd = &priv->adminq[i & priv->adminq_mask]; + status = be32_to_cpu(READ_ONCE(cmd->status)); + err = gve_adminq_parse_err(priv, status); + if (err) + // Return the first error if we failed. + return err; + } + + return 0; +} + +/* This function is not threadsafe - the caller is responsible for any + * necessary locks. 
+ */ +static int gve_adminq_issue_cmd(struct gve_priv *priv, + union gve_adminq_command *cmd_orig) +{ + union gve_adminq_command *cmd; + u32 opcode; + u32 tail; + + tail = ioread32be(&priv->reg_bar0->adminq_event_counter); + + // Check if next command will overflow the buffer. + if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == + (tail & priv->adminq_mask)) { + int err; + + // Flush existing commands to make room. + err = gve_adminq_kick_and_wait(priv); + if (err) + return err; + + // Retry. + tail = ioread32be(&priv->reg_bar0->adminq_event_counter); + if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == + (tail & priv->adminq_mask)) { + // This should never happen. We just flushed the + // command queue so there should be enough space. + return -ENOMEM; + } + } + + cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask]; + priv->adminq_prod_cnt++; + + memcpy(cmd, cmd_orig, sizeof(*cmd_orig)); + opcode = be32_to_cpu(READ_ONCE(cmd->opcode)); + + switch (opcode) { + case GVE_ADMINQ_DESCRIBE_DEVICE: + priv->adminq_describe_device_cnt++; + break; + case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES: + priv->adminq_cfg_device_resources_cnt++; + break; + case GVE_ADMINQ_REGISTER_PAGE_LIST: + priv->adminq_register_page_list_cnt++; + break; + case GVE_ADMINQ_UNREGISTER_PAGE_LIST: + priv->adminq_unregister_page_list_cnt++; + break; + case GVE_ADMINQ_CREATE_TX_QUEUE: + priv->adminq_create_tx_queue_cnt++; + break; + case GVE_ADMINQ_CREATE_RX_QUEUE: + priv->adminq_create_rx_queue_cnt++; + break; + case GVE_ADMINQ_DESTROY_TX_QUEUE: + priv->adminq_destroy_tx_queue_cnt++; + break; + case GVE_ADMINQ_DESTROY_RX_QUEUE: + priv->adminq_destroy_rx_queue_cnt++; + break; + case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES: + priv->adminq_dcfg_device_resources_cnt++; + break; + case GVE_ADMINQ_SET_DRIVER_PARAMETER: + priv->adminq_set_driver_parameter_cnt++; + break; + case GVE_ADMINQ_REPORT_STATS: + priv->adminq_report_stats_cnt++; + break; + case GVE_ADMINQ_REPORT_LINK_SPEED: + priv->adminq_report_link_speed_cnt++; + break; + case GVE_ADMINQ_GET_PTYPE_MAP: + priv->adminq_get_ptype_map_cnt++; + break; + case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY: + priv->adminq_verify_driver_compatibility_cnt++; + break; + default: + dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); + } + + return 0; +} + +/* This function is not threadsafe - the caller is responsible for any + * necessary locks. + * The caller is also responsible for making sure there are no commands + * waiting to be executed. + */ +static int gve_adminq_execute_cmd(struct gve_priv *priv, + union gve_adminq_command *cmd_orig) +{ + u32 tail, head; + int err; + + tail = ioread32be(&priv->reg_bar0->adminq_event_counter); + head = priv->adminq_prod_cnt; + if (tail != head) + // This is not a valid path + return -EINVAL; + + err = gve_adminq_issue_cmd(priv, cmd_orig); + if (err) + return err; + + return gve_adminq_kick_and_wait(priv); +} + +/* The device specifies that the management vector can either be the first irq + * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to + * the ntfy blks. It if is 0 then the management vector is last, if it is 1 then + * the management vector is first. + * + * gve arranges the msix vectors so that the management vector is last. 
+ */ +#define GVE_NTFY_BLK_BASE_MSIX_IDX 0 +int gve_adminq_configure_device_resources(struct gve_priv *priv, + dma_addr_t counter_array_bus_addr, + u32 num_counters, + dma_addr_t db_array_bus_addr, + u32 num_ntfy_blks) +{ + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES); + cmd.configure_device_resources = + (struct gve_adminq_configure_device_resources) { + .counter_array = cpu_to_be64(counter_array_bus_addr), + .num_counters = cpu_to_be32(num_counters), + .irq_db_addr = cpu_to_be64(db_array_bus_addr), + .num_irq_dbs = cpu_to_be32(num_ntfy_blks), + .irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)), + .ntfy_blk_msix_base_idx = + cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX), + .queue_format = priv->queue_format, + }; + + return gve_adminq_execute_cmd(priv, &cmd); +} + +int gve_adminq_deconfigure_device_resources(struct gve_priv *priv) +{ + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES); + + return gve_adminq_execute_cmd(priv, &cmd); +} + +static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) +{ + struct gve_tx_ring *tx = &priv->tx[queue_index]; + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE); + cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) { + .queue_id = cpu_to_be32(queue_index), + .queue_resources_addr = + cpu_to_be64(tx->q_resources_bus), + .tx_ring_addr = cpu_to_be64(tx->bus), + .ntfy_id = cpu_to_be32(tx->ntfy_id), + }; + + if (gve_is_gqi(priv)) { + u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ? + GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id; + + cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); + } else { + u16 comp_ring_size; + u32 qpl_id = 0; + + if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + qpl_id = GVE_RAW_ADDRESSING_QPL_ID; + comp_ring_size = + priv->options_dqo_rda.tx_comp_ring_entries; + } else { + qpl_id = tx->dqo.qpl->id; + comp_ring_size = priv->tx_desc_cnt; + } + cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); + cmd.create_tx_queue.tx_ring_size = + cpu_to_be16(priv->tx_desc_cnt); + cmd.create_tx_queue.tx_comp_ring_addr = + cpu_to_be64(tx->complq_bus_dqo); + cmd.create_tx_queue.tx_comp_ring_size = + cpu_to_be16(comp_ring_size); + } + + return gve_adminq_issue_cmd(priv, &cmd); +} + +int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues) +{ + int err; + int i; + + for (i = start_id; i < start_id + num_queues; i++) { + err = gve_adminq_create_tx_queue(priv, i); + if (err) + return err; + } + + return gve_adminq_kick_and_wait(priv); +} + +static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) +{ + struct gve_rx_ring *rx = &priv->rx[queue_index]; + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE); + cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) { + .queue_id = cpu_to_be32(queue_index), + .ntfy_id = cpu_to_be32(rx->ntfy_id), + .queue_resources_addr = cpu_to_be64(rx->q_resources_bus), + }; + + if (gve_is_gqi(priv)) { + u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ? 
+ GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id; + + cmd.create_rx_queue.rx_desc_ring_addr = + cpu_to_be64(rx->desc.bus), + cmd.create_rx_queue.rx_data_ring_addr = + cpu_to_be64(rx->data.data_bus), + cmd.create_rx_queue.index = cpu_to_be32(queue_index); + cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); + cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); + } else { + u16 rx_buff_ring_entries; + u32 qpl_id = 0; + + if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + qpl_id = GVE_RAW_ADDRESSING_QPL_ID; + rx_buff_ring_entries = + priv->options_dqo_rda.rx_buff_ring_entries; + } else { + qpl_id = rx->dqo.qpl->id; + rx_buff_ring_entries = priv->rx_desc_cnt; + } + cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); + cmd.create_rx_queue.rx_ring_size = + cpu_to_be16(priv->rx_desc_cnt); + cmd.create_rx_queue.rx_desc_ring_addr = + cpu_to_be64(rx->dqo.complq.bus); + cmd.create_rx_queue.rx_data_ring_addr = + cpu_to_be64(rx->dqo.bufq.bus); + cmd.create_rx_queue.packet_buffer_size = + cpu_to_be16(priv->data_buffer_size_dqo); + cmd.create_rx_queue.rx_buff_ring_size = + cpu_to_be16(rx_buff_ring_entries); + cmd.create_rx_queue.enable_rsc = + !!(priv->dev->features & NETIF_F_LRO); + } + + return gve_adminq_issue_cmd(priv, &cmd); +} + +int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues) +{ + int err; + int i; + + for (i = 0; i < num_queues; i++) { + err = gve_adminq_create_rx_queue(priv, i); + if (err) + return err; + } + + return gve_adminq_kick_and_wait(priv); +} + +static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index) +{ + union gve_adminq_command cmd; + int err; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE); + cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) { + .queue_id = cpu_to_be32(queue_index), + }; + + err = gve_adminq_issue_cmd(priv, &cmd); + if (err) + return err; + + return 0; +} + +int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues) +{ + int err; + int i; + + for (i = start_id; i < start_id + num_queues; i++) { + err = gve_adminq_destroy_tx_queue(priv, i); + if (err) + return err; + } + + return gve_adminq_kick_and_wait(priv); +} + +static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index) +{ + union gve_adminq_command cmd; + int err; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE); + cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) { + .queue_id = cpu_to_be32(queue_index), + }; + + err = gve_adminq_issue_cmd(priv, &cmd); + if (err) + return err; + + return 0; +} + +int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues) +{ + int err; + int i; + + for (i = 0; i < num_queues; i++) { + err = gve_adminq_destroy_rx_queue(priv, i); + if (err) + return err; + } + + return gve_adminq_kick_and_wait(priv); +} + +static int gve_set_desc_cnt(struct gve_priv *priv, + struct gve_device_descriptor *descriptor) +{ + priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); + if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) { + dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", + priv->tx_desc_cnt); + return -EINVAL; + } + priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); + if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0]) + < PAGE_SIZE) { + dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", + priv->rx_desc_cnt); + return -EINVAL; + } + return 0; +} + +static int +gve_set_desc_cnt_dqo(struct 
gve_priv *priv, + const struct gve_device_descriptor *descriptor, + const struct gve_device_option_dqo_rda *dev_op_dqo_rda) +{ + priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); + priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); + + if (priv->queue_format == GVE_DQO_QPL_FORMAT) + return 0; + + priv->options_dqo_rda.tx_comp_ring_entries = + be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries); + priv->options_dqo_rda.rx_buff_ring_entries = + be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries); + + return 0; +} + +static void gve_enable_supported_features(struct gve_priv *priv, + u32 supported_features_mask, + const struct gve_device_option_jumbo_frames + *dev_op_jumbo_frames, + const struct gve_device_option_dqo_qpl + *dev_op_dqo_qpl) +{ + /* Before control reaches this point, the page-size-capped max MTU from + * the gve_device_descriptor field has already been stored in + * priv->dev->max_mtu. We overwrite it with the true max MTU below. + */ + if (dev_op_jumbo_frames && + (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) { + dev_info(&priv->pdev->dev, + "JUMBO FRAMES device option enabled.\n"); + priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu); + } + + /* Override pages for qpl for DQO-QPL */ + if (dev_op_dqo_qpl) { + priv->tx_pages_per_qpl = + be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl); + priv->rx_pages_per_qpl = + be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl); + if (priv->tx_pages_per_qpl == 0) + priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES; + if (priv->rx_pages_per_qpl == 0) + priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES; + } +} + +int gve_adminq_describe_device(struct gve_priv *priv) +{ + struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; + struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; + struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL; + struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL; + struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL; + struct gve_device_descriptor *descriptor; + u32 supported_features_mask = 0; + union gve_adminq_command cmd; + dma_addr_t descriptor_bus; + int err = 0; + u8 *mac; + u16 mtu; + + memset(&cmd, 0, sizeof(cmd)); + descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE, + &descriptor_bus, GFP_KERNEL); + if (!descriptor) + return -ENOMEM; + cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE); + cmd.describe_device.device_descriptor_addr = + cpu_to_be64(descriptor_bus); + cmd.describe_device.device_descriptor_version = + cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION); + cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE); + + err = gve_adminq_execute_cmd(priv, &cmd); + if (err) + goto free_device_descriptor; + + err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda, + &dev_op_gqi_qpl, &dev_op_dqo_rda, + &dev_op_jumbo_frames, + &dev_op_dqo_qpl); + if (err) + goto free_device_descriptor; + + /* If the GQI_RAW_ADDRESSING option is not enabled and the queue format + * is not set to GqiRda, choose the queue format in a priority order: + * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default. 
+ */ + if (dev_op_dqo_rda) { + priv->queue_format = GVE_DQO_RDA_FORMAT; + dev_info(&priv->pdev->dev, + "Driver is running with DQO RDA queue format.\n"); + supported_features_mask = + be32_to_cpu(dev_op_dqo_rda->supported_features_mask); + } else if (dev_op_dqo_qpl) { + priv->queue_format = GVE_DQO_QPL_FORMAT; + supported_features_mask = + be32_to_cpu(dev_op_dqo_qpl->supported_features_mask); + } else if (dev_op_gqi_rda) { + priv->queue_format = GVE_GQI_RDA_FORMAT; + dev_info(&priv->pdev->dev, + "Driver is running with GQI RDA queue format.\n"); + supported_features_mask = + be32_to_cpu(dev_op_gqi_rda->supported_features_mask); + } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) { + dev_info(&priv->pdev->dev, + "Driver is running with GQI RDA queue format.\n"); + } else { + priv->queue_format = GVE_GQI_QPL_FORMAT; + if (dev_op_gqi_qpl) + supported_features_mask = + be32_to_cpu(dev_op_gqi_qpl->supported_features_mask); + dev_info(&priv->pdev->dev, + "Driver is running with GQI QPL queue format.\n"); + } + if (gve_is_gqi(priv)) { + err = gve_set_desc_cnt(priv, descriptor); + } else { + /* DQO supports LRO. */ + priv->dev->hw_features |= NETIF_F_LRO; + err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda); + } + if (err) + goto free_device_descriptor; + + priv->max_registered_pages = + be64_to_cpu(descriptor->max_registered_pages); + mtu = be16_to_cpu(descriptor->mtu); + if (mtu < ETH_MIN_MTU) { + dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu); + err = -EINVAL; + goto free_device_descriptor; + } + priv->dev->max_mtu = mtu; + priv->num_event_counters = be16_to_cpu(descriptor->counters); + eth_hw_addr_set(priv->dev, descriptor->mac); + mac = descriptor->mac; + dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac); + priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl); + priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl); + + if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) { + dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n", + priv->rx_data_slot_cnt); + priv->rx_desc_cnt = priv->rx_data_slot_cnt; + } + priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); + + gve_enable_supported_features(priv, supported_features_mask, + dev_op_jumbo_frames, dev_op_dqo_qpl); + +free_device_descriptor: + dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor, + descriptor_bus); + return err; +} + +int gve_adminq_register_page_list(struct gve_priv *priv, + struct gve_queue_page_list *qpl) +{ + struct device *hdev = &priv->pdev->dev; + u32 num_entries = qpl->num_entries; + u32 size = num_entries * sizeof(qpl->page_buses[0]); + union gve_adminq_command cmd; + dma_addr_t page_list_bus; + __be64 *page_list; + int err; + int i; + + memset(&cmd, 0, sizeof(cmd)); + page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) + page_list[i] = cpu_to_be64(qpl->page_buses[i]); + + cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST); + cmd.reg_page_list = (struct gve_adminq_register_page_list) { + .page_list_id = cpu_to_be32(qpl->id), + .num_pages = cpu_to_be32(num_entries), + .page_address_list_addr = cpu_to_be64(page_list_bus), + }; + + err = gve_adminq_execute_cmd(priv, &cmd); + dma_free_coherent(hdev, size, page_list, page_list_bus); + return err; +} + +int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id) +{ + union gve_adminq_command cmd; + + 
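+	/* Unlike the batched create/destroy queue commands above, this one is
+	 * executed (issued and waited on) on its own.
+	 */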
memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST); + cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) { + .page_list_id = cpu_to_be32(page_list_id), + }; + + return gve_adminq_execute_cmd(priv, &cmd); +} + +int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu) +{ + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER); + cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) { + .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU), + .parameter_value = cpu_to_be64(mtu), + }; + + return gve_adminq_execute_cmd(priv, &cmd); +} + +int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len, + dma_addr_t stats_report_addr, u64 interval) +{ + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS); + cmd.report_stats = (struct gve_adminq_report_stats) { + .stats_report_len = cpu_to_be64(stats_report_len), + .stats_report_addr = cpu_to_be64(stats_report_addr), + .interval = cpu_to_be64(interval), + }; + + return gve_adminq_execute_cmd(priv, &cmd); +} + +int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, + u64 driver_info_len, + dma_addr_t driver_info_addr) +{ + union gve_adminq_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY); + cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) { + .driver_info_len = cpu_to_be64(driver_info_len), + .driver_info_addr = cpu_to_be64(driver_info_addr), + }; + + return gve_adminq_execute_cmd(priv, &cmd); +} + +int gve_adminq_report_link_speed(struct gve_priv *priv) +{ + union gve_adminq_command gvnic_cmd; + dma_addr_t link_speed_region_bus; + __be64 *link_speed_region; + int err; + + link_speed_region = + dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region), + &link_speed_region_bus, GFP_KERNEL); + + if (!link_speed_region) + return -ENOMEM; + + memset(&gvnic_cmd, 0, sizeof(gvnic_cmd)); + gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED); + gvnic_cmd.report_link_speed.link_speed_address = + cpu_to_be64(link_speed_region_bus); + + err = gve_adminq_execute_cmd(priv, &gvnic_cmd); + + priv->link_speed = be64_to_cpu(*link_speed_region); + dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region, + link_speed_region_bus); + return err; +} + +int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, + struct gve_ptype_lut *ptype_lut) +{ + struct gve_ptype_map *ptype_map; + union gve_adminq_command cmd; + dma_addr_t ptype_map_bus; + int err = 0; + int i; + + memset(&cmd, 0, sizeof(cmd)); + ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map), + &ptype_map_bus, GFP_KERNEL); + if (!ptype_map) + return -ENOMEM; + + cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP); + cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) { + .ptype_map_len = cpu_to_be64(sizeof(*ptype_map)), + .ptype_map_addr = cpu_to_be64(ptype_map_bus), + }; + + err = gve_adminq_execute_cmd(priv, &cmd); + if (err) + goto err; + + /* Populate ptype_lut. 
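+	 * The control-path type values match the data-path ones, so each
+	 * entry is copied through unchanged.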
*/ + for (i = 0; i < GVE_NUM_PTYPES; i++) { + ptype_lut->ptypes[i].l3_type = + ptype_map->ptypes[i].l3_type; + ptype_lut->ptypes[i].l4_type = + ptype_map->ptypes[i].l4_type; + } +err: + dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map, + ptype_map_bus); + return err; +} diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h new file mode 100644 index 0000000000..38a22279e8 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#ifndef _GVE_ADMINQ_H +#define _GVE_ADMINQ_H + +#include <linux/build_bug.h> + +/* Admin queue opcodes */ +enum gve_adminq_opcodes { + GVE_ADMINQ_DESCRIBE_DEVICE = 0x1, + GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES = 0x2, + GVE_ADMINQ_REGISTER_PAGE_LIST = 0x3, + GVE_ADMINQ_UNREGISTER_PAGE_LIST = 0x4, + GVE_ADMINQ_CREATE_TX_QUEUE = 0x5, + GVE_ADMINQ_CREATE_RX_QUEUE = 0x6, + GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7, + GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8, + GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9, + GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB, + GVE_ADMINQ_REPORT_STATS = 0xC, + GVE_ADMINQ_REPORT_LINK_SPEED = 0xD, + GVE_ADMINQ_GET_PTYPE_MAP = 0xE, + GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF, +}; + +/* Admin queue status codes */ +enum gve_adminq_statuses { + GVE_ADMINQ_COMMAND_UNSET = 0x0, + GVE_ADMINQ_COMMAND_PASSED = 0x1, + GVE_ADMINQ_COMMAND_ERROR_ABORTED = 0xFFFFFFF0, + GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS = 0xFFFFFFF1, + GVE_ADMINQ_COMMAND_ERROR_CANCELLED = 0xFFFFFFF2, + GVE_ADMINQ_COMMAND_ERROR_DATALOSS = 0xFFFFFFF3, + GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED = 0xFFFFFFF4, + GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION = 0xFFFFFFF5, + GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR = 0xFFFFFFF6, + GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT = 0xFFFFFFF7, + GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND = 0xFFFFFFF8, + GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE = 0xFFFFFFF9, + GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED = 0xFFFFFFFA, + GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED = 0xFFFFFFFB, + GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED = 0xFFFFFFFC, + GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE = 0xFFFFFFFD, + GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED = 0xFFFFFFFE, + GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR = 0xFFFFFFFF, +}; + +#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1 + +/* All AdminQ command structs should be naturally packed. The static_assert + * calls make sure this is the case at compile time. 
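+ * Naturally packed here means the layout needs no compiler-inserted padding,
+ * so sizeof() matches the byte layout the device expects.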
+ */ + +struct gve_adminq_describe_device { + __be64 device_descriptor_addr; + __be32 device_descriptor_version; + __be32 available_length; +}; + +static_assert(sizeof(struct gve_adminq_describe_device) == 16); + +struct gve_device_descriptor { + __be64 max_registered_pages; + __be16 reserved1; + __be16 tx_queue_entries; + __be16 rx_queue_entries; + __be16 default_num_queues; + __be16 mtu; + __be16 counters; + __be16 tx_pages_per_qpl; + __be16 rx_pages_per_qpl; + u8 mac[ETH_ALEN]; + __be16 num_device_options; + __be16 total_length; + u8 reserved2[6]; +}; + +static_assert(sizeof(struct gve_device_descriptor) == 40); + +struct gve_device_option { + __be16 option_id; + __be16 option_length; + __be32 required_features_mask; +}; + +static_assert(sizeof(struct gve_device_option) == 8); + +struct gve_device_option_gqi_rda { + __be32 supported_features_mask; +}; + +static_assert(sizeof(struct gve_device_option_gqi_rda) == 4); + +struct gve_device_option_gqi_qpl { + __be32 supported_features_mask; +}; + +static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4); + +struct gve_device_option_dqo_rda { + __be32 supported_features_mask; + __be16 tx_comp_ring_entries; + __be16 rx_buff_ring_entries; +}; + +static_assert(sizeof(struct gve_device_option_dqo_rda) == 8); + +struct gve_device_option_dqo_qpl { + __be32 supported_features_mask; + __be16 tx_pages_per_qpl; + __be16 rx_pages_per_qpl; +}; + +static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8); + +struct gve_device_option_jumbo_frames { + __be32 supported_features_mask; + __be16 max_mtu; + u8 padding[2]; +}; + +static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8); + +/* Terminology: + * + * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA + * mapped and read/updated by the device. + * + * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with + * the device for read/write and data is copied from/to SKBs. 
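+ * The device option IDs below advertise which of these addressing modes the
+ * device supports.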
+ */ +enum gve_dev_opt_id { + GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1, + GVE_DEV_OPT_ID_GQI_RDA = 0x2, + GVE_DEV_OPT_ID_GQI_QPL = 0x3, + GVE_DEV_OPT_ID_DQO_RDA = 0x4, + GVE_DEV_OPT_ID_DQO_QPL = 0x7, + GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, +}; + +enum gve_dev_opt_req_feat_mask { + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, +}; + +enum gve_sup_feature_mask { + GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, +}; + +#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 + +#define GVE_VERSION_STR_LEN 128 + +enum gve_driver_capbility { + gve_driver_capability_gqi_qpl = 0, + gve_driver_capability_gqi_rda = 1, + gve_driver_capability_dqo_qpl = 2, /* reserved for future use */ + gve_driver_capability_dqo_rda = 3, + gve_driver_capability_alt_miss_compl = 4, +}; + +#define GVE_CAP1(a) BIT((int)a) +#define GVE_CAP2(a) BIT(((int)a) - 64) +#define GVE_CAP3(a) BIT(((int)a) - 128) +#define GVE_CAP4(a) BIT(((int)a) - 192) + +#define GVE_DRIVER_CAPABILITY_FLAGS1 \ + (GVE_CAP1(gve_driver_capability_gqi_qpl) | \ + GVE_CAP1(gve_driver_capability_gqi_rda) | \ + GVE_CAP1(gve_driver_capability_dqo_rda) | \ + GVE_CAP1(gve_driver_capability_alt_miss_compl)) + +#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0 +#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0 +#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0 + +struct gve_driver_info { + u8 os_type; /* 0x01 = Linux */ + u8 driver_major; + u8 driver_minor; + u8 driver_sub; + __be32 os_version_major; + __be32 os_version_minor; + __be32 os_version_sub; + __be64 driver_capability_flags[4]; + u8 os_version_str1[GVE_VERSION_STR_LEN]; + u8 os_version_str2[GVE_VERSION_STR_LEN]; +}; + +struct gve_adminq_verify_driver_compatibility { + __be64 driver_info_len; + __be64 driver_info_addr; +}; + +static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16); + +struct gve_adminq_configure_device_resources { + __be64 counter_array; + __be64 irq_db_addr; + __be32 num_counters; + __be32 num_irq_dbs; + __be32 irq_db_stride; + __be32 ntfy_blk_msix_base_idx; + u8 queue_format; + u8 padding[7]; +}; + +static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40); + +struct gve_adminq_register_page_list { + __be32 page_list_id; + __be32 num_pages; + __be64 page_address_list_addr; +}; + +static_assert(sizeof(struct gve_adminq_register_page_list) == 16); + +struct gve_adminq_unregister_page_list { + __be32 page_list_id; +}; + +static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4); + +#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF + +struct gve_adminq_create_tx_queue { + __be32 queue_id; + __be32 reserved; + __be64 queue_resources_addr; + __be64 tx_ring_addr; + __be32 queue_page_list_id; + __be32 ntfy_id; + __be64 tx_comp_ring_addr; + __be16 tx_ring_size; + __be16 tx_comp_ring_size; + u8 padding[4]; +}; + +static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48); + +struct gve_adminq_create_rx_queue { + __be32 queue_id; + __be32 index; + __be32 reserved; + __be32 ntfy_id; + __be64 queue_resources_addr; + __be64 rx_desc_ring_addr; + __be64 rx_data_ring_addr; + __be32 queue_page_list_id; + __be16 rx_ring_size; + __be16 packet_buffer_size; + __be16 rx_buff_ring_size; + u8 enable_rsc; + u8 padding[5]; +}; + +static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56); + +/* Queue resources that are shared with the device */ +struct gve_queue_resources { + union { + struct 
{ + __be32 db_index; /* Device -> Guest */ + __be32 counter_index; /* Device -> Guest */ + }; + u8 reserved[64]; + }; +}; + +static_assert(sizeof(struct gve_queue_resources) == 64); + +struct gve_adminq_destroy_tx_queue { + __be32 queue_id; +}; + +static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4); + +struct gve_adminq_destroy_rx_queue { + __be32 queue_id; +}; + +static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4); + +/* GVE Set Driver Parameter Types */ +enum gve_set_driver_param_types { + GVE_SET_PARAM_MTU = 0x1, +}; + +struct gve_adminq_set_driver_parameter { + __be32 parameter_type; + u8 reserved[4]; + __be64 parameter_value; +}; + +static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16); + +struct gve_adminq_report_stats { + __be64 stats_report_len; + __be64 stats_report_addr; + __be64 interval; +}; + +static_assert(sizeof(struct gve_adminq_report_stats) == 24); + +struct gve_adminq_report_link_speed { + __be64 link_speed_address; +}; + +static_assert(sizeof(struct gve_adminq_report_link_speed) == 8); + +struct stats { + __be32 stat_name; + __be32 queue_id; + __be64 value; +}; + +static_assert(sizeof(struct stats) == 16); + +struct gve_stats_report { + __be64 written_count; + struct stats stats[]; +}; + +static_assert(sizeof(struct gve_stats_report) == 8); + +enum gve_stat_names { + // stats from gve + TX_WAKE_CNT = 1, + TX_STOP_CNT = 2, + TX_FRAMES_SENT = 3, + TX_BYTES_SENT = 4, + TX_LAST_COMPLETION_PROCESSED = 5, + RX_NEXT_EXPECTED_SEQUENCE = 6, + RX_BUFFERS_POSTED = 7, + TX_TIMEOUT_CNT = 8, + // stats from NIC + RX_QUEUE_DROP_CNT = 65, + RX_NO_BUFFERS_POSTED = 66, + RX_DROPS_PACKET_OVER_MRU = 67, + RX_DROPS_INVALID_CHECKSUM = 68, +}; + +enum gve_l3_type { + /* Must be zero so zero initialized LUT is unknown. */ + GVE_L3_TYPE_UNKNOWN = 0, + GVE_L3_TYPE_OTHER, + GVE_L3_TYPE_IPV4, + GVE_L3_TYPE_IPV6, +}; + +enum gve_l4_type { + /* Must be zero so zero initialized LUT is unknown. */ + GVE_L4_TYPE_UNKNOWN = 0, + GVE_L4_TYPE_OTHER, + GVE_L4_TYPE_TCP, + GVE_L4_TYPE_UDP, + GVE_L4_TYPE_ICMP, + GVE_L4_TYPE_SCTP, +}; + +/* These are control path types for PTYPE which are the same as the data path + * types. + */ +struct gve_ptype_entry { + u8 l3_type; + u8 l4_type; +}; + +struct gve_ptype_map { + struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. 
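+					 * The 1 << 10 entries therefore
+					 * correspond to GVE_NUM_PTYPES (1024).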
*/ +}; + +struct gve_adminq_get_ptype_map { + __be64 ptype_map_len; + __be64 ptype_map_addr; +}; + +union gve_adminq_command { + struct { + __be32 opcode; + __be32 status; + union { + struct gve_adminq_configure_device_resources + configure_device_resources; + struct gve_adminq_create_tx_queue create_tx_queue; + struct gve_adminq_create_rx_queue create_rx_queue; + struct gve_adminq_destroy_tx_queue destroy_tx_queue; + struct gve_adminq_destroy_rx_queue destroy_rx_queue; + struct gve_adminq_describe_device describe_device; + struct gve_adminq_register_page_list reg_page_list; + struct gve_adminq_unregister_page_list unreg_page_list; + struct gve_adminq_set_driver_parameter set_driver_param; + struct gve_adminq_report_stats report_stats; + struct gve_adminq_report_link_speed report_link_speed; + struct gve_adminq_get_ptype_map get_ptype_map; + struct gve_adminq_verify_driver_compatibility + verify_driver_compatibility; + }; + }; + u8 reserved[64]; +}; + +static_assert(sizeof(union gve_adminq_command) == 64); + +int gve_adminq_alloc(struct device *dev, struct gve_priv *priv); +void gve_adminq_free(struct device *dev, struct gve_priv *priv); +void gve_adminq_release(struct gve_priv *priv); +int gve_adminq_describe_device(struct gve_priv *priv); +int gve_adminq_configure_device_resources(struct gve_priv *priv, + dma_addr_t counter_array_bus_addr, + u32 num_counters, + dma_addr_t db_array_bus_addr, + u32 num_ntfy_blks); +int gve_adminq_deconfigure_device_resources(struct gve_priv *priv); +int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues); +int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues); +int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues); +int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id); +int gve_adminq_register_page_list(struct gve_priv *priv, + struct gve_queue_page_list *qpl); +int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id); +int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu); +int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len, + dma_addr_t stats_report_addr, u64 interval); +int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, + u64 driver_info_len, + dma_addr_t driver_info_addr); +int gve_adminq_report_link_speed(struct gve_priv *priv); + +struct gve_ptype_lut; +int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, + struct gve_ptype_lut *ptype_lut); + +#endif /* _GVE_ADMINQ_H */ diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h new file mode 100644 index 0000000000..c2874cdcf4 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_desc.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2019 Google, Inc. + */ + +/* GVE Transmit Descriptor formats */ + +#ifndef _GVE_DESC_H_ +#define _GVE_DESC_H_ + +#include <linux/build_bug.h> + +/* A note on seg_addrs + * + * Base addresses encoded in seg_addr are not assumed to be physical + * addresses. The ring format assumes these come from some linear address + * space. This could be physical memory, kernel virtual memory, user virtual + * memory. + * If raw dma addressing is not supported then gVNIC uses lists of registered + * pages. Each queue is assumed to be associated with a single such linear + * address space to ensure a consistent meaning for seg_addrs posted to its + * rings. 
+ */ + +struct gve_tx_pkt_desc { + u8 type_flags; /* desc type is lower 4 bits, flags upper */ + u8 l4_csum_offset; /* relative offset of L4 csum word */ + u8 l4_hdr_offset; /* Offset of start of L4 headers in packet */ + u8 desc_cnt; /* Total descriptors for this packet */ + __be16 len; /* Total length of this packet (in bytes) */ + __be16 seg_len; /* Length of this descriptor's segment */ + __be64 seg_addr; /* Base address (see note) of this segment */ +} __packed; + +struct gve_tx_mtd_desc { + u8 type_flags; /* type is lower 4 bits, subtype upper */ + u8 path_state; /* state is lower 4 bits, hash type upper */ + __be16 reserved0; + __be32 path_hash; + __be64 reserved1; +} __packed; + +struct gve_tx_seg_desc { + u8 type_flags; /* type is lower 4 bits, flags upper */ + u8 l3_offset; /* TSO: 2 byte units to start of IPH */ + __be16 reserved; + __be16 mss; /* TSO MSS */ + __be16 seg_len; + __be64 seg_addr; +} __packed; + +/* GVE Transmit Descriptor Types */ +#define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */ +#define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */ +#define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */ +#define GVE_TXD_MTD (0x3 << 4) /* Metadata */ + +/* GVE Transmit Descriptor Flags for Std Pkts */ +#define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */ +#define GVE_TXF_TSTAMP BIT(2) /* Timestamp required */ + +/* GVE Transmit Descriptor Flags for TSO Segs */ +#define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */ + +/* GVE Transmit Descriptor Options for MTD Segs */ +#define GVE_MTD_SUBTYPE_PATH 0 + +#define GVE_MTD_PATH_STATE_DEFAULT 0 +#define GVE_MTD_PATH_STATE_TIMEOUT 1 +#define GVE_MTD_PATH_STATE_CONGESTION 2 +#define GVE_MTD_PATH_STATE_RETRANSMIT 3 + +#define GVE_MTD_PATH_HASH_NONE (0x0 << 4) +#define GVE_MTD_PATH_HASH_L4 (0x1 << 4) + +/* GVE Receive Packet Descriptor */ +/* The start of an ethernet packet comes 2 bytes into the rx buffer. + * gVNIC adds this padding so that both the DMA and the L3/4 protocol header + * access is aligned. + */ +#define GVE_RX_PAD 2 + +struct gve_rx_desc { + u8 padding[48]; + __be32 rss_hash; /* Receive-side scaling hash (Toeplitz for gVNIC) */ + __be16 mss; + __be16 reserved; /* Reserved to zero */ + u8 hdr_len; /* Header length (L2-L4) including padding */ + u8 hdr_off; /* 64-byte-scaled offset into RX_DATA entry */ + __sum16 csum; /* 1's-complement partial checksum of L3+ bytes */ + __be16 len; /* Length of the received packet */ + __be16 flags_seq; /* Flags [15:3] and sequence number [2:0] (1-7) */ +} __packed; +static_assert(sizeof(struct gve_rx_desc) == 64); + +/* If the device supports raw dma addressing then the addr in data slot is + * the dma address of the buffer. + * If the device only supports registered segments then the addr is a byte + * offset into the registered segment (an ordered list of pages) where the + * buffer is. 
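+ * union gve_rx_data_slot below overlays both interpretations in a single
+ * 8-byte field.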
+ */ +union gve_rx_data_slot { + __be64 qpl_offset; + __be64 addr; +}; + +/* GVE Receive Packet Descriptor Seq No */ +#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7) + +/* GVE Receive Packet Descriptor Flags */ +#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x))) +#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */ +#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */ +#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */ +#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */ +#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */ +#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */ +#define GVE_RXF_PKT_CONT GVE_RXFLG(10) /* Multi Fragment RX packet */ + +/* GVE IRQ */ +#define GVE_IRQ_ACK BIT(31) +#define GVE_IRQ_MASK BIT(30) +#define GVE_IRQ_EVENT BIT(29) + +static inline bool gve_needs_rss(__be16 flag) +{ + if (flag & GVE_RXF_FRAG) + return false; + if (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6)) + return true; + return false; +} + +static inline u8 gve_next_seqno(u8 seq) +{ + return (seq + 1) == 8 ? 1 : seq + 1; +} +#endif /* _GVE_DESC_H_ */ diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h new file mode 100644 index 0000000000..f79cd05911 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h @@ -0,0 +1,261 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +/* GVE DQO Descriptor formats */ + +#ifndef _GVE_DESC_DQO_H_ +#define _GVE_DESC_DQO_H_ + +#include <linux/build_bug.h> + +#define GVE_TX_MAX_HDR_SIZE_DQO 255 +#define GVE_TX_MIN_TSO_MSS_DQO 88 + +#ifndef __LITTLE_ENDIAN_BITFIELD +#error "Only little endian supported" +#endif + +/* Basic TX descriptor (DTYPE 0x0C) */ +struct gve_tx_pkt_desc_dqo { + __le64 buf_addr; + + /* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */ + u8 dtype: 5; + + /* Denotes the last descriptor of a packet. */ + u8 end_of_packet: 1; + u8 checksum_offload_enable: 1; + + /* If set, will generate a descriptor completion for this descriptor. */ + u8 report_event: 1; + u8 reserved0; + __le16 reserved1; + + /* The TX completion associated with this packet will contain this tag. + */ + __le16 compl_tag; + u16 buf_size: 14; + u16 reserved2: 2; +} __packed; +static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16); + +#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc +#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1) + +/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */ +#define GVE_TX_MAX_DATA_DESCS 10 + +/* Min gap between tail and head to avoid cacheline overlap */ +#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4 + +/* "report_event" on TX packet descriptors may only be reported on the last + * descriptor of a TX packet, and they must be spaced apart with at least this + * value. + */ +#define GVE_TX_MIN_RE_INTERVAL 32 + +struct gve_tx_context_cmd_dtype { + u8 dtype: 5; + u8 tso: 1; + u8 reserved1: 2; + + u8 reserved2; +}; + +static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2); + +/* TX Native TSO Context DTYPE (0x05) + * + * "flex" fields allow the driver to send additional packet context to HW. + */ +struct gve_tx_tso_context_desc_dqo { + /* The L4 payload bytes that should be segmented. */ + u32 tso_total_len: 24; + u32 flex10: 8; + + /* Max segment size in TSO excluding headers. 
*/ + u16 mss: 14; + u16 reserved: 2; + + u8 header_len; /* Header length to use for TSO offload */ + u8 flex11; + struct gve_tx_context_cmd_dtype cmd_dtype; + u8 flex0; + u8 flex5; + u8 flex6; + u8 flex7; + u8 flex8; + u8 flex9; +} __packed; +static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16); + +#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5 + +/* General context descriptor for sending metadata. */ +struct gve_tx_general_context_desc_dqo { + u8 flex4; + u8 flex5; + u8 flex6; + u8 flex7; + u8 flex8; + u8 flex9; + u8 flex10; + u8 flex11; + struct gve_tx_context_cmd_dtype cmd_dtype; + u16 reserved; + u8 flex0; + u8 flex1; + u8 flex2; + u8 flex3; +} __packed; +static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16); + +#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4 + +/* Logical structure of metadata which is packed into context descriptor flex + * fields. + */ +struct gve_tx_metadata_dqo { + union { + struct { + u8 version; + + /* If `skb->l4_hash` is set, this value should be + * derived from `skb->hash`. + * + * A zero value means no l4_hash was associated with the + * skb. + */ + u16 path_hash: 15; + + /* Should be set to 1 if the flow associated with the + * skb had a rehash from the TCP stack. + */ + u16 rehash_event: 1; + } __packed; + u8 bytes[12]; + }; +} __packed; +static_assert(sizeof(struct gve_tx_metadata_dqo) == 12); + +#define GVE_TX_METADATA_VERSION_DQO 0 + +/* TX completion descriptor */ +struct gve_tx_compl_desc { + /* For types 0-4 this is the TX queue ID associated with this + * completion. + */ + u16 id: 11; + + /* See: GVE_COMPL_TYPE_DQO* */ + u16 type: 3; + u16 reserved0: 1; + + /* Flipped by HW to notify the descriptor is populated. */ + u16 generation: 1; + union { + /* For descriptor completions, this is the last index fetched + * by HW + 1. + */ + __le16 tx_head; + + /* For packet completions, this is the completion tag set on the + * TX packet descriptors. + */ + __le16 completion_tag; + }; + __le32 reserved1; +} __packed; +static_assert(sizeof(struct gve_tx_compl_desc) == 8); + +#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */ +#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */ +#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */ +#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */ + +/* The most significant bit in the completion tag can change the completion + * type from packet completion to miss path completion. + */ +#define GVE_ALT_MISS_COMPL_BIT BIT(15) + +/* Descriptor to post buffers to HW on buffer queue. */ +struct gve_rx_desc_dqo { + __le16 buf_id; /* ID returned in Rx completion descriptor */ + __le16 reserved0; + __le32 reserved1; + __le64 buf_addr; /* DMA address of the buffer */ + __le64 header_buf_addr; + __le64 reserved2; +} __packed; +static_assert(sizeof(struct gve_rx_desc_dqo) == 32); + +/* Descriptor for HW to notify SW of new packets received on RX queue. */ +struct gve_rx_compl_desc_dqo { + /* Must be 1 */ + u8 rxdid: 4; + u8 reserved0: 4; + + /* Packet originated from this system rather than the network. */ + u8 loopback: 1; + /* Set when IPv6 packet contains a destination options header or routing + * header. + */ + u8 ipv6_ex_add: 1; + /* Invalid packet was received. */ + u8 rx_error: 1; + u8 reserved1: 5; + + u16 packet_type: 10; + u16 ip_hdr_err: 1; + u16 udp_len_err: 1; + u16 raw_cs_invalid: 1; + u16 reserved2: 3; + + u16 packet_len: 14; + /* Flipped by HW to notify the descriptor is populated. */ + u16 generation: 1; + /* Should be zero. 
*/ + u16 buffer_queue_id: 1; + + u16 header_len: 10; + u16 rsc: 1; + u16 split_header: 1; + u16 reserved3: 4; + + u8 descriptor_done: 1; + u8 end_of_packet: 1; + u8 header_buffer_overflow: 1; + u8 l3_l4_processed: 1; + u8 csum_ip_err: 1; + u8 csum_l4_err: 1; + u8 csum_external_ip_err: 1; + u8 csum_external_udp_err: 1; + + u8 status_error1; + + __le16 reserved5; + __le16 buf_id; /* Buffer ID which was sent on the buffer queue. */ + + union { + /* Packet checksum. */ + __le16 raw_cs; + /* Segment length for RSC packets. */ + __le16 rsc_seg_len; + }; + __le32 hash; + __le32 reserved6; + __le64 reserved7; +} __packed; + +static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32); + +/* Ringing the doorbell too often can hurt performance. + * + * HW requires this value to be at least 8. + */ +#define GVE_RX_BUF_THRESH_DQO 32 + +#endif /* _GVE_DESC_DQO_H_ */ diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h new file mode 100644 index 0000000000..1eb4d5fd85 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_dqo.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#ifndef _GVE_DQO_H_ +#define _GVE_DQO_H_ + +#include "gve_adminq.h" + +#define GVE_ITR_ENABLE_BIT_DQO BIT(0) +#define GVE_ITR_CLEAR_PBA_BIT_DQO BIT(1) +#define GVE_ITR_NO_UPDATE_DQO (3 << 3) + +#define GVE_ITR_INTERVAL_DQO_SHIFT 5 +#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1) + +#define GVE_TX_IRQ_RATELIMIT_US_DQO 50 +#define GVE_RX_IRQ_RATELIMIT_US_DQO 20 +#define GVE_MAX_ITR_INTERVAL_DQO (GVE_ITR_INTERVAL_DQO_MASK * 2) + +/* Timeout in seconds to wait for a reinjection completion after receiving + * its corresponding miss completion. + */ +#define GVE_REINJECT_COMPL_TIMEOUT 1 + +/* Timeout in seconds to deallocate the completion tag for a packet that was + * prematurely freed for not receiving a valid completion. This should be large + * enough to rule out the possibility of receiving the corresponding valid + * completion after this interval. + */ +#define GVE_DEALLOCATE_COMPL_TIMEOUT 60 + +netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev); +bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean); +int gve_rx_poll_dqo(struct gve_notify_block *block, int budget); +int gve_tx_alloc_rings_dqo(struct gve_priv *priv); +void gve_tx_free_rings_dqo(struct gve_priv *priv); +int gve_rx_alloc_rings_dqo(struct gve_priv *priv); +void gve_rx_free_rings_dqo(struct gve_priv *priv); +int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, + struct napi_struct *napi); +void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx); +void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx); + +static inline void +gve_tx_put_doorbell_dqo(const struct gve_priv *priv, + const struct gve_queue_resources *q_resources, u32 val) +{ + u64 index; + + index = be32_to_cpu(q_resources->db_index); + iowrite32(val, &priv->db_bar2[index]); +} + +/* Builds register value to write to DQO IRQ doorbell to enable with specified + * ITR interval. + */ +static inline u32 gve_setup_itr_interval_dqo(u32 interval_us) +{ + u32 result = GVE_ITR_ENABLE_BIT_DQO; + + /* Interval has 2us granularity. 
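+	 * The right shift below converts the requested microseconds into those
+	 * 2us units before the value is masked and shifted into the doorbell
+	 * register format.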
*/ + interval_us >>= 1; + + interval_us &= GVE_ITR_INTERVAL_DQO_MASK; + result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT); + + return result; +} + +static inline void +gve_write_irq_doorbell_dqo(const struct gve_priv *priv, + const struct gve_notify_block *block, u32 val) +{ + u32 index = be32_to_cpu(*block->irq_db_index); + + iowrite32(val, &priv->db_bar2[index]); +} + +/* Sets interrupt throttling interval and enables interrupt + * by writing to IRQ doorbell. + */ +static inline void +gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv, + struct gve_notify_block *block, + u32 usecs) +{ + gve_write_irq_doorbell_dqo(priv, block, + gve_setup_itr_interval_dqo(usecs)); +} +#endif /* _GVE_DQO_H_ */ diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c new file mode 100644 index 0000000000..233e594690 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#include <linux/ethtool.h> +#include <linux/rtnetlink.h> +#include "gve.h" +#include "gve_adminq.h" +#include "gve_dqo.h" + +static void gve_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct gve_priv *priv = netdev_priv(netdev); + + strscpy(info->driver, gve_driver_name, sizeof(info->driver)); + strscpy(info->version, gve_version_str, sizeof(info->version)); + strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info)); +} + +static void gve_set_msglevel(struct net_device *netdev, u32 value) +{ + struct gve_priv *priv = netdev_priv(netdev); + + priv->msg_enable = value; +} + +static u32 gve_get_msglevel(struct net_device *netdev) +{ + struct gve_priv *priv = netdev_priv(netdev); + + return priv->msg_enable; +} + +/* For the following stats column string names, make sure the order + * matches how it is filled in the code. For xdp_aborted, xdp_drop, + * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order + * as declared in enum xdp_action inside file uapi/linux/bpf.h . 
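+ * gve_get_ethtool_stats() fills the data array in this same order.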
+ */ +static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", + "rx_dropped", "tx_dropped", "tx_timeouts", + "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt", + "interface_up_cnt", "interface_down_cnt", "reset_cnt", + "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt", +}; + +static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { + "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]", + "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", + "rx_frag_alloc_cnt[%u]", + "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", + "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]", + "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]", + "rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]", + "rx_xdp_tx[%u]", "rx_xdp_redirect[%u]", + "rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]", +}; + +static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { + "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]", + "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]", + "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]", + "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]" +}; + +static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { + "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts", + "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt", + "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt", + "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt", + "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt", + "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", + "adminq_report_stats_cnt", "adminq_report_link_speed_cnt" +}; + +static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { + "report-stats", +}; + +#define GVE_MAIN_STATS_LEN ARRAY_SIZE(gve_gstrings_main_stats) +#define GVE_ADMINQ_STATS_LEN ARRAY_SIZE(gve_gstrings_adminq_stats) +#define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats) +#define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats) +#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags) + +static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct gve_priv *priv = netdev_priv(netdev); + char *s = (char *)data; + int num_tx_queues; + int i, j; + + num_tx_queues = gve_num_tx_queues(priv); + switch (stringset) { + case ETH_SS_STATS: + memcpy(s, *gve_gstrings_main_stats, + sizeof(gve_gstrings_main_stats)); + s += sizeof(gve_gstrings_main_stats); + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + for (j = 0; j < NUM_GVE_RX_CNTS; j++) { + snprintf(s, ETH_GSTRING_LEN, + gve_gstrings_rx_stats[j], i); + s += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < num_tx_queues; i++) { + for (j = 0; j < NUM_GVE_TX_CNTS; j++) { + snprintf(s, ETH_GSTRING_LEN, + gve_gstrings_tx_stats[j], i); + s += ETH_GSTRING_LEN; + } + } + + memcpy(s, *gve_gstrings_adminq_stats, + sizeof(gve_gstrings_adminq_stats)); + s += sizeof(gve_gstrings_adminq_stats); + break; + + case ETH_SS_PRIV_FLAGS: + memcpy(s, *gve_gstrings_priv_flags, + sizeof(gve_gstrings_priv_flags)); + s += sizeof(gve_gstrings_priv_flags); + break; + + default: + break; + } +} + +static int gve_get_sset_count(struct net_device *netdev, int sset) +{ + struct gve_priv *priv = netdev_priv(netdev); + int num_tx_queues; + + num_tx_queues = 
gve_num_tx_queues(priv); + switch (sset) { + case ETH_SS_STATS: + return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN + + (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) + + (num_tx_queues * NUM_GVE_TX_CNTS); + case ETH_SS_PRIV_FLAGS: + return GVE_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +gve_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, + tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt, + tmp_tx_pkts, tmp_tx_bytes; + u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts, + rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped; + int stats_idx, base_stats_idx, max_stats_idx; + struct stats *report_stats; + int *rx_qid_to_stats_idx; + int *tx_qid_to_stats_idx; + struct gve_priv *priv; + bool skip_nic_stats; + unsigned int start; + int num_tx_queues; + int ring; + int i, j; + + ASSERT_RTNL(); + + priv = netdev_priv(netdev); + num_tx_queues = gve_num_tx_queues(priv); + report_stats = priv->stats_report->stats; + rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues, + sizeof(int), GFP_KERNEL); + if (!rx_qid_to_stats_idx) + return; + tx_qid_to_stats_idx = kmalloc_array(num_tx_queues, + sizeof(int), GFP_KERNEL); + if (!tx_qid_to_stats_idx) { + kfree(rx_qid_to_stats_idx); + return; + } + for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0, + rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0; + ring < priv->rx_cfg.num_queues; ring++) { + if (priv->rx) { + do { + struct gve_rx_ring *rx = &priv->rx[ring]; + + start = + u64_stats_fetch_begin(&priv->rx[ring].statss); + tmp_rx_pkts = rx->rpackets; + tmp_rx_bytes = rx->rbytes; + tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; + tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; + tmp_rx_desc_err_dropped_pkt = + rx->rx_desc_err_dropped_pkt; + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + start)); + rx_pkts += tmp_rx_pkts; + rx_bytes += tmp_rx_bytes; + rx_skb_alloc_fail += tmp_rx_skb_alloc_fail; + rx_buf_alloc_fail += tmp_rx_buf_alloc_fail; + rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt; + } + } + for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; + ring < num_tx_queues; ring++) { + if (priv->tx) { + do { + start = + u64_stats_fetch_begin(&priv->tx[ring].statss); + tmp_tx_pkts = priv->tx[ring].pkt_done; + tmp_tx_bytes = priv->tx[ring].bytes_done; + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + start)); + tx_pkts += tmp_tx_pkts; + tx_bytes += tmp_tx_bytes; + tx_dropped += priv->tx[ring].dropped_pkt; + } + } + + i = 0; + data[i++] = rx_pkts; + data[i++] = tx_pkts; + data[i++] = rx_bytes; + data[i++] = tx_bytes; + /* total rx dropped packets */ + data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail + + rx_desc_err_dropped_pkt; + data[i++] = tx_dropped; + data[i++] = priv->tx_timeo_cnt; + data[i++] = rx_skb_alloc_fail; + data[i++] = rx_buf_alloc_fail; + data[i++] = rx_desc_err_dropped_pkt; + data[i++] = priv->interface_up_cnt; + data[i++] = priv->interface_down_cnt; + data[i++] = priv->reset_cnt; + data[i++] = priv->page_alloc_fail; + data[i++] = priv->dma_mapping_error; + data[i++] = priv->stats_report_trigger_cnt; + i = GVE_MAIN_STATS_LEN; + + /* For rx cross-reporting stats, start from nic rx stats in report */ + base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues + + GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; + max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues + + base_stats_idx; + /* Preprocess the stats report for rx, map queue id to start 
index */ + skip_nic_stats = false; + for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; + stats_idx += NIC_RX_STATS_REPORT_NUM) { + u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); + u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id); + + if (stat_name == 0) { + /* no stats written by NIC yet */ + skip_nic_stats = true; + break; + } + rx_qid_to_stats_idx[queue_id] = stats_idx; + } + /* walk RX rings */ + if (priv->rx) { + for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { + struct gve_rx_ring *rx = &priv->rx[ring]; + + data[i++] = rx->fill_cnt; + data[i++] = rx->cnt; + data[i++] = rx->fill_cnt - rx->cnt; + do { + start = + u64_stats_fetch_begin(&priv->rx[ring].statss); + tmp_rx_bytes = rx->rbytes; + tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; + tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; + tmp_rx_desc_err_dropped_pkt = + rx->rx_desc_err_dropped_pkt; + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + start)); + data[i++] = tmp_rx_bytes; + data[i++] = rx->rx_cont_packet_cnt; + data[i++] = rx->rx_frag_flip_cnt; + data[i++] = rx->rx_frag_copy_cnt; + data[i++] = rx->rx_frag_alloc_cnt; + /* rx dropped packets */ + data[i++] = tmp_rx_skb_alloc_fail + + tmp_rx_buf_alloc_fail + + tmp_rx_desc_err_dropped_pkt; + data[i++] = rx->rx_copybreak_pkt; + data[i++] = rx->rx_copied_pkt; + /* stats from NIC */ + if (skip_nic_stats) { + /* skip NIC rx stats */ + i += NIC_RX_STATS_REPORT_NUM; + } else { + stats_idx = rx_qid_to_stats_idx[ring]; + for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) { + u64 value = + be64_to_cpu(report_stats[stats_idx + j].value); + + data[i++] = value; + } + } + /* XDP rx counters */ + do { + start = u64_stats_fetch_begin(&priv->rx[ring].statss); + for (j = 0; j < GVE_XDP_ACTIONS; j++) + data[i + j] = rx->xdp_actions[j]; + data[i + j++] = rx->xdp_tx_errors; + data[i + j++] = rx->xdp_redirect_errors; + data[i + j++] = rx->xdp_alloc_fails; + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + start)); + i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */ + } + } else { + i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; + } + + /* For tx cross-reporting stats, start from nic tx stats in report */ + base_stats_idx = max_stats_idx; + max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues + + max_stats_idx; + /* Preprocess the stats report for tx, map queue id to start index */ + skip_nic_stats = false; + for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; + stats_idx += NIC_TX_STATS_REPORT_NUM) { + u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); + u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id); + + if (stat_name == 0) { + /* no stats written by NIC yet */ + skip_nic_stats = true; + break; + } + tx_qid_to_stats_idx[queue_id] = stats_idx; + } + /* walk TX rings */ + if (priv->tx) { + for (ring = 0; ring < num_tx_queues; ring++) { + struct gve_tx_ring *tx = &priv->tx[ring]; + + if (gve_is_gqi(priv)) { + data[i++] = tx->req; + data[i++] = tx->done; + data[i++] = tx->req - tx->done; + } else { + /* DQO doesn't currently support + * posted/completed descriptor counts; + */ + data[i++] = 0; + data[i++] = 0; + data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head; + } + do { + start = + u64_stats_fetch_begin(&priv->tx[ring].statss); + tmp_tx_bytes = tx->bytes_done; + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + start)); + data[i++] = tmp_tx_bytes; + data[i++] = tx->wake_queue; + data[i++] = tx->stop_queue; + data[i++] = gve_tx_load_event_counter(priv, tx); + data[i++] = tx->dma_mapping_error; + /* stats 
from NIC */ + if (skip_nic_stats) { + /* skip NIC tx stats */ + i += NIC_TX_STATS_REPORT_NUM; + } else { + stats_idx = tx_qid_to_stats_idx[ring]; + for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) { + u64 value = + be64_to_cpu(report_stats[stats_idx + j].value); + data[i++] = value; + } + } + /* XDP xsk counters */ + data[i++] = tx->xdp_xsk_wakeup; + data[i++] = tx->xdp_xsk_done; + do { + start = u64_stats_fetch_begin(&priv->tx[ring].statss); + data[i] = tx->xdp_xsk_sent; + data[i + 1] = tx->xdp_xmit; + data[i + 2] = tx->xdp_xmit_errors; + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + start)); + i += 3; /* XDP tx counters */ + } + } else { + i += num_tx_queues * NUM_GVE_TX_CNTS; + } + + kfree(rx_qid_to_stats_idx); + kfree(tx_qid_to_stats_idx); + /* AQ Stats */ + data[i++] = priv->adminq_prod_cnt; + data[i++] = priv->adminq_cmd_fail; + data[i++] = priv->adminq_timeouts; + data[i++] = priv->adminq_describe_device_cnt; + data[i++] = priv->adminq_cfg_device_resources_cnt; + data[i++] = priv->adminq_register_page_list_cnt; + data[i++] = priv->adminq_unregister_page_list_cnt; + data[i++] = priv->adminq_create_tx_queue_cnt; + data[i++] = priv->adminq_create_rx_queue_cnt; + data[i++] = priv->adminq_destroy_tx_queue_cnt; + data[i++] = priv->adminq_destroy_rx_queue_cnt; + data[i++] = priv->adminq_dcfg_device_resources_cnt; + data[i++] = priv->adminq_set_driver_parameter_cnt; + data[i++] = priv->adminq_report_stats_cnt; + data[i++] = priv->adminq_report_link_speed_cnt; +} + +static void gve_get_channels(struct net_device *netdev, + struct ethtool_channels *cmd) +{ + struct gve_priv *priv = netdev_priv(netdev); + + cmd->max_rx = priv->rx_cfg.max_queues; + cmd->max_tx = priv->tx_cfg.max_queues; + cmd->max_other = 0; + cmd->max_combined = 0; + cmd->rx_count = priv->rx_cfg.num_queues; + cmd->tx_count = priv->tx_cfg.num_queues; + cmd->other_count = 0; + cmd->combined_count = 0; +} + +static int gve_set_channels(struct net_device *netdev, + struct ethtool_channels *cmd) +{ + struct gve_priv *priv = netdev_priv(netdev); + struct gve_queue_config new_tx_cfg = priv->tx_cfg; + struct gve_queue_config new_rx_cfg = priv->rx_cfg; + struct ethtool_channels old_settings; + int new_tx = cmd->tx_count; + int new_rx = cmd->rx_count; + + gve_get_channels(netdev, &old_settings); + + /* Changing combined is not allowed */ + if (cmd->combined_count != old_settings.combined_count) + return -EINVAL; + + if (!new_rx || !new_tx) + return -EINVAL; + + if (priv->num_xdp_queues && + (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) { + dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues"); + return -EINVAL; + } + + if (!netif_carrier_ok(netdev)) { + priv->tx_cfg.num_queues = new_tx; + priv->rx_cfg.num_queues = new_rx; + return 0; + } + + new_tx_cfg.num_queues = new_tx; + new_rx_cfg.num_queues = new_rx; + + return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg); +} + +static void gve_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *cmd, + struct kernel_ethtool_ringparam *kernel_cmd, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(netdev); + + cmd->rx_max_pending = priv->rx_desc_cnt; + cmd->tx_max_pending = priv->tx_desc_cnt; + cmd->rx_pending = priv->rx_desc_cnt; + cmd->tx_pending = priv->tx_desc_cnt; +} + +static int gve_user_reset(struct net_device *netdev, u32 *flags) 
+{ + struct gve_priv *priv = netdev_priv(netdev); + + if (*flags == ETH_RESET_ALL) { + *flags = 0; + return gve_reset(priv, true); + } + + return -EOPNOTSUPP; +} + +static int gve_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *etuna, void *value) +{ + struct gve_priv *priv = netdev_priv(netdev); + + switch (etuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)value = priv->rx_copybreak; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int gve_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *etuna, + const void *value) +{ + struct gve_priv *priv = netdev_priv(netdev); + u32 len; + + switch (etuna->id) { + case ETHTOOL_RX_COPYBREAK: + { + u32 max_copybreak = gve_is_gqi(priv) ? + (PAGE_SIZE / 2) : priv->data_buffer_size_dqo; + + len = *(u32 *)value; + if (len > max_copybreak) + return -EINVAL; + priv->rx_copybreak = len; + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static u32 gve_get_priv_flags(struct net_device *netdev) +{ + struct gve_priv *priv = netdev_priv(netdev); + u32 ret_flags = 0; + + /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. */ + if (priv->ethtool_flags & BIT(0)) + ret_flags |= BIT(0); + return ret_flags; +} + +static int gve_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct gve_priv *priv = netdev_priv(netdev); + u64 ori_flags, new_flags; + int num_tx_queues; + + num_tx_queues = gve_num_tx_queues(priv); + ori_flags = READ_ONCE(priv->ethtool_flags); + new_flags = ori_flags; + + /* Only one priv flag exists: report-stats (BIT(0))*/ + if (flags & BIT(0)) + new_flags |= BIT(0); + else + new_flags &= ~(BIT(0)); + priv->ethtool_flags = new_flags; + /* start report-stats timer when user turns report stats on. */ + if (flags & BIT(0)) { + mod_timer(&priv->stats_report_timer, + round_jiffies(jiffies + + msecs_to_jiffies(priv->stats_report_timer_period))); + } + /* Zero off gve stats when report-stats turned off and */ + /* delete report stats timer. 
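+	 * Only the gve-written entries (tx_stats_num + rx_stats_num of them)
+	 * are cleared; the NIC-written entries follow them in the report.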
*/ + if (!(flags & BIT(0)) && (ori_flags & BIT(0))) { + int tx_stats_num = GVE_TX_STATS_REPORT_NUM * + num_tx_queues; + int rx_stats_num = GVE_RX_STATS_REPORT_NUM * + priv->rx_cfg.num_queues; + + memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) * + sizeof(struct stats)); + del_timer_sync(&priv->stats_report_timer); + } + return 0; +} + +static int gve_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct gve_priv *priv = netdev_priv(netdev); + int err = 0; + + if (priv->link_speed == 0) + err = gve_adminq_report_link_speed(priv); + + cmd->base.speed = priv->link_speed; + + cmd->base.duplex = DUPLEX_FULL; + + return err; +} + +static int gve_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(netdev); + + if (gve_is_gqi(priv)) + return -EOPNOTSUPP; + ec->tx_coalesce_usecs = priv->tx_coalesce_usecs; + ec->rx_coalesce_usecs = priv->rx_coalesce_usecs; + + return 0; +} + +static int gve_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(netdev); + u32 tx_usecs_orig = priv->tx_coalesce_usecs; + u32 rx_usecs_orig = priv->rx_coalesce_usecs; + int idx; + + if (gve_is_gqi(priv)) + return -EOPNOTSUPP; + + if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO || + ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO) + return -EINVAL; + priv->tx_coalesce_usecs = ec->tx_coalesce_usecs; + priv->rx_coalesce_usecs = ec->rx_coalesce_usecs; + + if (tx_usecs_orig != priv->tx_coalesce_usecs) { + for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { + int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->tx_coalesce_usecs); + } + } + + if (rx_usecs_orig != priv->rx_coalesce_usecs) { + for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { + int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->rx_coalesce_usecs); + } + } + + return 0; +} + +const struct ethtool_ops gve_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = gve_get_drvinfo, + .get_strings = gve_get_strings, + .get_sset_count = gve_get_sset_count, + .get_ethtool_stats = gve_get_ethtool_stats, + .set_msglevel = gve_set_msglevel, + .get_msglevel = gve_get_msglevel, + .set_channels = gve_set_channels, + .get_channels = gve_get_channels, + .get_link = ethtool_op_get_link, + .get_coalesce = gve_get_coalesce, + .set_coalesce = gve_set_coalesce, + .get_ringparam = gve_get_ringparam, + .reset = gve_user_reset, + .get_tunable = gve_get_tunable, + .set_tunable = gve_set_tunable, + .get_priv_flags = gve_get_priv_flags, + .set_priv_flags = gve_set_priv_flags, + .get_link_ksettings = gve_get_link_ksettings +}; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c new file mode 100644 index 0000000000..5703240474 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -0,0 +1,2429 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. 
+ */ + +#include <linux/bpf.h> +#include <linux/cpumask.h> +#include <linux/etherdevice.h> +#include <linux/filter.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/utsname.h> +#include <linux/version.h> +#include <net/sch_generic.h> +#include <net/xdp_sock_drv.h> +#include "gve.h" +#include "gve_dqo.h" +#include "gve_adminq.h" +#include "gve_register.h" + +#define GVE_DEFAULT_RX_COPYBREAK (256) + +#define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK) +#define GVE_VERSION "1.0.0" +#define GVE_VERSION_PREFIX "GVE-" + +// Minimum amount of time between queue kicks in msec (10 seconds) +#define MIN_TX_TIMEOUT_GAP (1000 * 10) + +char gve_driver_name[] = "gve"; +const char gve_version_str[] = GVE_VERSION; +static const char gve_version_prefix[] = GVE_VERSION_PREFIX; + +static int gve_verify_driver_compatibility(struct gve_priv *priv) +{ + int err; + struct gve_driver_info *driver_info; + dma_addr_t driver_info_bus; + + driver_info = dma_alloc_coherent(&priv->pdev->dev, + sizeof(struct gve_driver_info), + &driver_info_bus, GFP_KERNEL); + if (!driver_info) + return -ENOMEM; + + *driver_info = (struct gve_driver_info) { + .os_type = 1, /* Linux */ + .os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR), + .os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL), + .os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL), + .driver_capability_flags = { + cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1), + cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2), + cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3), + cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4), + }, + }; + strscpy(driver_info->os_version_str1, utsname()->release, + sizeof(driver_info->os_version_str1)); + strscpy(driver_info->os_version_str2, utsname()->version, + sizeof(driver_info->os_version_str2)); + + err = gve_adminq_verify_driver_compatibility(priv, + sizeof(struct gve_driver_info), + driver_info_bus); + + /* It's ok if the device doesn't support this */ + if (err == -EOPNOTSUPP) + err = 0; + + dma_free_coherent(&priv->pdev->dev, + sizeof(struct gve_driver_info), + driver_info, driver_info_bus); + return err; +} + +static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct gve_priv *priv = netdev_priv(dev); + + if (gve_is_gqi(priv)) + return gve_tx(skb, dev); + else + return gve_tx_dqo(skb, dev); +} + +static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) +{ + struct gve_priv *priv = netdev_priv(dev); + unsigned int start; + u64 packets, bytes; + int num_tx_queues; + int ring; + + num_tx_queues = gve_num_tx_queues(priv); + if (priv->rx) { + for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { + do { + start = + u64_stats_fetch_begin(&priv->rx[ring].statss); + packets = priv->rx[ring].rpackets; + bytes = priv->rx[ring].rbytes; + } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + start)); + s->rx_packets += packets; + s->rx_bytes += bytes; + } + } + if (priv->tx) { + for (ring = 0; ring < num_tx_queues; ring++) { + do { + start = + u64_stats_fetch_begin(&priv->tx[ring].statss); + packets = priv->tx[ring].pkt_done; + bytes = priv->tx[ring].bytes_done; + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + start)); + s->tx_packets += packets; + s->tx_bytes += bytes; + } + } +} + +static int gve_alloc_counter_array(struct gve_priv *priv) +{ + priv->counter_array = + dma_alloc_coherent(&priv->pdev->dev, + priv->num_event_counters * + 
sizeof(*priv->counter_array), + &priv->counter_array_bus, GFP_KERNEL); + if (!priv->counter_array) + return -ENOMEM; + + return 0; +} + +static void gve_free_counter_array(struct gve_priv *priv) +{ + if (!priv->counter_array) + return; + + dma_free_coherent(&priv->pdev->dev, + priv->num_event_counters * + sizeof(*priv->counter_array), + priv->counter_array, priv->counter_array_bus); + priv->counter_array = NULL; +} + +/* NIC requests to report stats */ +static void gve_stats_report_task(struct work_struct *work) +{ + struct gve_priv *priv = container_of(work, struct gve_priv, + stats_report_task); + if (gve_get_do_report_stats(priv)) { + gve_handle_report_stats(priv); + gve_clear_do_report_stats(priv); + } +} + +static void gve_stats_report_schedule(struct gve_priv *priv) +{ + if (!gve_get_probe_in_progress(priv) && + !gve_get_reset_in_progress(priv)) { + gve_set_do_report_stats(priv); + queue_work(priv->gve_wq, &priv->stats_report_task); + } +} + +static void gve_stats_report_timer(struct timer_list *t) +{ + struct gve_priv *priv = from_timer(priv, t, stats_report_timer); + + mod_timer(&priv->stats_report_timer, + round_jiffies(jiffies + + msecs_to_jiffies(priv->stats_report_timer_period))); + gve_stats_report_schedule(priv); +} + +static int gve_alloc_stats_report(struct gve_priv *priv) +{ + int tx_stats_num, rx_stats_num; + + tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * + gve_num_tx_queues(priv); + rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * + priv->rx_cfg.num_queues; + priv->stats_report_len = struct_size(priv->stats_report, stats, + size_add(tx_stats_num, rx_stats_num)); + priv->stats_report = + dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, + &priv->stats_report_bus, GFP_KERNEL); + if (!priv->stats_report) + return -ENOMEM; + /* Set up timer for the report-stats task */ + timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0); + priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD; + return 0; +} + +static void gve_free_stats_report(struct gve_priv *priv) +{ + if (!priv->stats_report) + return; + + del_timer_sync(&priv->stats_report_timer); + dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, + priv->stats_report, priv->stats_report_bus); + priv->stats_report = NULL; +} + +static irqreturn_t gve_mgmnt_intr(int irq, void *arg) +{ + struct gve_priv *priv = arg; + + queue_work(priv->gve_wq, &priv->service_task); + return IRQ_HANDLED; +} + +static irqreturn_t gve_intr(int irq, void *arg) +{ + struct gve_notify_block *block = arg; + struct gve_priv *priv = block->priv; + + iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); + napi_schedule_irqoff(&block->napi); + return IRQ_HANDLED; +} + +static irqreturn_t gve_intr_dqo(int irq, void *arg) +{ + struct gve_notify_block *block = arg; + + /* Interrupts are automatically masked */ + napi_schedule_irqoff(&block->napi); + return IRQ_HANDLED; +} + +static int gve_napi_poll(struct napi_struct *napi, int budget) +{ + struct gve_notify_block *block; + __be32 __iomem *irq_doorbell; + bool reschedule = false; + struct gve_priv *priv; + int work_done = 0; + + block = container_of(napi, struct gve_notify_block, napi); + priv = block->priv; + + if (block->tx) { + if (block->tx->q_num < priv->tx_cfg.num_queues) + reschedule |= gve_tx_poll(block, budget); + else if (budget) + reschedule |= gve_xdp_poll(block, budget); + } + + if (!budget) + return 0; + + if (block->rx) { + work_done = gve_rx_poll(block, budget); + reschedule |= work_done == budget; + } + + 
if (reschedule) + return budget; + + /* Complete processing - don't unmask irq if busy polling is enabled */ + if (likely(napi_complete_done(napi, work_done))) { + irq_doorbell = gve_irq_doorbell(priv, block); + iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell); + + /* Ensure IRQ ACK is visible before we check pending work. + * If queue had issued updates, it would be truly visible. + */ + mb(); + + if (block->tx) + reschedule |= gve_tx_clean_pending(priv, block->tx); + if (block->rx) + reschedule |= gve_rx_work_pending(block->rx); + + if (reschedule && napi_reschedule(napi)) + iowrite32be(GVE_IRQ_MASK, irq_doorbell); + } + return work_done; +} + +static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) +{ + struct gve_notify_block *block = + container_of(napi, struct gve_notify_block, napi); + struct gve_priv *priv = block->priv; + bool reschedule = false; + int work_done = 0; + + if (block->tx) + reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true); + + if (!budget) + return 0; + + if (block->rx) { + work_done = gve_rx_poll_dqo(block, budget); + reschedule |= work_done == budget; + } + + if (reschedule) + return budget; + + if (likely(napi_complete_done(napi, work_done))) { + /* Enable interrupts again. + * + * We don't need to repoll afterwards because HW supports the + * PCI MSI-X PBA feature. + * + * Another interrupt would be triggered if a new event came in + * since the last one. + */ + gve_write_irq_doorbell_dqo(priv, block, + GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO); + } + + return work_done; +} + +static int gve_alloc_notify_blocks(struct gve_priv *priv) +{ + int num_vecs_requested = priv->num_ntfy_blks + 1; + unsigned int active_cpus; + int vecs_enabled; + int i, j; + int err; + + priv->msix_vectors = kvcalloc(num_vecs_requested, + sizeof(*priv->msix_vectors), GFP_KERNEL); + if (!priv->msix_vectors) + return -ENOMEM; + for (i = 0; i < num_vecs_requested; i++) + priv->msix_vectors[i].entry = i; + vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, + GVE_MIN_MSIX, num_vecs_requested); + if (vecs_enabled < 0) { + dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", + GVE_MIN_MSIX, vecs_enabled); + err = vecs_enabled; + goto abort_with_msix_vectors; + } + if (vecs_enabled != num_vecs_requested) { + int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1; + int vecs_per_type = new_num_ntfy_blks / 2; + int vecs_left = new_num_ntfy_blks % 2; + + priv->num_ntfy_blks = new_num_ntfy_blks; + priv->mgmt_msix_idx = priv->num_ntfy_blks; + priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, + vecs_per_type); + priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, + vecs_per_type + vecs_left); + dev_err(&priv->pdev->dev, + "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n", + vecs_enabled, priv->tx_cfg.max_queues, + priv->rx_cfg.max_queues); + if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) + priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; + if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) + priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; + } + /* Half the notification blocks go to TX and half to RX */ + active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); + + /* Setup Management Vector - the last vector */ + snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s", + pci_name(priv->pdev)); + err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, + gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); + if (err) 
{ + dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); + goto abort_with_msix_enabled; + } + priv->irq_db_indices = + dma_alloc_coherent(&priv->pdev->dev, + priv->num_ntfy_blks * + sizeof(*priv->irq_db_indices), + &priv->irq_db_indices_bus, GFP_KERNEL); + if (!priv->irq_db_indices) { + err = -ENOMEM; + goto abort_with_mgmt_vector; + } + + priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks * + sizeof(*priv->ntfy_blocks), GFP_KERNEL); + if (!priv->ntfy_blocks) { + err = -ENOMEM; + goto abort_with_irq_db_indices; + } + + /* Setup the other blocks - the first n-1 vectors */ + for (i = 0; i < priv->num_ntfy_blks; i++) { + struct gve_notify_block *block = &priv->ntfy_blocks[i]; + int msix_idx = i; + + snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s", + i, pci_name(priv->pdev)); + block->priv = priv; + err = request_irq(priv->msix_vectors[msix_idx].vector, + gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, + 0, block->name, block); + if (err) { + dev_err(&priv->pdev->dev, + "Failed to receive msix vector %d\n", i); + goto abort_with_some_ntfy_blocks; + } + irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, + get_cpu_mask(i % active_cpus)); + block->irq_db_index = &priv->irq_db_indices[i].index; + } + return 0; +abort_with_some_ntfy_blocks: + for (j = 0; j < i; j++) { + struct gve_notify_block *block = &priv->ntfy_blocks[j]; + int msix_idx = j; + + irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, + NULL); + free_irq(priv->msix_vectors[msix_idx].vector, block); + } + kvfree(priv->ntfy_blocks); + priv->ntfy_blocks = NULL; +abort_with_irq_db_indices: + dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * + sizeof(*priv->irq_db_indices), + priv->irq_db_indices, priv->irq_db_indices_bus); + priv->irq_db_indices = NULL; +abort_with_mgmt_vector: + free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); +abort_with_msix_enabled: + pci_disable_msix(priv->pdev); +abort_with_msix_vectors: + kvfree(priv->msix_vectors); + priv->msix_vectors = NULL; + return err; +} + +static void gve_free_notify_blocks(struct gve_priv *priv) +{ + int i; + + if (!priv->msix_vectors) + return; + + /* Free the irqs */ + for (i = 0; i < priv->num_ntfy_blks; i++) { + struct gve_notify_block *block = &priv->ntfy_blocks[i]; + int msix_idx = i; + + irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, + NULL); + free_irq(priv->msix_vectors[msix_idx].vector, block); + } + free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); + kvfree(priv->ntfy_blocks); + priv->ntfy_blocks = NULL; + dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * + sizeof(*priv->irq_db_indices), + priv->irq_db_indices, priv->irq_db_indices_bus); + priv->irq_db_indices = NULL; + pci_disable_msix(priv->pdev); + kvfree(priv->msix_vectors); + priv->msix_vectors = NULL; +} + +static int gve_setup_device_resources(struct gve_priv *priv) +{ + int err; + + err = gve_alloc_counter_array(priv); + if (err) + return err; + err = gve_alloc_notify_blocks(priv); + if (err) + goto abort_with_counter; + err = gve_alloc_stats_report(priv); + if (err) + goto abort_with_ntfy_blocks; + err = gve_adminq_configure_device_resources(priv, + priv->counter_array_bus, + priv->num_event_counters, + priv->irq_db_indices_bus, + priv->num_ntfy_blks); + if (unlikely(err)) { + dev_err(&priv->pdev->dev, + "could not setup device_resources: err=%d\n", err); + err = -ENXIO; + goto abort_with_stats_report; + } + + if (!gve_is_gqi(priv)) { + priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), + GFP_KERNEL); + if 
(!priv->ptype_lut_dqo) { + err = -ENOMEM; + goto abort_with_stats_report; + } + err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); + if (err) { + dev_err(&priv->pdev->dev, + "Failed to get ptype map: err=%d\n", err); + goto abort_with_ptype_lut; + } + } + + err = gve_adminq_report_stats(priv, priv->stats_report_len, + priv->stats_report_bus, + GVE_STATS_REPORT_TIMER_PERIOD); + if (err) + dev_err(&priv->pdev->dev, + "Failed to report stats: err=%d\n", err); + gve_set_device_resources_ok(priv); + return 0; + +abort_with_ptype_lut: + kvfree(priv->ptype_lut_dqo); + priv->ptype_lut_dqo = NULL; +abort_with_stats_report: + gve_free_stats_report(priv); +abort_with_ntfy_blocks: + gve_free_notify_blocks(priv); +abort_with_counter: + gve_free_counter_array(priv); + + return err; +} + +static void gve_trigger_reset(struct gve_priv *priv); + +static void gve_teardown_device_resources(struct gve_priv *priv) +{ + int err; + + /* Tell device its resources are being freed */ + if (gve_get_device_resources_ok(priv)) { + /* detach the stats report */ + err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); + if (err) { + dev_err(&priv->pdev->dev, + "Failed to detach stats report: err=%d\n", err); + gve_trigger_reset(priv); + } + err = gve_adminq_deconfigure_device_resources(priv); + if (err) { + dev_err(&priv->pdev->dev, + "Could not deconfigure device resources: err=%d\n", + err); + gve_trigger_reset(priv); + } + } + + kvfree(priv->ptype_lut_dqo); + priv->ptype_lut_dqo = NULL; + + gve_free_counter_array(priv); + gve_free_notify_blocks(priv); + gve_free_stats_report(priv); + gve_clear_device_resources_ok(priv); +} + +static void gve_add_napi(struct gve_priv *priv, int ntfy_idx, + int (*gve_poll)(struct napi_struct *, int)) +{ + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + netif_napi_add(priv->dev, &block->napi, gve_poll); +} + +static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) +{ + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + netif_napi_del(&block->napi); +} + +static int gve_register_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int err; + int i; + + start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { + err = gve_adminq_register_page_list(priv, &priv->qpls[i]); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to register queue page list %d\n", + priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + } + return 0; +} + +static int gve_register_qpls(struct gve_priv *priv) +{ + int start_id; + int err; + int i; + + start_id = gve_tx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { + err = gve_adminq_register_page_list(priv, &priv->qpls[i]); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to register queue page list %d\n", + priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + } + + start_id = gve_rx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { + err = gve_adminq_register_page_list(priv, &priv->qpls[i]); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to register queue page list %d\n", + priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + } + return 0; +} + +static int gve_unregister_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int err; + int i; + + start_id = 
gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { + err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean up */ + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to unregister queue page list %d\n", + priv->qpls[i].id); + return err; + } + } + return 0; +} + +static int gve_unregister_qpls(struct gve_priv *priv) +{ + int start_id; + int err; + int i; + + start_id = gve_tx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { + err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean up */ + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to unregister queue page list %d\n", + priv->qpls[i].id); + return err; + } + } + + start_id = gve_rx_start_qpl_id(priv); + for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { + err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); + /* This failure will trigger a reset - no need to clean up */ + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to unregister queue page list %d\n", + priv->qpls[i].id); + return err; + } + } + return 0; +} + +static int gve_create_xdp_rings(struct gve_priv *priv) +{ + int err; + + err = gve_adminq_create_tx_queues(priv, + gve_xdp_tx_start_queue_id(priv), + priv->num_xdp_queues); + if (err) { + netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n", + priv->num_xdp_queues); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n", + priv->num_xdp_queues); + + return 0; +} + +static int gve_create_rings(struct gve_priv *priv) +{ + int num_tx_queues = gve_num_tx_queues(priv); + int err; + int i; + + err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues); + if (err) { + netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", + num_tx_queues); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", + num_tx_queues); + + err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); + if (err) { + netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", + priv->rx_cfg.num_queues); + /* This failure will trigger a reset - no need to clean + * up + */ + return err; + } + netif_dbg(priv, drv, priv->dev, "created %d rx queues\n", + priv->rx_cfg.num_queues); + + if (gve_is_gqi(priv)) { + /* Rx data ring has been prefilled with packet buffers at queue + * allocation time. + * + * Write the doorbell to provide descriptor slots and packet + * buffers to the NIC. + */ + for (i = 0; i < priv->rx_cfg.num_queues; i++) + gve_rx_write_doorbell(priv, &priv->rx[i]); + } else { + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + /* Post buffers and ring doorbell. 
*/ + gve_rx_post_buffers_dqo(&priv->rx[i]); + } + } + + return 0; +} + +static void add_napi_init_xdp_sync_stats(struct gve_priv *priv, + int (*napi_poll)(struct napi_struct *napi, + int budget)) +{ + int start_id = gve_xdp_tx_start_queue_id(priv); + int i; + + /* Add xdp tx napi & init sync stats*/ + for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { + int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); + + u64_stats_init(&priv->tx[i].statss); + priv->tx[i].ntfy_id = ntfy_idx; + gve_add_napi(priv, ntfy_idx, napi_poll); + } +} + +static void add_napi_init_sync_stats(struct gve_priv *priv, + int (*napi_poll)(struct napi_struct *napi, + int budget)) +{ + int i; + + /* Add tx napi & init sync stats*/ + for (i = 0; i < gve_num_tx_queues(priv); i++) { + int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); + + u64_stats_init(&priv->tx[i].statss); + priv->tx[i].ntfy_id = ntfy_idx; + gve_add_napi(priv, ntfy_idx, napi_poll); + } + /* Add rx napi & init sync stats*/ + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + int ntfy_idx = gve_rx_idx_to_ntfy(priv, i); + + u64_stats_init(&priv->rx[i].statss); + priv->rx[i].ntfy_id = ntfy_idx; + gve_add_napi(priv, ntfy_idx, napi_poll); + } +} + +static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings) +{ + if (gve_is_gqi(priv)) { + gve_tx_free_rings_gqi(priv, start_id, num_rings); + } else { + gve_tx_free_rings_dqo(priv); + } +} + +static int gve_alloc_xdp_rings(struct gve_priv *priv) +{ + int start_id; + int err = 0; + + if (!priv->num_xdp_queues) + return 0; + + start_id = gve_xdp_tx_start_queue_id(priv); + err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues); + if (err) + return err; + add_napi_init_xdp_sync_stats(priv, gve_napi_poll); + + return 0; +} + +static int gve_alloc_rings(struct gve_priv *priv) +{ + int err; + + /* Setup tx rings */ + priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx), + GFP_KERNEL); + if (!priv->tx) + return -ENOMEM; + + if (gve_is_gqi(priv)) + err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv)); + else + err = gve_tx_alloc_rings_dqo(priv); + if (err) + goto free_tx; + + /* Setup rx rings */ + priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx), + GFP_KERNEL); + if (!priv->rx) { + err = -ENOMEM; + goto free_tx_queue; + } + + if (gve_is_gqi(priv)) + err = gve_rx_alloc_rings(priv); + else + err = gve_rx_alloc_rings_dqo(priv); + if (err) + goto free_rx; + + if (gve_is_gqi(priv)) + add_napi_init_sync_stats(priv, gve_napi_poll); + else + add_napi_init_sync_stats(priv, gve_napi_poll_dqo); + + return 0; + +free_rx: + kvfree(priv->rx); + priv->rx = NULL; +free_tx_queue: + gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv)); +free_tx: + kvfree(priv->tx); + priv->tx = NULL; + return err; +} + +static int gve_destroy_xdp_rings(struct gve_priv *priv) +{ + int start_id; + int err; + + start_id = gve_xdp_tx_start_queue_id(priv); + err = gve_adminq_destroy_tx_queues(priv, + start_id, + priv->num_xdp_queues); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to destroy XDP queues\n"); + /* This failure will trigger a reset - no need to clean up */ + return err; + } + netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n"); + + return 0; +} + +static int gve_destroy_rings(struct gve_priv *priv) +{ + int num_tx_queues = gve_num_tx_queues(priv); + int err; + + err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to destroy tx queues\n"); + /* This failure will trigger a reset - no need to clean up */ + return err; + 
} + netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); + err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); + if (err) { + netif_err(priv, drv, priv->dev, + "failed to destroy rx queues\n"); + /* This failure will trigger a reset - no need to clean up */ + return err; + } + netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); + return 0; +} + +static void gve_rx_free_rings(struct gve_priv *priv) +{ + if (gve_is_gqi(priv)) + gve_rx_free_rings_gqi(priv); + else + gve_rx_free_rings_dqo(priv); +} + +static void gve_free_xdp_rings(struct gve_priv *priv) +{ + int ntfy_idx, start_id; + int i; + + start_id = gve_xdp_tx_start_queue_id(priv); + if (priv->tx) { + for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { + ntfy_idx = gve_tx_idx_to_ntfy(priv, i); + gve_remove_napi(priv, ntfy_idx); + } + gve_tx_free_rings(priv, start_id, priv->num_xdp_queues); + } +} + +static void gve_free_rings(struct gve_priv *priv) +{ + int num_tx_queues = gve_num_tx_queues(priv); + int ntfy_idx; + int i; + + if (priv->tx) { + for (i = 0; i < num_tx_queues; i++) { + ntfy_idx = gve_tx_idx_to_ntfy(priv, i); + gve_remove_napi(priv, ntfy_idx); + } + gve_tx_free_rings(priv, 0, num_tx_queues); + kvfree(priv->tx); + priv->tx = NULL; + } + if (priv->rx) { + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + ntfy_idx = gve_rx_idx_to_ntfy(priv, i); + gve_remove_napi(priv, ntfy_idx); + } + gve_rx_free_rings(priv); + kvfree(priv->rx); + priv->rx = NULL; + } +} + +int gve_alloc_page(struct gve_priv *priv, struct device *dev, + struct page **page, dma_addr_t *dma, + enum dma_data_direction dir, gfp_t gfp_flags) +{ + *page = alloc_page(gfp_flags); + if (!*page) { + priv->page_alloc_fail++; + return -ENOMEM; + } + *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir); + if (dma_mapping_error(dev, *dma)) { + priv->dma_mapping_error++; + put_page(*page); + return -ENOMEM; + } + return 0; +} + +static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, + int pages) +{ + struct gve_queue_page_list *qpl = &priv->qpls[id]; + int err; + int i; + + if (pages + priv->num_registered_pages > priv->max_registered_pages) { + netif_err(priv, drv, priv->dev, + "Reached max number of registered pages %llu > %llu\n", + pages + priv->num_registered_pages, + priv->max_registered_pages); + return -EINVAL; + } + + qpl->id = id; + qpl->num_entries = 0; + qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL); + /* caller handles clean up */ + if (!qpl->pages) + return -ENOMEM; + qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL); + /* caller handles clean up */ + if (!qpl->page_buses) + return -ENOMEM; + + for (i = 0; i < pages; i++) { + err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], + &qpl->page_buses[i], + gve_qpl_dma_dir(priv, id), GFP_KERNEL); + /* caller handles clean up */ + if (err) + return -ENOMEM; + qpl->num_entries++; + } + priv->num_registered_pages += pages; + + return 0; +} + +void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, + enum dma_data_direction dir) +{ + if (!dma_mapping_error(dev, dma)) + dma_unmap_page(dev, dma, PAGE_SIZE, dir); + if (page) + put_page(page); +} + +static void gve_free_queue_page_list(struct gve_priv *priv, u32 id) +{ + struct gve_queue_page_list *qpl = &priv->qpls[id]; + int i; + + if (!qpl->pages) + return; + if (!qpl->page_buses) + goto free_pages; + + for (i = 0; i < qpl->num_entries; i++) + gve_free_page(&priv->pdev->dev, qpl->pages[i], + qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); + + 
kvfree(qpl->page_buses); + qpl->page_buses = NULL; +free_pages: + kvfree(qpl->pages); + qpl->pages = NULL; + priv->num_registered_pages -= qpl->num_entries; +} + +static int gve_alloc_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int i, j; + int err; + + start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { + err = gve_alloc_queue_page_list(priv, i, + priv->tx_pages_per_qpl); + if (err) + goto free_qpls; + } + + return 0; + +free_qpls: + for (j = start_id; j <= i; j++) + gve_free_queue_page_list(priv, j); + return err; +} + +static int gve_alloc_qpls(struct gve_priv *priv) +{ + int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; + int page_count; + int start_id; + int i, j; + int err; + + if (!gve_is_qpl(priv)) + return 0; + + priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL); + if (!priv->qpls) + return -ENOMEM; + + start_id = gve_tx_start_qpl_id(priv); + page_count = priv->tx_pages_per_qpl; + for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { + err = gve_alloc_queue_page_list(priv, i, + page_count); + if (err) + goto free_qpls; + } + + start_id = gve_rx_start_qpl_id(priv); + + /* For GQI_QPL number of pages allocated have 1:1 relationship with + * number of descriptors. For DQO, number of pages required are + * more than descriptors (because of out of order completions). + */ + page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ? + priv->rx_data_slot_cnt : priv->rx_pages_per_qpl; + for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { + err = gve_alloc_queue_page_list(priv, i, + page_count); + if (err) + goto free_qpls; + } + + priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) * + sizeof(unsigned long) * BITS_PER_BYTE; + priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues), + sizeof(unsigned long), GFP_KERNEL); + if (!priv->qpl_cfg.qpl_id_map) { + err = -ENOMEM; + goto free_qpls; + } + + return 0; + +free_qpls: + for (j = 0; j <= i; j++) + gve_free_queue_page_list(priv, j); + kvfree(priv->qpls); + priv->qpls = NULL; + return err; +} + +static void gve_free_xdp_qpls(struct gve_priv *priv) +{ + int start_id; + int i; + + start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); + for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) + gve_free_queue_page_list(priv, i); +} + +static void gve_free_qpls(struct gve_priv *priv) +{ + int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; + int i; + + if (!priv->qpls) + return; + + kvfree(priv->qpl_cfg.qpl_id_map); + priv->qpl_cfg.qpl_id_map = NULL; + + for (i = 0; i < max_queues; i++) + gve_free_queue_page_list(priv, i); + + kvfree(priv->qpls); + priv->qpls = NULL; +} + +/* Use this to schedule a reset when the device is capable of continuing + * to handle other requests in its current state. If it is not, do a reset + * in thread instead. 
+ */ +void gve_schedule_reset(struct gve_priv *priv) +{ + gve_set_do_reset(priv); + queue_work(priv->gve_wq, &priv->service_task); +} + +static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up); +static int gve_reset_recovery(struct gve_priv *priv, bool was_up); +static void gve_turndown(struct gve_priv *priv); +static void gve_turnup(struct gve_priv *priv); + +static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) +{ + struct napi_struct *napi; + struct gve_rx_ring *rx; + int err = 0; + int i, j; + u32 tx_qid; + + if (!priv->num_xdp_queues) + return 0; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + rx = &priv->rx[i]; + napi = &priv->ntfy_blocks[rx->ntfy_id].napi; + + err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i, + napi->napi_id); + if (err) + goto err; + err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) + goto err; + rx->xsk_pool = xsk_get_pool_from_qid(dev, i); + if (rx->xsk_pool) { + err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i, + napi->napi_id); + if (err) + goto err; + err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, + MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err) + goto err; + xsk_pool_set_rxq_info(rx->xsk_pool, + &rx->xsk_rxq); + } + } + + for (i = 0; i < priv->num_xdp_queues; i++) { + tx_qid = gve_xdp_tx_queue_id(priv, i); + priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i); + } + return 0; + +err: + for (j = i; j >= 0; j--) { + rx = &priv->rx[j]; + if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) + xdp_rxq_info_unreg(&rx->xdp_rxq); + if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) + xdp_rxq_info_unreg(&rx->xsk_rxq); + } + return err; +} + +static void gve_unreg_xdp_info(struct gve_priv *priv) +{ + int i, tx_qid; + + if (!priv->num_xdp_queues) + return; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + struct gve_rx_ring *rx = &priv->rx[i]; + + xdp_rxq_info_unreg(&rx->xdp_rxq); + if (rx->xsk_pool) { + xdp_rxq_info_unreg(&rx->xsk_rxq); + rx->xsk_pool = NULL; + } + } + + for (i = 0; i < priv->num_xdp_queues; i++) { + tx_qid = gve_xdp_tx_queue_id(priv, i); + priv->tx[tx_qid].xsk_pool = NULL; + } +} + +static void gve_drain_page_cache(struct gve_priv *priv) +{ + struct page_frag_cache *nc; + int i; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + nc = &priv->rx[i].page_cache; + if (nc->va) { + __page_frag_cache_drain(virt_to_page(nc->va), + nc->pagecnt_bias); + nc->va = NULL; + } + } +} + +static int gve_open(struct net_device *dev) +{ + struct gve_priv *priv = netdev_priv(dev); + int err; + + if (priv->xdp_prog) + priv->num_xdp_queues = priv->rx_cfg.num_queues; + else + priv->num_xdp_queues = 0; + + err = gve_alloc_qpls(priv); + if (err) + return err; + + err = gve_alloc_rings(priv); + if (err) + goto free_qpls; + + err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); + if (err) + goto free_rings; + err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues); + if (err) + goto free_rings; + + err = gve_reg_xdp_info(priv, dev); + if (err) + goto free_rings; + + err = gve_register_qpls(priv); + if (err) + goto reset; + + if (!gve_is_gqi(priv)) { + /* Hard code this for now. This may be tuned in the future for + * performance. 
+ */ + priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; + } + err = gve_create_rings(priv); + if (err) + goto reset; + + gve_set_device_rings_ok(priv); + + if (gve_get_report_stats(priv)) + mod_timer(&priv->stats_report_timer, + round_jiffies(jiffies + + msecs_to_jiffies(priv->stats_report_timer_period))); + + gve_turnup(priv); + queue_work(priv->gve_wq, &priv->service_task); + priv->interface_up_cnt++; + return 0; + +free_rings: + gve_free_rings(priv); +free_qpls: + gve_free_qpls(priv); + return err; + +reset: + /* This must have been called from a reset due to the rtnl lock + * so just return at this point. + */ + if (gve_get_reset_in_progress(priv)) + return err; + /* Otherwise reset before returning */ + gve_reset_and_teardown(priv, true); + /* if this fails there is nothing we can do so just ignore the return */ + gve_reset_recovery(priv, false); + /* return the original error */ + return err; +} + +static int gve_close(struct net_device *dev) +{ + struct gve_priv *priv = netdev_priv(dev); + int err; + + netif_carrier_off(dev); + if (gve_get_device_rings_ok(priv)) { + gve_turndown(priv); + gve_drain_page_cache(priv); + err = gve_destroy_rings(priv); + if (err) + goto err; + err = gve_unregister_qpls(priv); + if (err) + goto err; + gve_clear_device_rings_ok(priv); + } + del_timer_sync(&priv->stats_report_timer); + + gve_unreg_xdp_info(priv); + gve_free_rings(priv); + gve_free_qpls(priv); + priv->interface_down_cnt++; + return 0; + +err: + /* This must have been called from a reset due to the rtnl lock + * so just return at this point. + */ + if (gve_get_reset_in_progress(priv)) + return err; + /* Otherwise reset before returning */ + gve_reset_and_teardown(priv, true); + return gve_reset_recovery(priv, false); +} + +static int gve_remove_xdp_queues(struct gve_priv *priv) +{ + int err; + + err = gve_destroy_xdp_rings(priv); + if (err) + return err; + + err = gve_unregister_xdp_qpls(priv); + if (err) + return err; + + gve_unreg_xdp_info(priv); + gve_free_xdp_rings(priv); + gve_free_xdp_qpls(priv); + priv->num_xdp_queues = 0; + return 0; +} + +static int gve_add_xdp_queues(struct gve_priv *priv) +{ + int err; + + priv->num_xdp_queues = priv->tx_cfg.num_queues; + + err = gve_alloc_xdp_qpls(priv); + if (err) + goto err; + + err = gve_alloc_xdp_rings(priv); + if (err) + goto free_xdp_qpls; + + err = gve_reg_xdp_info(priv, priv->dev); + if (err) + goto free_xdp_rings; + + err = gve_register_xdp_qpls(priv); + if (err) + goto free_xdp_rings; + + err = gve_create_xdp_rings(priv); + if (err) + goto free_xdp_rings; + + return 0; + +free_xdp_rings: + gve_free_xdp_rings(priv); +free_xdp_qpls: + gve_free_xdp_qpls(priv); +err: + priv->num_xdp_queues = 0; + return err; +} + +static void gve_handle_link_status(struct gve_priv *priv, bool link_status) +{ + if (!gve_get_napi_enabled(priv)) + return; + + if (link_status == netif_carrier_ok(priv->dev)) + return; + + if (link_status) { + netdev_info(priv->dev, "Device link is up.\n"); + netif_carrier_on(priv->dev); + } else { + netdev_info(priv->dev, "Device link is down.\n"); + netif_carrier_off(priv->dev); + } +} + +static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bpf_prog *old_prog; + int err = 0; + u32 status; + + old_prog = READ_ONCE(priv->xdp_prog); + if (!netif_carrier_ok(priv->dev)) { + WRITE_ONCE(priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + return 0; + } + + gve_turndown(priv); + if (!old_prog && prog) { + // Allocate XDP TX queues if an XDP program is + // 
being installed + err = gve_add_xdp_queues(priv); + if (err) + goto out; + } else if (old_prog && !prog) { + // Remove XDP TX queues if an XDP program is + // being uninstalled + err = gve_remove_xdp_queues(priv); + if (err) + goto out; + } + WRITE_ONCE(priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + +out: + gve_turnup(priv); + status = ioread32be(&priv->reg_bar0->device_status); + gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); + return err; +} + +static int gve_xsk_pool_enable(struct net_device *dev, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct gve_priv *priv = netdev_priv(dev); + struct napi_struct *napi; + struct gve_rx_ring *rx; + int tx_qid; + int err; + + if (qid >= priv->rx_cfg.num_queues) { + dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid); + return -EINVAL; + } + if (xsk_pool_get_rx_frame_size(pool) < + priv->dev->max_mtu + sizeof(struct ethhdr)) { + dev_err(&priv->pdev->dev, "xsk pool frame_len too small"); + return -EINVAL; + } + + err = xsk_pool_dma_map(pool, &priv->pdev->dev, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + if (err) + return err; + + /* If XDP prog is not installed, return */ + if (!priv->xdp_prog) + return 0; + + rx = &priv->rx[qid]; + napi = &priv->ntfy_blocks[rx->ntfy_id].napi; + err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id); + if (err) + goto err; + + err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, + MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err) + goto err; + + xsk_pool_set_rxq_info(pool, &rx->xsk_rxq); + rx->xsk_pool = pool; + + tx_qid = gve_xdp_tx_queue_id(priv, qid); + priv->tx[tx_qid].xsk_pool = pool; + + return 0; +err: + if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) + xdp_rxq_info_unreg(&rx->xsk_rxq); + + xsk_pool_dma_unmap(pool, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + return err; +} + +static int gve_xsk_pool_disable(struct net_device *dev, + u16 qid) +{ + struct gve_priv *priv = netdev_priv(dev); + struct napi_struct *napi_rx; + struct napi_struct *napi_tx; + struct xsk_buff_pool *pool; + int tx_qid; + + pool = xsk_get_pool_from_qid(dev, qid); + if (!pool) + return -EINVAL; + if (qid >= priv->rx_cfg.num_queues) + return -EINVAL; + + /* If XDP prog is not installed, unmap DMA and return */ + if (!priv->xdp_prog) + goto done; + + tx_qid = gve_xdp_tx_queue_id(priv, qid); + if (!netif_running(dev)) { + priv->rx[qid].xsk_pool = NULL; + xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); + priv->tx[tx_qid].xsk_pool = NULL; + goto done; + } + + napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi; + napi_disable(napi_rx); /* make sure current rx poll is done */ + + napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi; + napi_disable(napi_tx); /* make sure current tx poll is done */ + + priv->rx[qid].xsk_pool = NULL; + xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); + priv->tx[tx_qid].xsk_pool = NULL; + smp_mb(); /* Make sure it is visible to the workers on datapath */ + + napi_enable(napi_rx); + if (gve_rx_work_pending(&priv->rx[qid])) + napi_schedule(napi_rx); + + napi_enable(napi_tx); + if (gve_tx_clean_pending(priv, &priv->tx[tx_qid])) + napi_schedule(napi_tx); + +done: + xsk_pool_dma_unmap(pool, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + return 0; +} + +static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct gve_priv *priv = netdev_priv(dev); + int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id); + + if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog) + return -EINVAL; + + if (flags & XDP_WAKEUP_TX) { + struct 
gve_tx_ring *tx = &priv->tx[tx_queue_id]; + struct napi_struct *napi = + &priv->ntfy_blocks[tx->ntfy_id].napi; + + if (!napi_if_scheduled_mark_missed(napi)) { + /* Call local_bh_enable to trigger SoftIRQ processing */ + local_bh_disable(); + napi_schedule(napi); + local_bh_enable(); + } + + tx->xdp_xsk_wakeup++; + } + + return 0; +} + +static int verify_xdp_configuration(struct net_device *dev) +{ + struct gve_priv *priv = netdev_priv(dev); + + if (dev->features & NETIF_F_LRO) { + netdev_warn(dev, "XDP is not supported when LRO is on.\n"); + return -EOPNOTSUPP; + } + + if (priv->queue_format != GVE_GQI_QPL_FORMAT) { + netdev_warn(dev, "XDP is not supported in mode %d.\n", + priv->queue_format); + return -EOPNOTSUPP; + } + + if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) { + netdev_warn(dev, "XDP is not supported for mtu %d.\n", + dev->mtu); + return -EOPNOTSUPP; + } + + if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues || + (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { + netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d", + priv->rx_cfg.num_queues, + priv->tx_cfg.num_queues, + priv->tx_cfg.max_queues); + return -EINVAL; + } + return 0; +} + +static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct gve_priv *priv = netdev_priv(dev); + int err; + + err = verify_xdp_configuration(dev); + if (err) + return err; + switch (xdp->command) { + case XDP_SETUP_PROG: + return gve_set_xdp(priv, xdp->prog, xdp->extack); + case XDP_SETUP_XSK_POOL: + if (xdp->xsk.pool) + return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id); + else + return gve_xsk_pool_disable(dev, xdp->xsk.queue_id); + default: + return -EINVAL; + } +} + +int gve_adjust_queues(struct gve_priv *priv, + struct gve_queue_config new_rx_config, + struct gve_queue_config new_tx_config) +{ + int err; + + if (netif_carrier_ok(priv->dev)) { + /* To make this process as simple as possible we teardown the + * device, set the new configuration, and then bring the device + * up again. + */ + err = gve_close(priv->dev); + /* we have already tried to reset in close, + * just fail at this point + */ + if (err) + return err; + priv->tx_cfg = new_tx_config; + priv->rx_cfg = new_rx_config; + + err = gve_open(priv->dev); + if (err) + goto err; + + return 0; + } + /* Set the config for the next up. */ + priv->tx_cfg = new_tx_config; + priv->rx_cfg = new_rx_config; + + return 0; +err: + netif_err(priv, drv, priv->dev, + "Adjust queues failed! !!! 
DISABLING ALL QUEUES !!!\n"); + gve_turndown(priv); + return err; +} + +static void gve_turndown(struct gve_priv *priv) +{ + int idx; + + if (netif_carrier_ok(priv->dev)) + netif_carrier_off(priv->dev); + + if (!gve_get_napi_enabled(priv)) + return; + + /* Disable napi to prevent more work from coming in */ + for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { + int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + napi_disable(&block->napi); + } + for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { + int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + napi_disable(&block->napi); + } + + /* Stop tx queues */ + netif_tx_disable(priv->dev); + + gve_clear_napi_enabled(priv); + gve_clear_report_stats(priv); +} + +static void gve_turnup(struct gve_priv *priv) +{ + int idx; + + /* Start the tx queues */ + netif_tx_start_all_queues(priv->dev); + + /* Enable napi and unmask interrupts for all queues */ + for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { + int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + napi_enable(&block->napi); + if (gve_is_gqi(priv)) { + iowrite32be(0, gve_irq_doorbell(priv, block)); + } else { + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->tx_coalesce_usecs); + } + } + for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { + int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + napi_enable(&block->napi); + if (gve_is_gqi(priv)) { + iowrite32be(0, gve_irq_doorbell(priv, block)); + } else { + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->rx_coalesce_usecs); + } + } + + gve_set_napi_enabled(priv); +} + +static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct gve_notify_block *block; + struct gve_tx_ring *tx = NULL; + struct gve_priv *priv; + u32 last_nic_done; + u32 current_time; + u32 ntfy_idx; + + netdev_info(dev, "Timeout on tx queue, %d", txqueue); + priv = netdev_priv(dev); + if (txqueue > priv->tx_cfg.num_queues) + goto reset; + + ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); + if (ntfy_idx >= priv->num_ntfy_blks) + goto reset; + + block = &priv->ntfy_blocks[ntfy_idx]; + tx = block->tx; + + current_time = jiffies_to_msecs(jiffies); + if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) + goto reset; + + /* Check to see if there are missed completions, which will allow us to + * kick the queue. + */ + last_nic_done = gve_tx_load_event_counter(priv, tx); + if (last_nic_done - tx->done) { + netdev_info(dev, "Kicking queue %d", txqueue); + iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); + napi_schedule(&block->napi); + tx->last_kick_msec = current_time; + goto out; + } // Else reset. + +reset: + gve_schedule_reset(priv); + +out: + if (tx) + tx->queue_timeout++; + priv->tx_timeo_cnt++; +} + +static int gve_set_features(struct net_device *netdev, + netdev_features_t features) +{ + const netdev_features_t orig_features = netdev->features; + struct gve_priv *priv = netdev_priv(netdev); + int err; + + if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) { + netdev->features ^= NETIF_F_LRO; + if (netif_carrier_ok(netdev)) { + /* To make this process as simple as possible we + * teardown the device, set the new configuration, + * and then bring the device up again. 
+ */ + err = gve_close(netdev); + /* We have already tried to reset in close, just fail + * at this point. + */ + if (err) + goto err; + + err = gve_open(netdev); + if (err) + goto err; + } + } + + return 0; +err: + /* Reverts the change on error. */ + netdev->features = orig_features; + netif_err(priv, drv, netdev, + "Set features failed! !!! DISABLING ALL QUEUES !!!\n"); + return err; +} + +static const struct net_device_ops gve_netdev_ops = { + .ndo_start_xmit = gve_start_xmit, + .ndo_open = gve_open, + .ndo_stop = gve_close, + .ndo_get_stats64 = gve_get_stats, + .ndo_tx_timeout = gve_tx_timeout, + .ndo_set_features = gve_set_features, + .ndo_bpf = gve_xdp, + .ndo_xdp_xmit = gve_xdp_xmit, + .ndo_xsk_wakeup = gve_xsk_wakeup, +}; + +static void gve_handle_status(struct gve_priv *priv, u32 status) +{ + if (GVE_DEVICE_STATUS_RESET_MASK & status) { + dev_info(&priv->pdev->dev, "Device requested reset.\n"); + gve_set_do_reset(priv); + } + if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) { + priv->stats_report_trigger_cnt++; + gve_set_do_report_stats(priv); + } +} + +static void gve_handle_reset(struct gve_priv *priv) +{ + /* A service task will be scheduled at the end of probe to catch any + * resets that need to happen, and we don't want to reset until + * probe is done. + */ + if (gve_get_probe_in_progress(priv)) + return; + + if (gve_get_do_reset(priv)) { + rtnl_lock(); + gve_reset(priv, false); + rtnl_unlock(); + } +} + +void gve_handle_report_stats(struct gve_priv *priv) +{ + struct stats *stats = priv->stats_report->stats; + int idx, stats_idx = 0; + unsigned int start = 0; + u64 tx_bytes; + + if (!gve_get_report_stats(priv)) + return; + + be64_add_cpu(&priv->stats_report->written_count, 1); + /* tx stats */ + if (priv->tx) { + for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { + u32 last_completion = 0; + u32 tx_frames = 0; + + /* DQO doesn't currently support these metrics. 
*/ + if (gve_is_gqi(priv)) { + last_completion = priv->tx[idx].done; + tx_frames = priv->tx[idx].req; + } + + do { + start = u64_stats_fetch_begin(&priv->tx[idx].statss); + tx_bytes = priv->tx[idx].bytes_done; + } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_WAKE_CNT), + .value = cpu_to_be64(priv->tx[idx].wake_queue), + .queue_id = cpu_to_be32(idx), + }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_STOP_CNT), + .value = cpu_to_be64(priv->tx[idx].stop_queue), + .queue_id = cpu_to_be32(idx), + }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_FRAMES_SENT), + .value = cpu_to_be64(tx_frames), + .queue_id = cpu_to_be32(idx), + }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_BYTES_SENT), + .value = cpu_to_be64(tx_bytes), + .queue_id = cpu_to_be32(idx), + }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED), + .value = cpu_to_be64(last_completion), + .queue_id = cpu_to_be32(idx), + }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(TX_TIMEOUT_CNT), + .value = cpu_to_be64(priv->tx[idx].queue_timeout), + .queue_id = cpu_to_be32(idx), + }; + } + } + /* rx stats */ + if (priv->rx) { + for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE), + .value = cpu_to_be64(priv->rx[idx].desc.seqno), + .queue_id = cpu_to_be32(idx), + }; + stats[stats_idx++] = (struct stats) { + .stat_name = cpu_to_be32(RX_BUFFERS_POSTED), + .value = cpu_to_be64(priv->rx[0].fill_cnt), + .queue_id = cpu_to_be32(idx), + }; + } + } +} + +/* Handle NIC status register changes, reset requests and report stats */ +static void gve_service_task(struct work_struct *work) +{ + struct gve_priv *priv = container_of(work, struct gve_priv, + service_task); + u32 status = ioread32be(&priv->reg_bar0->device_status); + + gve_handle_status(priv, status); + + gve_handle_reset(priv); + gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); +} + +static void gve_set_netdev_xdp_features(struct gve_priv *priv) +{ + if (priv->queue_format == GVE_GQI_QPL_FORMAT) { + priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC; + priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; + priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT; + priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; + } else { + priv->dev->xdp_features = 0; + } +} + +static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) +{ + int num_ntfy; + int err; + + /* Set up the adminq */ + err = gve_adminq_alloc(&priv->pdev->dev, priv); + if (err) { + dev_err(&priv->pdev->dev, + "Failed to alloc admin queue: err=%d\n", err); + return err; + } + + err = gve_verify_driver_compatibility(priv); + if (err) { + dev_err(&priv->pdev->dev, + "Could not verify driver compatibility: err=%d\n", err); + goto err; + } + + if (skip_describe_device) + goto setup_device; + + priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; + /* Get the initial information we need from the device */ + err = gve_adminq_describe_device(priv); + if (err) { + dev_err(&priv->pdev->dev, + "Could not get device information: err=%d\n", err); + goto err; + } + priv->dev->mtu = priv->dev->max_mtu; + num_ntfy = pci_msix_vec_count(priv->pdev); + if (num_ntfy <= 0) { + dev_err(&priv->pdev->dev, + "could not count MSI-x vectors: err=%d\n", num_ntfy); + err = num_ntfy; + goto err; + } else if 
(num_ntfy < GVE_MIN_MSIX) { + dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n", + GVE_MIN_MSIX, num_ntfy); + err = -EINVAL; + goto err; + } + + /* Big TCP is only supported on DQ*/ + if (!gve_is_gqi(priv)) + netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); + + priv->num_registered_pages = 0; + priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; + /* gvnic has one Notification Block per MSI-x vector, except for the + * management vector + */ + priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; + priv->mgmt_msix_idx = priv->num_ntfy_blks; + + priv->tx_cfg.max_queues = + min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); + priv->rx_cfg.max_queues = + min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2); + + priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; + priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; + if (priv->default_num_queues > 0) { + priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues, + priv->tx_cfg.num_queues); + priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, + priv->rx_cfg.num_queues); + } + + dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", + priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); + dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", + priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); + + if (!gve_is_gqi(priv)) { + priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; + priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; + } + +setup_device: + gve_set_netdev_xdp_features(priv); + err = gve_setup_device_resources(priv); + if (!err) + return 0; +err: + gve_adminq_free(&priv->pdev->dev, priv); + return err; +} + +static void gve_teardown_priv_resources(struct gve_priv *priv) +{ + gve_teardown_device_resources(priv); + gve_adminq_free(&priv->pdev->dev, priv); +} + +static void gve_trigger_reset(struct gve_priv *priv) +{ + /* Reset the device by releasing the AQ */ + gve_adminq_release(priv); +} + +static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up) +{ + gve_trigger_reset(priv); + /* With the reset having already happened, close cannot fail */ + if (was_up) + gve_close(priv->dev); + gve_teardown_priv_resources(priv); +} + +static int gve_reset_recovery(struct gve_priv *priv, bool was_up) +{ + int err; + + err = gve_init_priv(priv, true); + if (err) + goto err; + if (was_up) { + err = gve_open(priv->dev); + if (err) + goto err; + } + return 0; +err: + dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n"); + gve_turndown(priv); + return err; +} + +int gve_reset(struct gve_priv *priv, bool attempt_teardown) +{ + bool was_up = netif_carrier_ok(priv->dev); + int err; + + dev_info(&priv->pdev->dev, "Performing reset\n"); + gve_clear_do_reset(priv); + gve_set_reset_in_progress(priv); + /* If we aren't attempting to teardown normally, just go turndown and + * reset right away. 
+ */ + if (!attempt_teardown) { + gve_turndown(priv); + gve_reset_and_teardown(priv, was_up); + } else { + /* Otherwise attempt to close normally */ + if (was_up) { + err = gve_close(priv->dev); + /* If that fails reset as we did above */ + if (err) + gve_reset_and_teardown(priv, was_up); + } + /* Clean up any remaining resources */ + gve_teardown_priv_resources(priv); + } + + /* Set it all back up */ + err = gve_reset_recovery(priv, was_up); + gve_clear_reset_in_progress(priv); + priv->reset_cnt++; + priv->interface_up_cnt = 0; + priv->interface_down_cnt = 0; + priv->stats_report_trigger_cnt = 0; + return err; +} + +static void gve_write_version(u8 __iomem *driver_version_register) +{ + const char *c = gve_version_prefix; + + while (*c) { + writeb(*c, driver_version_register); + c++; + } + + c = gve_version_str; + while (*c) { + writeb(*c, driver_version_register); + c++; + } + writeb('\n', driver_version_register); +} + +static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int max_tx_queues, max_rx_queues; + struct net_device *dev; + __be32 __iomem *db_bar; + struct gve_registers __iomem *reg_bar; + struct gve_priv *priv; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = pci_request_regions(pdev, gve_driver_name); + if (err) + goto abort_with_enabled; + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err); + goto abort_with_pci_region; + } + + reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0); + if (!reg_bar) { + dev_err(&pdev->dev, "Failed to map pci bar!\n"); + err = -ENOMEM; + goto abort_with_pci_region; + } + + db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0); + if (!db_bar) { + dev_err(&pdev->dev, "Failed to map doorbell bar!\n"); + err = -ENOMEM; + goto abort_with_reg_bar; + } + + gve_write_version(&reg_bar->driver_version); + /* Get max queues to alloc etherdev */ + max_tx_queues = ioread32be(&reg_bar->max_tx_queues); + max_rx_queues = ioread32be(&reg_bar->max_rx_queues); + /* Alloc and setup the netdev and priv */ + dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); + if (!dev) { + dev_err(&pdev->dev, "could not allocate netdev\n"); + err = -ENOMEM; + goto abort_with_db_bar; + } + SET_NETDEV_DEV(dev, &pdev->dev); + pci_set_drvdata(pdev, dev); + dev->ethtool_ops = &gve_ethtool_ops; + dev->netdev_ops = &gve_netdev_ops; + + /* Set default and supported features. + * + * Features might be set in other locations as well (such as + * `gve_adminq_describe_device`). 
+ */ + dev->hw_features = NETIF_F_HIGHDMA; + dev->hw_features |= NETIF_F_SG; + dev->hw_features |= NETIF_F_HW_CSUM; + dev->hw_features |= NETIF_F_TSO; + dev->hw_features |= NETIF_F_TSO6; + dev->hw_features |= NETIF_F_TSO_ECN; + dev->hw_features |= NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_RXHASH; + dev->features = dev->hw_features; + dev->watchdog_timeo = 5 * HZ; + dev->min_mtu = ETH_MIN_MTU; + netif_carrier_off(dev); + + priv = netdev_priv(dev); + priv->dev = dev; + priv->pdev = pdev; + priv->msg_enable = DEFAULT_MSG_LEVEL; + priv->reg_bar0 = reg_bar; + priv->db_bar2 = db_bar; + priv->service_task_flags = 0x0; + priv->state_flags = 0x0; + priv->ethtool_flags = 0x0; + + gve_set_probe_in_progress(priv); + priv->gve_wq = alloc_ordered_workqueue("gve", 0); + if (!priv->gve_wq) { + dev_err(&pdev->dev, "Could not allocate workqueue"); + err = -ENOMEM; + goto abort_with_netdev; + } + INIT_WORK(&priv->service_task, gve_service_task); + INIT_WORK(&priv->stats_report_task, gve_stats_report_task); + priv->tx_cfg.max_queues = max_tx_queues; + priv->rx_cfg.max_queues = max_rx_queues; + + err = gve_init_priv(priv, false); + if (err) + goto abort_with_wq; + + err = register_netdev(dev); + if (err) + goto abort_with_gve_init; + + dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); + dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); + gve_clear_probe_in_progress(priv); + queue_work(priv->gve_wq, &priv->service_task); + return 0; + +abort_with_gve_init: + gve_teardown_priv_resources(priv); + +abort_with_wq: + destroy_workqueue(priv->gve_wq); + +abort_with_netdev: + free_netdev(dev); + +abort_with_db_bar: + pci_iounmap(pdev, db_bar); + +abort_with_reg_bar: + pci_iounmap(pdev, reg_bar); + +abort_with_pci_region: + pci_release_regions(pdev); + +abort_with_enabled: + pci_disable_device(pdev); + return err; +} + +static void gve_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct gve_priv *priv = netdev_priv(netdev); + __be32 __iomem *db_bar = priv->db_bar2; + void __iomem *reg_bar = priv->reg_bar0; + + unregister_netdev(netdev); + gve_teardown_priv_resources(priv); + destroy_workqueue(priv->gve_wq); + free_netdev(netdev); + pci_iounmap(pdev, db_bar); + pci_iounmap(pdev, reg_bar); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static void gve_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct gve_priv *priv = netdev_priv(netdev); + bool was_up = netif_carrier_ok(priv->dev); + + rtnl_lock(); + if (was_up && gve_close(priv->dev)) { + /* If the dev was up, attempt to close, if close fails, reset */ + gve_reset_and_teardown(priv, was_up); + } else { + /* If the dev wasn't up or close worked, finish tearing down */ + gve_teardown_priv_resources(priv); + } + rtnl_unlock(); +} + +#ifdef CONFIG_PM +static int gve_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct gve_priv *priv = netdev_priv(netdev); + bool was_up = netif_carrier_ok(priv->dev); + + priv->suspend_cnt++; + rtnl_lock(); + if (was_up && gve_close(priv->dev)) { + /* If the dev was up, attempt to close, if close fails, reset */ + gve_reset_and_teardown(priv, was_up); + } else { + /* If the dev wasn't up or close worked, finish tearing down */ + gve_teardown_priv_resources(priv); + } + priv->up_before_suspend = was_up; + rtnl_unlock(); + return 0; +} + +static int gve_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct 
gve_priv *priv = netdev_priv(netdev); + int err; + + priv->resume_cnt++; + rtnl_lock(); + err = gve_reset_recovery(priv, priv->up_before_suspend); + rtnl_unlock(); + return err; +} +#endif /* CONFIG_PM */ + +static const struct pci_device_id gve_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) }, + { } +}; + +static struct pci_driver gve_driver = { + .name = gve_driver_name, + .id_table = gve_id_table, + .probe = gve_probe, + .remove = gve_remove, + .shutdown = gve_shutdown, +#ifdef CONFIG_PM + .suspend = gve_suspend, + .resume = gve_resume, +#endif +}; + +module_pci_driver(gve_driver); + +MODULE_DEVICE_TABLE(pci, gve_id_table); +MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Google Virtual NIC Driver"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_VERSION(GVE_VERSION); diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h new file mode 100644 index 0000000000..fb655463c3 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_register.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2019 Google, Inc. + */ + +#ifndef _GVE_REGISTER_H_ +#define _GVE_REGISTER_H_ + +/* Fixed Configuration Registers */ +struct gve_registers { + __be32 device_status; + __be32 driver_status; + __be32 max_tx_queues; + __be32 max_rx_queues; + __be32 adminq_pfn; + __be32 adminq_doorbell; + __be32 adminq_event_counter; + u8 reserved[3]; + u8 driver_version; +}; + +enum gve_device_status_flags { + GVE_DEVICE_STATUS_RESET_MASK = BIT(1), + GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2), + GVE_DEVICE_STATUS_REPORT_STATS_MASK = BIT(3), +}; +#endif /* _GVE_REGISTER_H_ */ diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c new file mode 100644 index 0000000000..7365534790 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -0,0 +1,1014 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. 
+ */ + +#include "gve.h" +#include "gve_adminq.h" +#include "gve_utils.h" +#include <linux/etherdevice.h> +#include <linux/filter.h> +#include <net/xdp.h> +#include <net/xdp_sock_drv.h> + +static void gve_rx_free_buffer(struct device *dev, + struct gve_rx_slot_page_info *page_info, + union gve_rx_data_slot *data_slot) +{ + dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) & + GVE_DATA_SLOT_ADDR_PAGE_MASK); + + page_ref_sub(page_info->page, page_info->pagecnt_bias - 1); + gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE); +} + +static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) +{ + u32 slots = rx->mask + 1; + int i; + + if (rx->data.raw_addressing) { + for (i = 0; i < slots; i++) + gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], + &rx->data.data_ring[i]); + } else { + for (i = 0; i < slots; i++) + page_ref_sub(rx->data.page_info[i].page, + rx->data.page_info[i].pagecnt_bias - 1); + gve_unassign_qpl(priv, rx->data.qpl->id); + rx->data.qpl = NULL; + + for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) { + page_ref_sub(rx->qpl_copy_pool[i].page, + rx->qpl_copy_pool[i].pagecnt_bias - 1); + put_page(rx->qpl_copy_pool[i].page); + } + } + kvfree(rx->data.page_info); + rx->data.page_info = NULL; +} + +static void gve_rx_free_ring(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + struct device *dev = &priv->pdev->dev; + u32 slots = rx->mask + 1; + size_t bytes; + + gve_rx_remove_from_block(priv, idx); + + bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; + dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); + rx->desc.desc_ring = NULL; + + dma_free_coherent(dev, sizeof(*rx->q_resources), + rx->q_resources, rx->q_resources_bus); + rx->q_resources = NULL; + + gve_rx_unfill_pages(priv, rx); + + bytes = sizeof(*rx->data.data_ring) * slots; + dma_free_coherent(dev, bytes, rx->data.data_ring, + rx->data.data_bus); + rx->data.data_ring = NULL; + + kvfree(rx->qpl_copy_pool); + rx->qpl_copy_pool = NULL; + + netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); +} + +static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info, + dma_addr_t addr, struct page *page, __be64 *slot_addr) +{ + page_info->page = page; + page_info->page_offset = 0; + page_info->page_address = page_address(page); + *slot_addr = cpu_to_be64(addr); + /* The page already has 1 ref */ + page_ref_add(page, INT_MAX - 1); + page_info->pagecnt_bias = INT_MAX; +} + +static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, + struct gve_rx_slot_page_info *page_info, + union gve_rx_data_slot *data_slot) +{ + struct page *page; + dma_addr_t dma; + int err; + + err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE, + GFP_ATOMIC); + if (err) + return err; + + gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr); + return 0; +} + +static int gve_prefill_rx_pages(struct gve_rx_ring *rx) +{ + struct gve_priv *priv = rx->gve; + u32 slots; + int err; + int i; + int j; + + /* Allocate one page per Rx queue slot. Each page is split into two + * packet buffers, when possible we "page flip" between the two. 
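+ * Each buffer is PAGE_SIZE / 2 bytes; gve_rx_flip_buff() toggles the slot address between the two halves.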
+ */ + slots = rx->mask + 1; + + rx->data.page_info = kvzalloc(slots * + sizeof(*rx->data.page_info), GFP_KERNEL); + if (!rx->data.page_info) + return -ENOMEM; + + if (!rx->data.raw_addressing) { + rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num); + if (!rx->data.qpl) { + kvfree(rx->data.page_info); + rx->data.page_info = NULL; + return -ENOMEM; + } + } + for (i = 0; i < slots; i++) { + if (!rx->data.raw_addressing) { + struct page *page = rx->data.qpl->pages[i]; + dma_addr_t addr = i * PAGE_SIZE; + + gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, + &rx->data.data_ring[i].qpl_offset); + continue; + } + err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], + &rx->data.data_ring[i]); + if (err) + goto alloc_err_rda; + } + + if (!rx->data.raw_addressing) { + for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) { + struct page *page = alloc_page(GFP_KERNEL); + + if (!page) { + err = -ENOMEM; + goto alloc_err_qpl; + } + + rx->qpl_copy_pool[j].page = page; + rx->qpl_copy_pool[j].page_offset = 0; + rx->qpl_copy_pool[j].page_address = page_address(page); + + /* The page already has 1 ref. */ + page_ref_add(page, INT_MAX - 1); + rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX; + } + } + + return slots; + +alloc_err_qpl: + /* Fully free the copy pool pages. */ + while (j--) { + page_ref_sub(rx->qpl_copy_pool[j].page, + rx->qpl_copy_pool[j].pagecnt_bias - 1); + put_page(rx->qpl_copy_pool[j].page); + } + + /* Do not fully free QPL pages - only remove the bias added in this + * function with gve_setup_rx_buffer. + */ + while (i--) + page_ref_sub(rx->data.page_info[i].page, + rx->data.page_info[i].pagecnt_bias - 1); + + gve_unassign_qpl(priv, rx->data.qpl->id); + rx->data.qpl = NULL; + + return err; + +alloc_err_rda: + while (i--) + gve_rx_free_buffer(&priv->pdev->dev, + &rx->data.page_info[i], + &rx->data.data_ring[i]); + return err; +} + +static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx) +{ + ctx->skb_head = NULL; + ctx->skb_tail = NULL; + ctx->total_size = 0; + ctx->frag_cnt = 0; + ctx->drop_pkt = false; +} + +static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + struct device *hdev = &priv->pdev->dev; + u32 slots, npages; + int filled_pages; + size_t bytes; + int err; + + netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); + /* Make sure everything is zeroed to start with */ + memset(rx, 0, sizeof(*rx)); + + rx->gve = priv; + rx->q_num = idx; + + slots = priv->rx_data_slot_cnt; + rx->mask = slots - 1; + rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; + + /* alloc rx data ring */ + bytes = sizeof(*rx->data.data_ring) * slots; + rx->data.data_ring = dma_alloc_coherent(hdev, bytes, + &rx->data.data_bus, + GFP_KERNEL); + if (!rx->data.data_ring) + return -ENOMEM; + + rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1; + rx->qpl_copy_pool_head = 0; + rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1, + sizeof(rx->qpl_copy_pool[0]), + GFP_KERNEL); + + if (!rx->qpl_copy_pool) { + err = -ENOMEM; + goto abort_with_slots; + } + + filled_pages = gve_prefill_rx_pages(rx); + if (filled_pages < 0) { + err = -ENOMEM; + goto abort_with_copy_pool; + } + rx->fill_cnt = filled_pages; + /* Ensure data ring slots (packet buffers) are visible. 
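+ * to the device before the descriptor and doorbell writes that follow.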
*/ + dma_wmb(); + + /* Alloc gve_queue_resources */ + rx->q_resources = + dma_alloc_coherent(hdev, + sizeof(*rx->q_resources), + &rx->q_resources_bus, + GFP_KERNEL); + if (!rx->q_resources) { + err = -ENOMEM; + goto abort_filled; + } + netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx, + (unsigned long)rx->data.data_bus); + + /* alloc rx desc ring */ + bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; + npages = bytes / PAGE_SIZE; + if (npages * PAGE_SIZE != bytes) { + err = -EIO; + goto abort_with_q_resources; + } + + rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, + GFP_KERNEL); + if (!rx->desc.desc_ring) { + err = -ENOMEM; + goto abort_with_q_resources; + } + rx->cnt = 0; + rx->db_threshold = priv->rx_desc_cnt / 2; + rx->desc.seqno = 1; + + /* Allocating half-page buffers allows page-flipping which is faster + * than copying or allocating new pages. + */ + rx->packet_buffer_size = PAGE_SIZE / 2; + gve_rx_ctx_clear(&rx->ctx); + gve_rx_add_to_block(priv, idx); + + return 0; + +abort_with_q_resources: + dma_free_coherent(hdev, sizeof(*rx->q_resources), + rx->q_resources, rx->q_resources_bus); + rx->q_resources = NULL; +abort_filled: + gve_rx_unfill_pages(priv, rx); +abort_with_copy_pool: + kvfree(rx->qpl_copy_pool); + rx->qpl_copy_pool = NULL; +abort_with_slots: + bytes = sizeof(*rx->data.data_ring) * slots; + dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); + rx->data.data_ring = NULL; + + return err; +} + +int gve_rx_alloc_rings(struct gve_priv *priv) +{ + int err = 0; + int i; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + err = gve_rx_alloc_ring(priv, i); + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to alloc rx ring=%d: err=%d\n", + i, err); + break; + } + } + /* Unallocate if there was an error */ + if (err) { + int j; + + for (j = 0; j < i; j++) + gve_rx_free_ring(priv, j); + } + return err; +} + +void gve_rx_free_rings_gqi(struct gve_priv *priv) +{ + int i; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) + gve_rx_free_ring(priv, i); +} + +void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) +{ + u32 db_idx = be32_to_cpu(rx->q_resources->db_index); + + iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); +} + +static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) +{ + if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP))) + return PKT_HASH_TYPE_L4; + if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6)) + return PKT_HASH_TYPE_L3; + return PKT_HASH_TYPE_L2; +} + +static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, + struct gve_rx_slot_page_info *page_info, + u16 packet_buffer_size, u16 len, + struct gve_rx_ctx *ctx) +{ + u32 offset = page_info->page_offset + page_info->pad; + struct sk_buff *skb = ctx->skb_tail; + int num_frags = 0; + + if (!skb) { + skb = napi_get_frags(napi); + if (unlikely(!skb)) + return NULL; + + ctx->skb_head = skb; + ctx->skb_tail = skb; + } else { + num_frags = skb_shinfo(ctx->skb_tail)->nr_frags; + if (num_frags == MAX_SKB_FRAGS) { + skb = napi_alloc_skb(napi, 0); + if (!skb) + return NULL; + + // We will never chain more than two SKBs: 2 * 16 * 2k > 64k + // which is why we do not need to chain by using skb->next + skb_shinfo(ctx->skb_tail)->frag_list = skb; + + ctx->skb_tail = skb; + num_frags = 0; + } + } + + if (skb != ctx->skb_head) { + ctx->skb_head->len += len; + ctx->skb_head->data_len += len; + ctx->skb_head->truesize += packet_buffer_size; + } + skb_add_rx_frag(skb, num_frags, page_info->page, + offset, len, packet_buffer_size); + + 
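+ /* Always return the head skb; its len, data_len and truesize were updated above even when the frag landed on a chained tail skb. */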
return ctx->skb_head; +} + +static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr) +{ + const __be64 offset = cpu_to_be64(PAGE_SIZE / 2); + + /* "flip" to other packet buffer on this page */ + page_info->page_offset ^= PAGE_SIZE / 2; + *(slot_addr) ^= offset; +} + +static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info) +{ + int pagecount = page_count(page_info->page); + + /* This page is not being used by any SKBs - reuse */ + if (pagecount == page_info->pagecnt_bias) + return 1; + /* This page is still being used by an SKB - we can't reuse */ + else if (pagecount > page_info->pagecnt_bias) + return 0; + WARN(pagecount < page_info->pagecnt_bias, + "Pagecount should never be less than the bias."); + return -1; +} + +static struct sk_buff * +gve_rx_raw_addressing(struct device *dev, struct net_device *netdev, + struct gve_rx_slot_page_info *page_info, u16 len, + struct napi_struct *napi, + union gve_rx_data_slot *data_slot, + u16 packet_buffer_size, struct gve_rx_ctx *ctx) +{ + struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx); + + if (!skb) + return NULL; + + /* Optimistically stop the kernel from freeing the page. + * We will check again in refill to determine if we need to alloc a + * new page. + */ + gve_dec_pagecnt_bias(page_info); + + return skb; +} + +static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx, + struct gve_rx_slot_page_info *page_info, + u16 len, struct napi_struct *napi) +{ + u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask; + void *src = page_info->page_address + page_info->page_offset; + struct gve_rx_slot_page_info *copy_page_info; + struct gve_rx_ctx *ctx = &rx->ctx; + bool alloc_page = false; + struct sk_buff *skb; + void *dst; + + copy_page_info = &rx->qpl_copy_pool[pool_idx]; + if (!copy_page_info->can_flip) { + int recycle = gve_rx_can_recycle_buffer(copy_page_info); + + if (unlikely(recycle < 0)) { + gve_schedule_reset(rx->gve); + return NULL; + } + alloc_page = !recycle; + } + + if (alloc_page) { + struct gve_rx_slot_page_info alloc_page_info; + struct page *page; + + /* The least recently used page turned out to be + * still in use by the kernel. Ignoring it and moving + * on alleviates head-of-line blocking. + */ + rx->qpl_copy_pool_head++; + + page = alloc_page(GFP_ATOMIC); + if (!page) + return NULL; + + alloc_page_info.page = page; + alloc_page_info.page_offset = 0; + alloc_page_info.page_address = page_address(page); + alloc_page_info.pad = page_info->pad; + + memcpy(alloc_page_info.page_address, src, page_info->pad + len); + skb = gve_rx_add_frags(napi, &alloc_page_info, + rx->packet_buffer_size, + len, ctx); + + u64_stats_update_begin(&rx->statss); + rx->rx_frag_copy_cnt++; + rx->rx_frag_alloc_cnt++; + u64_stats_update_end(&rx->statss); + + return skb; + } + + dst = copy_page_info->page_address + copy_page_info->page_offset; + memcpy(dst, src, page_info->pad + len); + copy_page_info->pad = page_info->pad; + + skb = gve_rx_add_frags(napi, copy_page_info, + rx->packet_buffer_size, len, ctx); + if (unlikely(!skb)) + return NULL; + + gve_dec_pagecnt_bias(copy_page_info); + copy_page_info->page_offset += rx->packet_buffer_size; + copy_page_info->page_offset &= (PAGE_SIZE - 1); + + if (copy_page_info->can_flip) { + /* We have used both halves of this copy page, it + * is time for it to go to the back of the queue. 
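+ * Advancing qpl_copy_pool_head below retires it; the next pool entry is prefetched.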
+ */ + copy_page_info->can_flip = false; + rx->qpl_copy_pool_head++; + prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page); + } else { + copy_page_info->can_flip = true; + } + + u64_stats_update_begin(&rx->statss); + rx->rx_frag_copy_cnt++; + u64_stats_update_end(&rx->statss); + + return skb; +} + +static struct sk_buff * +gve_rx_qpl(struct device *dev, struct net_device *netdev, + struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, + u16 len, struct napi_struct *napi, + union gve_rx_data_slot *data_slot) +{ + struct gve_rx_ctx *ctx = &rx->ctx; + struct sk_buff *skb; + + /* if raw_addressing mode is not enabled gvnic can only receive into + * registered segments. If the buffer can't be recycled, our only + * choice is to copy the data out of it so that we can return it to the + * device. + */ + if (page_info->can_flip) { + skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx); + /* No point in recycling if we didn't get the skb */ + if (skb) { + /* Make sure that the page isn't freed. */ + gve_dec_pagecnt_bias(page_info); + gve_rx_flip_buff(page_info, &data_slot->qpl_offset); + } + } else { + skb = gve_rx_copy_to_pool(rx, page_info, len, napi); + } + return skb; +} + +static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_slot_page_info *page_info, struct napi_struct *napi, + u16 len, union gve_rx_data_slot *data_slot, + bool is_only_frag) +{ + struct net_device *netdev = priv->dev; + struct gve_rx_ctx *ctx = &rx->ctx; + struct sk_buff *skb = NULL; + + if (len <= priv->rx_copybreak && is_only_frag) { + /* Just copy small packets */ + skb = gve_rx_copy(netdev, napi, page_info, len); + if (skb) { + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + rx->rx_frag_copy_cnt++; + rx->rx_copybreak_pkt++; + u64_stats_update_end(&rx->statss); + } + } else { + int recycle = gve_rx_can_recycle_buffer(page_info); + + if (unlikely(recycle < 0)) { + gve_schedule_reset(priv); + return NULL; + } + page_info->can_flip = recycle; + if (page_info->can_flip) { + u64_stats_update_begin(&rx->statss); + rx->rx_frag_flip_cnt++; + u64_stats_update_end(&rx->statss); + } + + if (rx->data.raw_addressing) { + skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev, + page_info, len, napi, + data_slot, + rx->packet_buffer_size, ctx); + } else { + skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx, + page_info, len, napi, data_slot); + } + } + return skb; +} + +static int gve_xsk_pool_redirect(struct net_device *dev, + struct gve_rx_ring *rx, + void *data, int len, + struct bpf_prog *xdp_prog) +{ + struct xdp_buff *xdp; + int err; + + if (rx->xsk_pool->frame_len < len) + return -E2BIG; + xdp = xsk_buff_alloc(rx->xsk_pool); + if (!xdp) { + u64_stats_update_begin(&rx->statss); + rx->xdp_alloc_fails++; + u64_stats_update_end(&rx->statss); + return -ENOMEM; + } + xdp->data_end = xdp->data + len; + memcpy(xdp->data, data, len); + err = xdp_do_redirect(dev, xdp, xdp_prog); + if (err) + xsk_buff_free(xdp); + return err; +} + +static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx, + struct xdp_buff *orig, struct bpf_prog *xdp_prog) +{ + int total_len, len = orig->data_end - orig->data; + int headroom = XDP_PACKET_HEADROOM; + struct xdp_buff new; + void *frame; + int err; + + if (rx->xsk_pool) + return gve_xsk_pool_redirect(dev, rx, orig->data, + len, xdp_prog); + + total_len = headroom + SKB_DATA_ALIGN(len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + frame = page_frag_alloc(&rx->page_cache, 
total_len, GFP_ATOMIC); + if (!frame) { + u64_stats_update_begin(&rx->statss); + rx->xdp_alloc_fails++; + u64_stats_update_end(&rx->statss); + return -ENOMEM; + } + xdp_init_buff(&new, total_len, &rx->xdp_rxq); + xdp_prepare_buff(&new, frame, headroom, len, false); + memcpy(new.data, orig->data, len); + + err = xdp_do_redirect(dev, &new, xdp_prog); + if (err) + page_frag_free(frame); + + return err; +} + +static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx, + struct xdp_buff *xdp, struct bpf_prog *xprog, + int xdp_act) +{ + struct gve_tx_ring *tx; + int tx_qid; + int err; + + switch (xdp_act) { + case XDP_ABORTED: + case XDP_DROP: + default: + break; + case XDP_TX: + tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num); + tx = &priv->tx[tx_qid]; + spin_lock(&tx->xdp_lock); + err = gve_xdp_xmit_one(priv, tx, xdp->data, + xdp->data_end - xdp->data, NULL); + spin_unlock(&tx->xdp_lock); + + if (unlikely(err)) { + u64_stats_update_begin(&rx->statss); + rx->xdp_tx_errors++; + u64_stats_update_end(&rx->statss); + } + break; + case XDP_REDIRECT: + err = gve_xdp_redirect(priv->dev, rx, xdp, xprog); + + if (unlikely(err)) { + u64_stats_update_begin(&rx->statss); + rx->xdp_redirect_errors++; + u64_stats_update_end(&rx->statss); + } + break; + } + u64_stats_update_begin(&rx->statss); + if ((u32)xdp_act < GVE_XDP_ACTIONS) + rx->xdp_actions[xdp_act]++; + u64_stats_update_end(&rx->statss); +} + +#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x)) +static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, + struct gve_rx_desc *desc, u32 idx, + struct gve_rx_cnts *cnts) +{ + bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq); + struct gve_rx_slot_page_info *page_info; + u16 frag_size = be16_to_cpu(desc->len); + struct gve_rx_ctx *ctx = &rx->ctx; + union gve_rx_data_slot *data_slot; + struct gve_priv *priv = rx->gve; + struct sk_buff *skb = NULL; + struct bpf_prog *xprog; + struct xdp_buff xdp; + dma_addr_t page_bus; + void *va; + + u16 len = frag_size; + struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; + bool is_first_frag = ctx->frag_cnt == 0; + + bool is_only_frag = is_first_frag && is_last_frag; + + if (unlikely(ctx->drop_pkt)) + goto finish_frag; + + if (desc->flags_seq & GVE_RXF_ERR) { + ctx->drop_pkt = true; + cnts->desc_err_pkt_cnt++; + napi_free_frags(napi); + goto finish_frag; + } + + if (unlikely(frag_size > rx->packet_buffer_size)) { + netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset", + frag_size, rx->packet_buffer_size); + ctx->drop_pkt = true; + napi_free_frags(napi); + gve_schedule_reset(rx->gve); + goto finish_frag; + } + + /* Prefetch two packet buffers ahead, we will need it soon. */ + page_info = &rx->data.page_info[(idx + 2) & rx->mask]; + va = page_info->page_address + page_info->page_offset; + prefetch(page_info->page); /* Kernel page struct. */ + prefetch(va); /* Packet header. */ + prefetch(va + 64); /* Next cacheline too. */ + + page_info = &rx->data.page_info[idx]; + data_slot = &rx->data.data_ring[idx]; + page_bus = (rx->data.raw_addressing) ? + be64_to_cpu(data_slot->addr) - page_info->page_offset : + rx->data.qpl->page_buses[idx]; + dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, + PAGE_SIZE, DMA_FROM_DEVICE); + page_info->pad = is_first_frag ? 
GVE_RX_PAD : 0; + len -= page_info->pad; + frag_size -= page_info->pad; + + xprog = READ_ONCE(priv->xdp_prog); + if (xprog && is_only_frag) { + void *old_data; + int xdp_act; + + xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq); + xdp_prepare_buff(&xdp, page_info->page_address + + page_info->page_offset, GVE_RX_PAD, + len, false); + old_data = xdp.data; + xdp_act = bpf_prog_run_xdp(xprog, &xdp); + if (xdp_act != XDP_PASS) { + gve_xdp_done(priv, rx, &xdp, xprog, xdp_act); + ctx->total_size += frag_size; + goto finish_ok_pkt; + } + + page_info->pad += xdp.data - old_data; + len = xdp.data_end - xdp.data; + + u64_stats_update_begin(&rx->statss); + rx->xdp_actions[XDP_PASS]++; + u64_stats_update_end(&rx->statss); + } + + skb = gve_rx_skb(priv, rx, page_info, napi, len, + data_slot, is_only_frag); + if (!skb) { + u64_stats_update_begin(&rx->statss); + rx->rx_skb_alloc_fail++; + u64_stats_update_end(&rx->statss); + + napi_free_frags(napi); + ctx->drop_pkt = true; + goto finish_frag; + } + ctx->total_size += frag_size; + + if (is_first_frag) { + if (likely(feat & NETIF_F_RXCSUM)) { + /* NIC passes up the partial sum */ + if (desc->csum) + skb->ip_summed = CHECKSUM_COMPLETE; + else + skb->ip_summed = CHECKSUM_NONE; + skb->csum = csum_unfold(desc->csum); + } + + /* parse flags & pass relevant info up */ + if (likely(feat & NETIF_F_RXHASH) && + gve_needs_rss(desc->flags_seq)) + skb_set_hash(skb, be32_to_cpu(desc->rss_hash), + gve_rss_type(desc->flags_seq)); + } + + if (is_last_frag) { + skb_record_rx_queue(skb, rx->q_num); + if (skb_is_nonlinear(skb)) + napi_gro_frags(napi); + else + napi_gro_receive(napi, skb); + goto finish_ok_pkt; + } + + goto finish_frag; + +finish_ok_pkt: + cnts->ok_pkt_bytes += ctx->total_size; + cnts->ok_pkt_cnt++; +finish_frag: + ctx->frag_cnt++; + if (is_last_frag) { + cnts->total_pkt_cnt++; + cnts->cont_pkt_cnt += (ctx->frag_cnt > 1); + gve_rx_ctx_clear(ctx); + } +} + +bool gve_rx_work_pending(struct gve_rx_ring *rx) +{ + struct gve_rx_desc *desc; + __be16 flags_seq; + u32 next_idx; + + next_idx = rx->cnt & rx->mask; + desc = rx->desc.desc_ring + next_idx; + + flags_seq = desc->flags_seq; + + return (GVE_SEQNO(flags_seq) == rx->desc.seqno); +} + +static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) +{ + int refill_target = rx->mask + 1; + u32 fill_cnt = rx->fill_cnt; + + while (fill_cnt - rx->cnt < refill_target) { + struct gve_rx_slot_page_info *page_info; + u32 idx = fill_cnt & rx->mask; + + page_info = &rx->data.page_info[idx]; + if (page_info->can_flip) { + /* The other half of the page is free because it was + * free when we processed the descriptor. Flip to it. + */ + union gve_rx_data_slot *data_slot = + &rx->data.data_ring[idx]; + + gve_rx_flip_buff(page_info, &data_slot->addr); + page_info->can_flip = 0; + } else { + /* It is possible that the networking stack has already + * finished processing all outstanding packets in the buffer + * and it can be reused. + * Flipping is unnecessary here - if the networking stack still + * owns half the page it is impossible to tell which half. Either + * the whole page is free or it needs to be replaced. 
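+ * gve_rx_can_recycle_buffer() compares the page refcount against the pagecnt bias to tell.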
+ */ + int recycle = gve_rx_can_recycle_buffer(page_info); + + if (recycle < 0) { + if (!rx->data.raw_addressing) + gve_schedule_reset(priv); + return false; + } + if (!recycle) { + /* We can't reuse the buffer - alloc a new one*/ + union gve_rx_data_slot *data_slot = + &rx->data.data_ring[idx]; + struct device *dev = &priv->pdev->dev; + gve_rx_free_buffer(dev, page_info, data_slot); + page_info->page = NULL; + if (gve_rx_alloc_buffer(priv, dev, page_info, + data_slot)) { + u64_stats_update_begin(&rx->statss); + rx->rx_buf_alloc_fail++; + u64_stats_update_end(&rx->statss); + break; + } + } + } + fill_cnt++; + } + rx->fill_cnt = fill_cnt; + return true; +} + +static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, + netdev_features_t feat) +{ + u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT]; + u64 xdp_txs = rx->xdp_actions[XDP_TX]; + struct gve_rx_ctx *ctx = &rx->ctx; + struct gve_priv *priv = rx->gve; + struct gve_rx_cnts cnts = {0}; + struct gve_rx_desc *next_desc; + u32 idx = rx->cnt & rx->mask; + u32 work_done = 0; + + struct gve_rx_desc *desc = &rx->desc.desc_ring[idx]; + + // Exceed budget only if (and till) the inflight packet is consumed. + while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && + (work_done < budget || ctx->frag_cnt)) { + next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask]; + prefetch(next_desc); + + gve_rx(rx, feat, desc, idx, &cnts); + + rx->cnt++; + idx = rx->cnt & rx->mask; + desc = &rx->desc.desc_ring[idx]; + rx->desc.seqno = gve_next_seqno(rx->desc.seqno); + work_done++; + } + + // The device will only send whole packets. + if (unlikely(ctx->frag_cnt)) { + struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; + + napi_free_frags(napi); + gve_rx_ctx_clear(&rx->ctx); + netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset", + GVE_SEQNO(desc->flags_seq), rx->desc.seqno); + gve_schedule_reset(rx->gve); + } + + if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold) + return 0; + + if (work_done) { + u64_stats_update_begin(&rx->statss); + rx->rpackets += cnts.ok_pkt_cnt; + rx->rbytes += cnts.ok_pkt_bytes; + rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt; + rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt; + u64_stats_update_end(&rx->statss); + } + + if (xdp_txs != rx->xdp_actions[XDP_TX]) + gve_xdp_tx_flush(priv, rx->q_num); + + if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT]) + xdp_do_flush(); + + /* restock ring slots */ + if (!rx->data.raw_addressing) { + /* In QPL mode buffs are refilled as the desc are processed */ + rx->fill_cnt += work_done; + } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { + /* In raw addressing mode buffs are only refilled if the avail + * falls below a threshold. + */ + if (!gve_rx_refill_buffers(priv, rx)) + return 0; + + /* If we were not able to completely refill buffers, we'll want + * to schedule this queue for work again to refill buffers. 
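+ * Returning the full budget keeps this queue scheduled for another NAPI pass.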
+ */ + if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { + gve_rx_write_doorbell(priv, rx); + return budget; + } + } + + gve_rx_write_doorbell(priv, rx); + return cnts.total_pkt_cnt; +} + +int gve_rx_poll(struct gve_notify_block *block, int budget) +{ + struct gve_rx_ring *rx = block->rx; + netdev_features_t feat; + int work_done = 0; + + feat = block->napi.dev->features; + + if (budget > 0) + work_done = gve_clean_rx_done(rx, budget, feat); + + return work_done; +} diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c new file mode 100644 index 0000000000..f281e42a7e --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -0,0 +1,849 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#include "gve.h" +#include "gve_dqo.h" +#include "gve_adminq.h" +#include "gve_utils.h" +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <net/ip6_checksum.h> +#include <net/ipv6.h> +#include <net/tcp.h> + +static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs) +{ + return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias; +} + +static void gve_free_page_dqo(struct gve_priv *priv, + struct gve_rx_buf_state_dqo *bs, + bool free_page) +{ + page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1); + if (free_page) + gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr, + DMA_FROM_DEVICE); + bs->page_info.page = NULL; +} + +static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx) +{ + struct gve_rx_buf_state_dqo *buf_state; + s16 buffer_id; + + buffer_id = rx->dqo.free_buf_states; + if (unlikely(buffer_id == -1)) + return NULL; + + buf_state = &rx->dqo.buf_states[buffer_id]; + + /* Remove buf_state from free list */ + rx->dqo.free_buf_states = buf_state->next; + + /* Point buf_state to itself to mark it as allocated */ + buf_state->next = buffer_id; + + return buf_state; +} + +static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + s16 buffer_id = buf_state - rx->dqo.buf_states; + + return buf_state->next == buffer_id; +} + +static void gve_free_buf_state(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + s16 buffer_id = buf_state - rx->dqo.buf_states; + + buf_state->next = rx->dqo.free_buf_states; + rx->dqo.free_buf_states = buffer_id; +} + +static struct gve_rx_buf_state_dqo * +gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list) +{ + struct gve_rx_buf_state_dqo *buf_state; + s16 buffer_id; + + buffer_id = list->head; + if (unlikely(buffer_id == -1)) + return NULL; + + buf_state = &rx->dqo.buf_states[buffer_id]; + + /* Remove buf_state from list */ + list->head = buf_state->next; + if (buf_state->next == -1) + list->tail = -1; + + /* Point buf_state to itself to mark it as allocated */ + buf_state->next = buffer_id; + + return buf_state; +} + +static void gve_enqueue_buf_state(struct gve_rx_ring *rx, + struct gve_index_list *list, + struct gve_rx_buf_state_dqo *buf_state) +{ + s16 buffer_id = buf_state - rx->dqo.buf_states; + + buf_state->next = -1; + + if (list->head == -1) { + list->head = buffer_id; + list->tail = buffer_id; + } else { + int tail = list->tail; + + rx->dqo.buf_states[tail].next = buffer_id; + list->tail = buffer_id; + } +} + +static struct gve_rx_buf_state_dqo * +gve_get_recycled_buf_state(struct gve_rx_ring *rx) +{ + struct 
gve_rx_buf_state_dqo *buf_state; + int i; + + /* Recycled buf states are immediately usable. */ + buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); + if (likely(buf_state)) + return buf_state; + + if (unlikely(rx->dqo.used_buf_states.head == -1)) + return NULL; + + /* Used buf states are only usable when ref count reaches 0, which means + * no SKBs refer to them. + * + * Search a limited number before giving up. + */ + for (i = 0; i < 5; i++) { + buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); + if (gve_buf_ref_cnt(buf_state) == 0) { + rx->dqo.used_buf_states_cnt--; + return buf_state; + } + + gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); + } + + /* For QPL, we cannot allocate any new buffers and must + * wait for the existing ones to be available. + */ + if (rx->dqo.qpl) + return NULL; + + /* If there are no free buf states discard an entry from + * `used_buf_states` so it can be used. + */ + if (unlikely(rx->dqo.free_buf_states == -1)) { + buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); + if (gve_buf_ref_cnt(buf_state) == 0) + return buf_state; + + gve_free_page_dqo(rx->gve, buf_state, true); + gve_free_buf_state(rx, buf_state); + } + + return NULL; +} + +static int gve_alloc_page_dqo(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + struct gve_priv *priv = rx->gve; + u32 idx; + + if (!rx->dqo.qpl) { + int err; + + err = gve_alloc_page(priv, &priv->pdev->dev, + &buf_state->page_info.page, + &buf_state->addr, + DMA_FROM_DEVICE, GFP_ATOMIC); + if (err) + return err; + } else { + idx = rx->dqo.next_qpl_page_idx; + if (idx >= priv->rx_pages_per_qpl) { + net_err_ratelimited("%s: Out of QPL pages\n", + priv->dev->name); + return -ENOMEM; + } + buf_state->page_info.page = rx->dqo.qpl->pages[idx]; + buf_state->addr = rx->dqo.qpl->page_buses[idx]; + rx->dqo.next_qpl_page_idx++; + } + buf_state->page_info.page_offset = 0; + buf_state->page_info.page_address = + page_address(buf_state->page_info.page); + buf_state->last_single_ref_offset = 0; + + /* The page already has 1 ref. */ + page_ref_add(buf_state->page_info.page, INT_MAX - 1); + buf_state->page_info.pagecnt_bias = INT_MAX; + + return 0; +} + +static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + struct device *hdev = &priv->pdev->dev; + size_t completion_queue_slots; + size_t buffer_queue_slots; + size_t size; + int i; + + completion_queue_slots = rx->dqo.complq.mask + 1; + buffer_queue_slots = rx->dqo.bufq.mask + 1; + + gve_rx_remove_from_block(priv, idx); + + if (rx->q_resources) { + dma_free_coherent(hdev, sizeof(*rx->q_resources), + rx->q_resources, rx->q_resources_bus); + rx->q_resources = NULL; + } + + for (i = 0; i < rx->dqo.num_buf_states; i++) { + struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; + /* Only free page for RDA. QPL pages are freed in gve_main. 
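+ * For QPL buffers only the pagecnt bias taken at allocation time is dropped here.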
*/ + if (bs->page_info.page) + gve_free_page_dqo(priv, bs, !rx->dqo.qpl); + } + if (rx->dqo.qpl) { + gve_unassign_qpl(priv, rx->dqo.qpl->id); + rx->dqo.qpl = NULL; + } + + if (rx->dqo.bufq.desc_ring) { + size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; + dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring, + rx->dqo.bufq.bus); + rx->dqo.bufq.desc_ring = NULL; + } + + if (rx->dqo.complq.desc_ring) { + size = sizeof(rx->dqo.complq.desc_ring[0]) * + completion_queue_slots; + dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring, + rx->dqo.complq.bus); + rx->dqo.complq.desc_ring = NULL; + } + + kvfree(rx->dqo.buf_states); + rx->dqo.buf_states = NULL; + + netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); +} + +static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + struct device *hdev = &priv->pdev->dev; + size_t size; + int i; + + const u32 buffer_queue_slots = + priv->queue_format == GVE_DQO_RDA_FORMAT ? + priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt; + const u32 completion_queue_slots = priv->rx_desc_cnt; + + netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); + + memset(rx, 0, sizeof(*rx)); + rx->gve = priv; + rx->q_num = idx; + rx->dqo.bufq.mask = buffer_queue_slots - 1; + rx->dqo.complq.num_free_slots = completion_queue_slots; + rx->dqo.complq.mask = completion_queue_slots - 1; + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; + + rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ? + min_t(s16, S16_MAX, buffer_queue_slots * 4) : + priv->rx_pages_per_qpl; + rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, + sizeof(rx->dqo.buf_states[0]), + GFP_KERNEL); + if (!rx->dqo.buf_states) + return -ENOMEM; + + /* Set up linked list of buffer IDs */ + for (i = 0; i < rx->dqo.num_buf_states - 1; i++) + rx->dqo.buf_states[i].next = i + 1; + + rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; + rx->dqo.recycled_buf_states.head = -1; + rx->dqo.recycled_buf_states.tail = -1; + rx->dqo.used_buf_states.head = -1; + rx->dqo.used_buf_states.tail = -1; + + /* Allocate RX completion queue */ + size = sizeof(rx->dqo.complq.desc_ring[0]) * + completion_queue_slots; + rx->dqo.complq.desc_ring = + dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL); + if (!rx->dqo.complq.desc_ring) + goto err; + + /* Allocate RX buffer queue */ + size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; + rx->dqo.bufq.desc_ring = + dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL); + if (!rx->dqo.bufq.desc_ring) + goto err; + + if (priv->queue_format != GVE_DQO_RDA_FORMAT) { + rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num); + if (!rx->dqo.qpl) + goto err; + rx->dqo.next_qpl_page_idx = 0; + } + + rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources), + &rx->q_resources_bus, GFP_KERNEL); + if (!rx->q_resources) + goto err; + + gve_rx_add_to_block(priv, idx); + + return 0; + +err: + gve_rx_free_ring_dqo(priv, idx); + return -ENOMEM; +} + +void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx) +{ + const struct gve_rx_ring *rx = &priv->rx[queue_idx]; + u64 index = be32_to_cpu(rx->q_resources->db_index); + + iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); +} + +int gve_rx_alloc_rings_dqo(struct gve_priv *priv) +{ + int err = 0; + int i; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) { + err = gve_rx_alloc_ring_dqo(priv, i); + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to alloc rx ring=%d: err=%d\n", + i, err); + 
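+ /* Rings allocated so far are unwound at the err label below. */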
goto err; + } + } + + return 0; + +err: + for (i--; i >= 0; i--) + gve_rx_free_ring_dqo(priv, i); + + return err; +} + +void gve_rx_free_rings_dqo(struct gve_priv *priv) +{ + int i; + + for (i = 0; i < priv->rx_cfg.num_queues; i++) + gve_rx_free_ring_dqo(priv, i); +} + +void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) +{ + struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; + struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; + struct gve_priv *priv = rx->gve; + u32 num_avail_slots; + u32 num_full_slots; + u32 num_posted = 0; + + num_full_slots = (bufq->tail - bufq->head) & bufq->mask; + num_avail_slots = bufq->mask - num_full_slots; + + num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots); + while (num_posted < num_avail_slots) { + struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail]; + struct gve_rx_buf_state_dqo *buf_state; + + buf_state = gve_get_recycled_buf_state(rx); + if (unlikely(!buf_state)) { + buf_state = gve_alloc_buf_state(rx); + if (unlikely(!buf_state)) + break; + + if (unlikely(gve_alloc_page_dqo(rx, buf_state))) { + u64_stats_update_begin(&rx->statss); + rx->rx_buf_alloc_fail++; + u64_stats_update_end(&rx->statss); + gve_free_buf_state(rx, buf_state); + break; + } + } + + desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); + desc->buf_addr = cpu_to_le64(buf_state->addr + + buf_state->page_info.page_offset); + + bufq->tail = (bufq->tail + 1) & bufq->mask; + complq->num_free_slots--; + num_posted++; + + if ((bufq->tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0) + gve_rx_write_doorbell_dqo(priv, rx->q_num); + } + + rx->fill_cnt += num_posted; +} + +static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state) +{ + const int data_buffer_size = priv->data_buffer_size_dqo; + int pagecount; + + /* Can't reuse if we only fit one buffer per page */ + if (data_buffer_size * 2 > PAGE_SIZE) + goto mark_used; + + pagecount = gve_buf_ref_cnt(buf_state); + + /* Record the offset when we have a single remaining reference. + * + * When this happens, we know all of the other offsets of the page are + * usable. + */ + if (pagecount == 1) { + buf_state->last_single_ref_offset = + buf_state->page_info.page_offset; + } + + /* Use the next buffer sized chunk in the page. */ + buf_state->page_info.page_offset += data_buffer_size; + buf_state->page_info.page_offset &= (PAGE_SIZE - 1); + + /* If we wrap around to the same offset without ever dropping to 1 + * reference, then we don't know if this offset was ever freed. + */ + if (buf_state->page_info.page_offset == + buf_state->last_single_ref_offset) { + goto mark_used; + } + + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + return; + +mark_used: + gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); + rx->dqo.used_buf_states_cnt++; +} + +static void gve_rx_skb_csum(struct sk_buff *skb, + const struct gve_rx_compl_desc_dqo *desc, + struct gve_ptype ptype) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* HW did not identify and process L3 and L4 headers. */ + if (unlikely(!desc->l3_l4_processed)) + return; + + if (ptype.l3_type == GVE_L3_TYPE_IPV4) { + if (unlikely(desc->csum_ip_err || desc->csum_external_ip_err)) + return; + } else if (ptype.l3_type == GVE_L3_TYPE_IPV6) { + /* Checksum should be skipped if this flag is set. 
*/ + if (unlikely(desc->ipv6_ex_add)) + return; + } + + if (unlikely(desc->csum_l4_err)) + return; + + switch (ptype.l4_type) { + case GVE_L4_TYPE_TCP: + case GVE_L4_TYPE_UDP: + case GVE_L4_TYPE_ICMP: + case GVE_L4_TYPE_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + break; + } +} + +static void gve_rx_skb_hash(struct sk_buff *skb, + const struct gve_rx_compl_desc_dqo *compl_desc, + struct gve_ptype ptype) +{ + enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2; + + if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN) + hash_type = PKT_HASH_TYPE_L4; + else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN) + hash_type = PKT_HASH_TYPE_L3; + + skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type); +} + +static void gve_rx_free_skb(struct gve_rx_ring *rx) +{ + if (!rx->ctx.skb_head) + return; + + dev_kfree_skb_any(rx->ctx.skb_head); + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; +} + +static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx) +{ + if (!rx->dqo.qpl) + return false; + if (rx->dqo.used_buf_states_cnt < + (rx->dqo.num_buf_states - + GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD)) + return false; + return true; +} + +static int gve_rx_copy_ondemand(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state, + u16 buf_len) +{ + struct page *page = alloc_page(GFP_ATOMIC); + int num_frags; + + if (!page) + return -ENOMEM; + + memcpy(page_address(page), + buf_state->page_info.page_address + + buf_state->page_info.page_offset, + buf_len); + num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, + 0, buf_len, PAGE_SIZE); + + u64_stats_update_begin(&rx->statss); + rx->rx_frag_alloc_cnt++; + u64_stats_update_end(&rx->statss); + /* Return unused buffer. */ + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + return 0; +} + +/* Chains multi skbs for single rx packet. + * Returns 0 if buffer is appended, -1 otherwise. + */ +static int gve_rx_append_frags(struct napi_struct *napi, + struct gve_rx_buf_state_dqo *buf_state, + u16 buf_len, struct gve_rx_ring *rx, + struct gve_priv *priv) +{ + int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; + + if (unlikely(num_frags == MAX_SKB_FRAGS)) { + struct sk_buff *skb; + + skb = napi_alloc_skb(napi, 0); + if (!skb) + return -1; + + if (rx->ctx.skb_tail == rx->ctx.skb_head) + skb_shinfo(rx->ctx.skb_head)->frag_list = skb; + else + rx->ctx.skb_tail->next = skb; + rx->ctx.skb_tail = skb; + num_frags = 0; + } + if (rx->ctx.skb_tail != rx->ctx.skb_head) { + rx->ctx.skb_head->len += buf_len; + rx->ctx.skb_head->data_len += buf_len; + rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo; + } + + /* Trigger ondemand page allocation if we are running low on buffers */ + if (gve_rx_should_trigger_copy_ondemand(rx)) + return gve_rx_copy_ondemand(rx, buf_state, buf_len); + + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, + buf_state->page_info.page, + buf_state->page_info.page_offset, + buf_len, priv->data_buffer_size_dqo); + gve_dec_pagecnt_bias(&buf_state->page_info); + + /* Advances buffer page-offset if page is partially used. + * Marks buffer as used if page is full. + */ + gve_try_recycle_buf(priv, rx, buf_state); + return 0; +} + +/* Returns 0 if descriptor is completed successfully. + * Returns -EINVAL if descriptor is invalid. + * Returns -ENOMEM if data cannot be copied to skb. 
+ */ +static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, + const struct gve_rx_compl_desc_dqo *compl_desc, + int queue_idx) +{ + const u16 buffer_id = le16_to_cpu(compl_desc->buf_id); + const bool eop = compl_desc->end_of_packet != 0; + struct gve_rx_buf_state_dqo *buf_state; + struct gve_priv *priv = rx->gve; + u16 buf_len; + + if (unlikely(buffer_id >= rx->dqo.num_buf_states)) { + net_err_ratelimited("%s: Invalid RX buffer_id=%u\n", + priv->dev->name, buffer_id); + return -EINVAL; + } + buf_state = &rx->dqo.buf_states[buffer_id]; + if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) { + net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n", + priv->dev->name, buffer_id); + return -EINVAL; + } + + if (unlikely(compl_desc->rx_error)) { + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, + buf_state); + return -EINVAL; + } + + buf_len = compl_desc->packet_len; + + /* Page might have not been used for awhile and was likely last written + * by a different thread. + */ + prefetch(buf_state->page_info.page); + + /* Sync the portion of dma buffer for CPU to read. */ + dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr, + buf_state->page_info.page_offset, + buf_len, DMA_FROM_DEVICE); + + /* Append to current skb if one exists. */ + if (rx->ctx.skb_head) { + if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, + priv)) != 0) { + goto error; + } + return 0; + } + + if (eop && buf_len <= priv->rx_copybreak) { + rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, + &buf_state->page_info, buf_len); + if (unlikely(!rx->ctx.skb_head)) + goto error; + rx->ctx.skb_tail = rx->ctx.skb_head; + + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + rx->rx_copybreak_pkt++; + u64_stats_update_end(&rx->statss); + + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, + buf_state); + return 0; + } + + rx->ctx.skb_head = napi_get_frags(napi); + if (unlikely(!rx->ctx.skb_head)) + goto error; + rx->ctx.skb_tail = rx->ctx.skb_head; + + if (gve_rx_should_trigger_copy_ondemand(rx)) { + if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0) + goto error; + return 0; + } + + skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, + buf_state->page_info.page_offset, buf_len, + priv->data_buffer_size_dqo); + gve_dec_pagecnt_bias(&buf_state->page_info); + + gve_try_recycle_buf(priv, rx, buf_state); + return 0; + +error: + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + return -ENOMEM; +} + +static int gve_rx_complete_rsc(struct sk_buff *skb, + const struct gve_rx_compl_desc_dqo *desc, + struct gve_ptype ptype) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + + /* Only TCP is supported right now. */ + if (ptype.l4_type != GVE_L4_TYPE_TCP) + return -EINVAL; + + switch (ptype.l3_type) { + case GVE_L3_TYPE_IPV4: + shinfo->gso_type = SKB_GSO_TCPV4; + break; + case GVE_L3_TYPE_IPV6: + shinfo->gso_type = SKB_GSO_TCPV6; + break; + default: + return -EINVAL; + } + + shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len); + return 0; +} + +/* Returns 0 if skb is completed successfully, -1 otherwise. 
*/ +static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, + const struct gve_rx_compl_desc_dqo *desc, + netdev_features_t feat) +{ + struct gve_ptype ptype = + rx->gve->ptype_lut_dqo->ptypes[desc->packet_type]; + int err; + + skb_record_rx_queue(rx->ctx.skb_head, rx->q_num); + + if (feat & NETIF_F_RXHASH) + gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype); + + if (feat & NETIF_F_RXCSUM) + gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); + + /* RSC packets must set gso_size otherwise the TCP stack will complain + * that packets are larger than MTU. + */ + if (desc->rsc) { + err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype); + if (err < 0) + return err; + } + + if (skb_headlen(rx->ctx.skb_head) == 0) + napi_gro_frags(napi); + else + napi_gro_receive(napi, rx->ctx.skb_head); + + return 0; +} + +int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) +{ + struct napi_struct *napi = &block->napi; + netdev_features_t feat = napi->dev->features; + + struct gve_rx_ring *rx = block->rx; + struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; + + u32 work_done = 0; + u64 bytes = 0; + int err; + + while (work_done < budget) { + struct gve_rx_compl_desc_dqo *compl_desc = + &complq->desc_ring[complq->head]; + u32 pkt_bytes; + + /* No more new packets */ + if (compl_desc->generation == complq->cur_gen_bit) + break; + + /* Prefetch the next two descriptors. */ + prefetch(&complq->desc_ring[(complq->head + 1) & complq->mask]); + prefetch(&complq->desc_ring[(complq->head + 2) & complq->mask]); + + /* Do not read data until we own the descriptor */ + dma_rmb(); + + err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num); + if (err < 0) { + gve_rx_free_skb(rx); + u64_stats_update_begin(&rx->statss); + if (err == -ENOMEM) + rx->rx_skb_alloc_fail++; + else if (err == -EINVAL) + rx->rx_desc_err_dropped_pkt++; + u64_stats_update_end(&rx->statss); + } + + complq->head = (complq->head + 1) & complq->mask; + complq->num_free_slots++; + + /* When the ring wraps, the generation bit is flipped. */ + complq->cur_gen_bit ^= (complq->head == 0); + + /* Receiving a completion means we have space to post another + * buffer on the buffer queue. + */ + { + struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; + + bufq->head = (bufq->head + 1) & bufq->mask; + } + + /* Free running counter of completed descriptors */ + rx->cnt++; + + if (!rx->ctx.skb_head) + continue; + + if (!compl_desc->end_of_packet) + continue; + + work_done++; + pkt_bytes = rx->ctx.skb_head->len; + /* The ethernet header (first ETH_HLEN bytes) is snipped off + * by eth_type_trans. 
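+ * so it is added back to the byte count when the head skb carries linear data.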
+ */ + if (skb_headlen(rx->ctx.skb_head)) + pkt_bytes += ETH_HLEN; + + /* gve_rx_complete_skb() will consume skb if successful */ + if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { + gve_rx_free_skb(rx); + u64_stats_update_begin(&rx->statss); + rx->rx_desc_err_dropped_pkt++; + u64_stats_update_end(&rx->statss); + continue; + } + + bytes += pkt_bytes; + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; + } + + gve_rx_post_buffers_dqo(rx); + + u64_stats_update_begin(&rx->statss); + rx->rpackets += work_done; + rx->rbytes += bytes; + u64_stats_update_end(&rx->statss); + + return work_done; +} diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c new file mode 100644 index 0000000000..9f6ffc4a54 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -0,0 +1,979 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#include "gve.h" +#include "gve_adminq.h" +#include "gve_utils.h" +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/vmalloc.h> +#include <linux/skbuff.h> +#include <net/xdp_sock_drv.h> + +static inline void gve_tx_put_doorbell(struct gve_priv *priv, + struct gve_queue_resources *q_resources, + u32 val) +{ + iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]); +} + +void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid) +{ + u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid); + struct gve_tx_ring *tx = &priv->tx[tx_qid]; + + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); +} + +/* gvnic can only transmit from a Registered Segment. + * We copy skb payloads into the registered segment before writing Tx + * descriptors and ringing the Tx doorbell. + * + * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must + * free allocations in the order they were allocated. + */ + +static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo) +{ + fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP, + PAGE_KERNEL); + if (unlikely(!fifo->base)) { + netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n", + fifo->qpl->id); + return -ENOMEM; + } + + fifo->size = fifo->qpl->num_entries * PAGE_SIZE; + atomic_set(&fifo->available, fifo->size); + fifo->head = 0; + return 0; +} + +static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo) +{ + WARN(atomic_read(&fifo->available) != fifo->size, + "Releasing non-empty fifo"); + + vunmap(fifo->base); +} + +static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo, + size_t bytes) +{ + return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head; +} + +static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes) +{ + return (atomic_read(&fifo->available) <= bytes) ? false : true; +} + +/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO + * @fifo: FIFO to allocate from + * @bytes: Allocation size + * @iov: Scatter-gather elements to fill with allocation fragment base/len + * + * Returns number of valid elements in iov[] or negative on error. + * + * Allocations from a given FIFO must be externally synchronized but concurrent + * allocation and frees are allowed. 
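+ * (gve_tx_free_fifo() only performs an atomic_add() on fifo->available).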
+ */ +static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes, + struct gve_tx_iovec iov[2]) +{ + size_t overflow, padding; + u32 aligned_head; + int nfrags = 0; + + if (!bytes) + return 0; + + /* This check happens before we know how much padding is needed to + * align to a cacheline boundary for the payload, but that is fine, + * because the FIFO head always start aligned, and the FIFO's boundaries + * are aligned, so if there is space for the data, there is space for + * the padding to the next alignment. + */ + WARN(!gve_tx_fifo_can_alloc(fifo, bytes), + "Reached %s when there's not enough space in the fifo", __func__); + + nfrags++; + + iov[0].iov_offset = fifo->head; + iov[0].iov_len = bytes; + fifo->head += bytes; + + if (fifo->head > fifo->size) { + /* If the allocation did not fit in the tail fragment of the + * FIFO, also use the head fragment. + */ + nfrags++; + overflow = fifo->head - fifo->size; + iov[0].iov_len -= overflow; + iov[1].iov_offset = 0; /* Start of fifo*/ + iov[1].iov_len = overflow; + + fifo->head = overflow; + } + + /* Re-align to a cacheline boundary */ + aligned_head = L1_CACHE_ALIGN(fifo->head); + padding = aligned_head - fifo->head; + iov[nfrags - 1].iov_padding = padding; + atomic_sub(bytes + padding, &fifo->available); + fifo->head = aligned_head; + + if (fifo->head == fifo->size) + fifo->head = 0; + + return nfrags; +} + +/* gve_tx_free_fifo - Return space to Tx FIFO + * @fifo: FIFO to return fragments to + * @bytes: Bytes to free + */ +static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes) +{ + atomic_add(bytes, &fifo->available); +} + +static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info) +{ + size_t space_freed = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(info->iov); i++) { + space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; + info->iov[i].iov_len = 0; + info->iov[i].iov_padding = 0; + } + return space_freed; +} + +static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, + u32 to_do) +{ + struct gve_tx_buffer_state *info; + u32 clean_end = tx->done + to_do; + u64 pkts = 0, bytes = 0; + size_t space_freed = 0; + u32 xsk_complete = 0; + u32 idx; + + for (; tx->done < clean_end; tx->done++) { + idx = tx->done & tx->mask; + info = &tx->info[idx]; + + if (unlikely(!info->xdp.size)) + continue; + + bytes += info->xdp.size; + pkts++; + xsk_complete += info->xdp.is_xsk; + + info->xdp.size = 0; + if (info->xdp_frame) { + xdp_return_frame(info->xdp_frame); + info->xdp_frame = NULL; + } + space_freed += gve_tx_clear_buffer_state(info); + } + + gve_tx_free_fifo(&tx->tx_fifo, space_freed); + if (xsk_complete > 0 && tx->xsk_pool) + xsk_tx_completed(tx->xsk_pool, xsk_complete); + u64_stats_update_begin(&tx->statss); + tx->bytes_done += bytes; + tx->pkt_done += pkts; + u64_stats_update_end(&tx->statss); + return pkts; +} + +static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, + u32 to_do, bool try_to_wake); + +static void gve_tx_free_ring(struct gve_priv *priv, int idx) +{ + struct gve_tx_ring *tx = &priv->tx[idx]; + struct device *hdev = &priv->pdev->dev; + size_t bytes; + u32 slots; + + gve_tx_remove_from_block(priv, idx); + slots = tx->mask + 1; + if (tx->q_num < priv->tx_cfg.num_queues) { + gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); + netdev_tx_reset_queue(tx->netdev_txq); + } else { + gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt); + } + + dma_free_coherent(hdev, sizeof(*tx->q_resources), + tx->q_resources, tx->q_resources_bus); + tx->q_resources = 
NULL; + + if (!tx->raw_addressing) { + gve_tx_fifo_release(priv, &tx->tx_fifo); + gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); + tx->tx_fifo.qpl = NULL; + } + + bytes = sizeof(*tx->desc) * slots; + dma_free_coherent(hdev, bytes, tx->desc, tx->bus); + tx->desc = NULL; + + vfree(tx->info); + tx->info = NULL; + + netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); +} + +static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) +{ + struct gve_tx_ring *tx = &priv->tx[idx]; + struct device *hdev = &priv->pdev->dev; + u32 slots = priv->tx_desc_cnt; + size_t bytes; + + /* Make sure everything is zeroed to start */ + memset(tx, 0, sizeof(*tx)); + spin_lock_init(&tx->clean_lock); + spin_lock_init(&tx->xdp_lock); + tx->q_num = idx; + + tx->mask = slots - 1; + + /* alloc metadata */ + tx->info = vcalloc(slots, sizeof(*tx->info)); + if (!tx->info) + return -ENOMEM; + + /* alloc tx queue */ + bytes = sizeof(*tx->desc) * slots; + tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); + if (!tx->desc) + goto abort_with_info; + + tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; + tx->dev = &priv->pdev->dev; + if (!tx->raw_addressing) { + tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx); + if (!tx->tx_fifo.qpl) + goto abort_with_desc; + /* map Tx FIFO */ + if (gve_tx_fifo_init(priv, &tx->tx_fifo)) + goto abort_with_qpl; + } + + tx->q_resources = + dma_alloc_coherent(hdev, + sizeof(*tx->q_resources), + &tx->q_resources_bus, + GFP_KERNEL); + if (!tx->q_resources) + goto abort_with_fifo; + + netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx, + (unsigned long)tx->bus); + if (idx < priv->tx_cfg.num_queues) + tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); + gve_tx_add_to_block(priv, idx); + + return 0; + +abort_with_fifo: + if (!tx->raw_addressing) + gve_tx_fifo_release(priv, &tx->tx_fifo); +abort_with_qpl: + if (!tx->raw_addressing) + gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); +abort_with_desc: + dma_free_coherent(hdev, bytes, tx->desc, tx->bus); + tx->desc = NULL; +abort_with_info: + vfree(tx->info); + tx->info = NULL; + return -ENOMEM; +} + +int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings) +{ + int err = 0; + int i; + + for (i = start_id; i < start_id + num_rings; i++) { + err = gve_tx_alloc_ring(priv, i); + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to alloc tx ring=%d: err=%d\n", + i, err); + break; + } + } + /* Unallocate if there was an error */ + if (err) { + int j; + + for (j = start_id; j < i; j++) + gve_tx_free_ring(priv, j); + } + return err; +} + +void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings) +{ + int i; + + for (i = start_id; i < start_id + num_rings; i++) + gve_tx_free_ring(priv, i); +} + +/* gve_tx_avail - Calculates the number of slots available in the ring + * @tx: tx ring to check + * + * Returns the number of slots available + * + * The capacity of the queue is mask + 1. We don't need to reserve an entry. + **/ +static inline u32 gve_tx_avail(struct gve_tx_ring *tx) +{ + return tx->mask + 1 - (tx->req - tx->done); +} + +static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + int pad_bytes, align_hdr_pad; + int bytes; + int hlen; + + hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) : + min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len); + + pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, + hlen); + /* We need to take into account the header alignment padding. 
*/ + align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen; + bytes = align_hdr_pad + pad_bytes + skb->len; + + return bytes; +} + +/* The most descriptors we could need is MAX_SKB_FRAGS + 4 : + * 1 for each skb frag + * 1 for the skb linear portion + * 1 for when tcp hdr needs to be in separate descriptor + * 1 if the payload wraps to the beginning of the FIFO + * 1 for metadata descriptor + */ +#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4) +static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info) +{ + if (info->skb) { + dma_unmap_single(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), + DMA_TO_DEVICE); + dma_unmap_len_set(info, len, 0); + } else { + dma_unmap_page(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), + DMA_TO_DEVICE); + dma_unmap_len_set(info, len, 0); + } +} + +/* Check if sufficient resources (descriptor ring space, FIFO space) are + * available to transmit the given number of bytes. + */ +static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) +{ + bool can_alloc = true; + + if (!tx->raw_addressing) + can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); + + return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); +} + +static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED); + +/* Stops the queue if the skb cannot be transmitted. */ +static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + int bytes_required = 0; + u32 nic_done; + u32 to_do; + int ret; + + if (!tx->raw_addressing) + bytes_required = gve_skb_fifo_bytes_required(tx, skb); + + if (likely(gve_can_tx(tx, bytes_required))) + return 0; + + ret = -EBUSY; + spin_lock(&tx->clean_lock); + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = nic_done - tx->done; + + /* Only try to clean if there is hope for TX */ + if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { + if (to_do > 0) { + to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT); + gve_clean_tx_done(priv, tx, to_do, false); + } + if (likely(gve_can_tx(tx, bytes_required))) + ret = 0; + } + if (ret) { + /* No space, so stop the queue */ + tx->stop_queue++; + netif_tx_stop_queue(tx->netdev_txq); + } + spin_unlock(&tx->clean_lock); + + return ret; +} + +static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc, + u16 csum_offset, u8 ip_summed, bool is_gso, + int l4_hdr_offset, u32 desc_cnt, + u16 hlen, u64 addr, u16 pkt_len) +{ + /* l4_hdr_offset and csum_offset are in units of 16-bit words */ + if (is_gso) { + pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM; + pkt_desc->pkt.l4_csum_offset = csum_offset >> 1; + pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1; + } else if (likely(ip_summed == CHECKSUM_PARTIAL)) { + pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM; + pkt_desc->pkt.l4_csum_offset = csum_offset >> 1; + pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1; + } else { + pkt_desc->pkt.type_flags = GVE_TXD_STD; + pkt_desc->pkt.l4_csum_offset = 0; + pkt_desc->pkt.l4_hdr_offset = 0; + } + pkt_desc->pkt.desc_cnt = desc_cnt; + pkt_desc->pkt.len = cpu_to_be16(pkt_len); + pkt_desc->pkt.seg_len = cpu_to_be16(hlen); + pkt_desc->pkt.seg_addr = cpu_to_be64(addr); +} + +static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc, + struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt)); + + mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH; + mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT | + GVE_MTD_PATH_HASH_L4; + mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash); + 
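+	/* The remaining metadata fields are reserved; clear them to zero. */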
mtd_desc->mtd.reserved0 = 0; + mtd_desc->mtd.reserved1 = 0; +} + +static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc, + u16 l3_offset, u16 gso_size, + bool is_gso_v6, bool is_gso, + u16 len, u64 addr) +{ + seg_desc->seg.type_flags = GVE_TXD_SEG; + if (is_gso) { + if (is_gso_v6) + seg_desc->seg.type_flags |= GVE_TXSF_IPV6; + seg_desc->seg.l3_offset = l3_offset >> 1; + seg_desc->seg.mss = cpu_to_be16(gso_size); + } + seg_desc->seg.seg_len = cpu_to_be16(len); + seg_desc->seg.seg_addr = cpu_to_be64(addr); +} + +static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses, + u64 iov_offset, u64 iov_len) +{ + u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE; + u64 first_page = iov_offset / PAGE_SIZE; + u64 page; + + for (page = first_page; page <= last_page; page++) + dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE); +} + +static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) +{ + int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset; + union gve_tx_desc *pkt_desc, *seg_desc; + struct gve_tx_buffer_state *info; + int mtd_desc_nr = !!skb->l4_hash; + bool is_gso = skb_is_gso(skb); + u32 idx = tx->req & tx->mask; + int payload_iov = 2; + int copy_offset; + u32 next_idx; + int i; + + info = &tx->info[idx]; + pkt_desc = &tx->desc[idx]; + + l4_hdr_offset = skb_checksum_start_offset(skb); + /* If the skb is gso, then we want the tcp header alone in the first segment + * otherwise we want the minimum required by the gVNIC spec. + */ + hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : + min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len); + + info->skb = skb; + /* We don't want to split the header, so if necessary, pad to the end + * of the fifo and then put the header at the beginning of the fifo. 
+ */ + pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); + hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, + &info->iov[0]); + WARN(!hdr_nfrags, "hdr_nfrags should never be 0!"); + payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, + &info->iov[payload_iov]); + + gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed, + is_gso, l4_hdr_offset, + 1 + mtd_desc_nr + payload_nfrags, hlen, + info->iov[hdr_nfrags - 1].iov_offset, skb->len); + + skb_copy_bits(skb, 0, + tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, + hlen); + gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, + info->iov[hdr_nfrags - 1].iov_offset, + info->iov[hdr_nfrags - 1].iov_len); + copy_offset = hlen; + + if (mtd_desc_nr) { + next_idx = (tx->req + 1) & tx->mask; + gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); + } + + for (i = payload_iov; i < payload_nfrags + payload_iov; i++) { + next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; + seg_desc = &tx->desc[next_idx]; + + gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb), + skb_shinfo(skb)->gso_size, + skb_is_gso_v6(skb), is_gso, + info->iov[i].iov_len, + info->iov[i].iov_offset); + + skb_copy_bits(skb, copy_offset, + tx->tx_fifo.base + info->iov[i].iov_offset, + info->iov[i].iov_len); + gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, + info->iov[i].iov_offset, + info->iov[i].iov_len); + copy_offset += info->iov[i].iov_len; + } + + return 1 + mtd_desc_nr + payload_nfrags; +} + +static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + int hlen, num_descriptors, l4_hdr_offset; + union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc; + struct gve_tx_buffer_state *info; + int mtd_desc_nr = !!skb->l4_hash; + bool is_gso = skb_is_gso(skb); + u32 idx = tx->req & tx->mask; + u64 addr; + u32 len; + int i; + + info = &tx->info[idx]; + pkt_desc = &tx->desc[idx]; + + l4_hdr_offset = skb_checksum_start_offset(skb); + /* If the skb is gso, then we want only up to the tcp header in the first segment + * to efficiently replicate on each segment otherwise we want the linear portion + * of the skb (which will contain the checksum because skb->csum_start and + * skb->csum_offset are given relative to skb->head) in the first segment. + */ + hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb); + len = skb_headlen(skb); + + info->skb = skb; + + addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) { + tx->dma_mapping_error++; + goto drop; + } + dma_unmap_len_set(info, len, len); + dma_unmap_addr_set(info, dma, addr); + + num_descriptors = 1 + shinfo->nr_frags; + if (hlen < len) + num_descriptors++; + if (mtd_desc_nr) + num_descriptors++; + + gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed, + is_gso, l4_hdr_offset, + num_descriptors, hlen, addr, skb->len); + + if (mtd_desc_nr) { + idx = (idx + 1) & tx->mask; + mtd_desc = &tx->desc[idx]; + gve_tx_fill_mtd_desc(mtd_desc, skb); + } + + if (hlen < len) { + /* For gso the rest of the linear portion of the skb needs to + * be in its own descriptor. 
+ */ + len -= hlen; + addr += hlen; + idx = (idx + 1) & tx->mask; + seg_desc = &tx->desc[idx]; + gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb), + skb_shinfo(skb)->gso_size, + skb_is_gso_v6(skb), is_gso, len, addr); + } + + for (i = 0; i < shinfo->nr_frags; i++) { + const skb_frag_t *frag = &shinfo->frags[i]; + + idx = (idx + 1) & tx->mask; + seg_desc = &tx->desc[idx]; + len = skb_frag_size(frag); + addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) { + tx->dma_mapping_error++; + goto unmap_drop; + } + tx->info[idx].skb = NULL; + dma_unmap_len_set(&tx->info[idx], len, len); + dma_unmap_addr_set(&tx->info[idx], dma, addr); + + gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb), + skb_shinfo(skb)->gso_size, + skb_is_gso_v6(skb), is_gso, len, addr); + } + + return num_descriptors; + +unmap_drop: + i += num_descriptors - shinfo->nr_frags; + while (i--) { + /* Skip metadata descriptor, if set */ + if (i == 1 && mtd_desc_nr == 1) + continue; + idx--; + gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); + } +drop: + tx->dropped_pkt++; + return 0; +} + +netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_tx_ring *tx; + int nsegs; + + WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues, + "skb queue index out of range"); + tx = &priv->tx[skb_get_queue_mapping(skb)]; + if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { + /* We need to ring the txq doorbell -- we have stopped the Tx + * queue for want of resources, but prior calls to gve_tx() + * may have added descriptors without ringing the doorbell. + */ + + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); + return NETDEV_TX_BUSY; + } + if (tx->raw_addressing) + nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); + else + nsegs = gve_tx_add_skb_copy(priv, tx, skb); + + /* If the packet is getting sent, we need to update the skb */ + if (nsegs) { + netdev_tx_sent_queue(tx->netdev_txq, skb->len); + skb_tx_timestamp(skb); + tx->req += nsegs; + } else { + dev_kfree_skb_any(skb); + } + + if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) + return NETDEV_TX_OK; + + /* Give packets to NIC. Even if this packet failed to send the doorbell + * might need to be rung because of xmit_more. 
+ */ + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); + return NETDEV_TX_OK; +} + +static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p, bool is_xsk) +{ + int pad, nfrags, ndescs, iovi, offset; + struct gve_tx_buffer_state *info; + u32 reqi = tx->req; + + pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); + if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES) + pad = 0; + info = &tx->info[reqi & tx->mask]; + info->xdp_frame = frame_p; + info->xdp.size = len; + info->xdp.is_xsk = is_xsk; + + nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len, + &info->iov[0]); + iovi = pad > 0; + ndescs = nfrags - iovi; + offset = 0; + + while (iovi < nfrags) { + if (!offset) + gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0, + CHECKSUM_NONE, false, 0, ndescs, + info->iov[iovi].iov_len, + info->iov[iovi].iov_offset, len); + else + gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask], + 0, 0, false, false, + info->iov[iovi].iov_len, + info->iov[iovi].iov_offset); + + memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset, + data + offset, info->iov[iovi].iov_len); + gve_dma_sync_for_device(&priv->pdev->dev, + tx->tx_fifo.qpl->page_buses, + info->iov[iovi].iov_offset, + info->iov[iovi].iov_len); + offset += info->iov[iovi].iov_len; + iovi++; + reqi++; + } + + return ndescs; +} + +int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_tx_ring *tx; + int i, err = 0, qid; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + qid = gve_xdp_tx_queue_id(priv, + smp_processor_id() % priv->num_xdp_queues); + + tx = &priv->tx[qid]; + + spin_lock(&tx->xdp_lock); + for (i = 0; i < n; i++) { + err = gve_xdp_xmit_one(priv, tx, frames[i]->data, + frames[i]->len, frames[i]); + if (err) + break; + } + + if (flags & XDP_XMIT_FLUSH) + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); + + spin_unlock(&tx->xdp_lock); + + u64_stats_update_begin(&tx->statss); + tx->xdp_xmit += n; + tx->xdp_xmit_errors += n - i; + u64_stats_update_end(&tx->statss); + + return i ? 
i : err; +} + +int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, + void *data, int len, void *frame_p) +{ + int nsegs; + + if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1)) + return -EBUSY; + + nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); + tx->req += nsegs; + + return 0; +} + +#define GVE_TX_START_THRESH PAGE_SIZE + +static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, + u32 to_do, bool try_to_wake) +{ + struct gve_tx_buffer_state *info; + u64 pkts = 0, bytes = 0; + size_t space_freed = 0; + struct sk_buff *skb; + u32 idx; + int j; + + for (j = 0; j < to_do; j++) { + idx = tx->done & tx->mask; + netif_info(priv, tx_done, priv->dev, + "[%d] %s: idx=%d (req=%u done=%u)\n", + tx->q_num, __func__, idx, tx->req, tx->done); + info = &tx->info[idx]; + skb = info->skb; + + /* Unmap the buffer */ + if (tx->raw_addressing) + gve_tx_unmap_buf(tx->dev, info); + tx->done++; + /* Mark as free */ + if (skb) { + info->skb = NULL; + bytes += skb->len; + pkts++; + dev_consume_skb_any(skb); + if (tx->raw_addressing) + continue; + space_freed += gve_tx_clear_buffer_state(info); + } + } + + if (!tx->raw_addressing) + gve_tx_free_fifo(&tx->tx_fifo, space_freed); + u64_stats_update_begin(&tx->statss); + tx->bytes_done += bytes; + tx->pkt_done += pkts; + u64_stats_update_end(&tx->statss); + netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); + + /* start the queue if we've stopped it */ +#ifndef CONFIG_BQL + /* Make sure that the doorbells are synced */ + smp_mb(); +#endif + if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && + likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { + tx->wake_queue++; + netif_tx_wake_queue(tx->netdev_txq); + } + + return pkts; +} + +u32 gve_tx_load_event_counter(struct gve_priv *priv, + struct gve_tx_ring *tx) +{ + u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); + __be32 counter = READ_ONCE(priv->counter_array[counter_index]); + + return be32_to_cpu(counter); +} + +static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, + int budget) +{ + struct xdp_desc desc; + int sent = 0, nsegs; + void *data; + + spin_lock(&tx->xdp_lock); + while (sent < budget) { + if (!gve_can_tx(tx, GVE_TX_START_THRESH)) + goto out; + + if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) { + tx->xdp_xsk_done = tx->xdp_xsk_wakeup; + goto out; + } + + data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); + nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); + tx->req += nsegs; + sent++; + } +out: + if (sent > 0) { + gve_tx_put_doorbell(priv, tx->q_resources, tx->req); + xsk_tx_release(tx->xsk_pool); + } + spin_unlock(&tx->xdp_lock); + return sent; +} + +bool gve_xdp_poll(struct gve_notify_block *block, int budget) +{ + struct gve_priv *priv = block->priv; + struct gve_tx_ring *tx = block->tx; + u32 nic_done; + bool repoll; + u32 to_do; + + /* Find out how much work there is to be done */ + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = min_t(u32, (nic_done - tx->done), budget); + gve_clean_xdp_done(priv, tx, to_do); + repoll = nic_done != tx->done; + + if (tx->xsk_pool) { + int sent = gve_xsk_tx(priv, tx, budget); + + u64_stats_update_begin(&tx->statss); + tx->xdp_xsk_sent += sent; + u64_stats_update_end(&tx->statss); + repoll |= (sent == budget); + if (xsk_uses_need_wakeup(tx->xsk_pool)) + xsk_set_tx_need_wakeup(tx->xsk_pool); + } + + /* If we still have work we want to repoll */ + return repoll; +} + +bool gve_tx_poll(struct gve_notify_block *block, int budget) +{ + struct gve_priv *priv 
= block->priv; + struct gve_tx_ring *tx = block->tx; + u32 nic_done; + u32 to_do; + + /* If budget is 0, do all the work */ + if (budget == 0) + budget = INT_MAX; + + /* In TX path, it may try to clean completed pkts in order to xmit, + * to avoid cleaning conflict, use spin_lock(), it yields better + * concurrency between xmit/clean than netif's lock. + */ + spin_lock(&tx->clean_lock); + /* Find out how much work there is to be done */ + nic_done = gve_tx_load_event_counter(priv, tx); + to_do = min_t(u32, (nic_done - tx->done), budget); + gve_clean_tx_done(priv, tx, to_do, true); + spin_unlock(&tx->clean_lock); + /* If we still have work we want to repoll */ + return nic_done != tx->done; +} + +bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) +{ + u32 nic_done = gve_tx_load_event_counter(priv, tx); + + return nic_done != tx->done; +} diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c new file mode 100644 index 0000000000..1e19b834a6 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -0,0 +1,1274 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#include "gve.h" +#include "gve_adminq.h" +#include "gve_utils.h" +#include "gve_dqo.h" +#include <net/ip.h> +#include <linux/tcp.h> +#include <linux/slab.h> +#include <linux/skbuff.h> + +/* Returns true if tx_bufs are available. */ +static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count) +{ + int num_avail; + + if (!tx->dqo.qpl) + return true; + + num_avail = tx->dqo.num_tx_qpl_bufs - + (tx->dqo_tx.alloc_tx_qpl_buf_cnt - + tx->dqo_tx.free_tx_qpl_buf_cnt); + + if (count <= num_avail) + return true; + + /* Update cached value from dqo_compl. */ + tx->dqo_tx.free_tx_qpl_buf_cnt = + atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt); + + num_avail = tx->dqo.num_tx_qpl_bufs - + (tx->dqo_tx.alloc_tx_qpl_buf_cnt - + tx->dqo_tx.free_tx_qpl_buf_cnt); + + return count <= num_avail; +} + +static s16 +gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx) +{ + s16 index; + + index = tx->dqo_tx.free_tx_qpl_buf_head; + + /* No TX buffers available, try to steal the list from the + * completion handler. + */ + if (unlikely(index == -1)) { + tx->dqo_tx.free_tx_qpl_buf_head = + atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1); + index = tx->dqo_tx.free_tx_qpl_buf_head; + + if (unlikely(index == -1)) + return index; + } + + /* Remove TX buf from free list */ + tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index]; + + return index; +} + +static void +gve_free_tx_qpl_bufs(struct gve_tx_ring *tx, + struct gve_tx_pending_packet_dqo *pkt) +{ + s16 index; + int i; + + if (!pkt->num_bufs) + return; + + index = pkt->tx_qpl_buf_ids[0]; + /* Create a linked list of buffers to be added to the free list */ + for (i = 1; i < pkt->num_bufs; i++) { + tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i]; + index = pkt->tx_qpl_buf_ids[i]; + } + + while (true) { + s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head); + + tx->dqo.tx_qpl_buf_next[index] = old_head; + if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head, + old_head, + pkt->tx_qpl_buf_ids[0]) == old_head) { + break; + } + } + + atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt); + pkt->num_bufs = 0; +} + +/* Returns true if a gve_tx_pending_packet_dqo object is available. */ +static bool gve_has_pending_packet(struct gve_tx_ring *tx) +{ + /* Check TX path's list. 
*/ + if (tx->dqo_tx.free_pending_packets != -1) + return true; + + /* Check completion handler's list. */ + if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1) + return true; + + return false; +} + +static struct gve_tx_pending_packet_dqo * +gve_alloc_pending_packet(struct gve_tx_ring *tx) +{ + struct gve_tx_pending_packet_dqo *pending_packet; + s16 index; + + index = tx->dqo_tx.free_pending_packets; + + /* No pending_packets available, try to steal the list from the + * completion handler. + */ + if (unlikely(index == -1)) { + tx->dqo_tx.free_pending_packets = + atomic_xchg(&tx->dqo_compl.free_pending_packets, -1); + index = tx->dqo_tx.free_pending_packets; + + if (unlikely(index == -1)) + return NULL; + } + + pending_packet = &tx->dqo.pending_packets[index]; + + /* Remove pending_packet from free list */ + tx->dqo_tx.free_pending_packets = pending_packet->next; + pending_packet->state = GVE_PACKET_STATE_PENDING_DATA_COMPL; + + return pending_packet; +} + +static void +gve_free_pending_packet(struct gve_tx_ring *tx, + struct gve_tx_pending_packet_dqo *pending_packet) +{ + s16 index = pending_packet - tx->dqo.pending_packets; + + pending_packet->state = GVE_PACKET_STATE_UNALLOCATED; + while (true) { + s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets); + + pending_packet->next = old_head; + if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets, + old_head, index) == old_head) { + break; + } + } +} + +/* gve_tx_free_desc - Cleans up all pending tx requests and buffers. + */ +static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) +{ + int i; + + for (i = 0; i < tx->dqo.num_pending_packets; i++) { + struct gve_tx_pending_packet_dqo *cur_state = + &tx->dqo.pending_packets[i]; + int j; + + for (j = 0; j < cur_state->num_bufs; j++) { + if (j == 0) { + dma_unmap_single(tx->dev, + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); + } else { + dma_unmap_page(tx->dev, + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); + } + } + if (cur_state->skb) { + dev_consume_skb_any(cur_state->skb); + cur_state->skb = NULL; + } + } +} + +static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx) +{ + struct gve_tx_ring *tx = &priv->tx[idx]; + struct device *hdev = &priv->pdev->dev; + size_t bytes; + + gve_tx_remove_from_block(priv, idx); + + if (tx->q_resources) { + dma_free_coherent(hdev, sizeof(*tx->q_resources), + tx->q_resources, tx->q_resources_bus); + tx->q_resources = NULL; + } + + if (tx->dqo.compl_ring) { + bytes = sizeof(tx->dqo.compl_ring[0]) * + (tx->dqo.complq_mask + 1); + dma_free_coherent(hdev, bytes, tx->dqo.compl_ring, + tx->complq_bus_dqo); + tx->dqo.compl_ring = NULL; + } + + if (tx->dqo.tx_ring) { + bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); + dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus); + tx->dqo.tx_ring = NULL; + } + + kvfree(tx->dqo.pending_packets); + tx->dqo.pending_packets = NULL; + + kvfree(tx->dqo.tx_qpl_buf_next); + tx->dqo.tx_qpl_buf_next = NULL; + + if (tx->dqo.qpl) { + gve_unassign_qpl(priv, tx->dqo.qpl->id); + tx->dqo.qpl = NULL; + } + + netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); +} + +static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx) +{ + int num_tx_qpl_bufs = GVE_TX_BUFS_PER_PAGE_DQO * + tx->dqo.qpl->num_entries; + int i; + + tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs, + sizeof(tx->dqo.tx_qpl_buf_next[0]), + GFP_KERNEL); + if (!tx->dqo.tx_qpl_buf_next) + return -ENOMEM; + + 
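+	/* Each registered QPL page is carved into GVE_TX_BUFS_PER_PAGE_DQO
+	 * equally sized buffers; thread every buffer index onto a singly
+	 * linked free list terminated by -1.
+	 */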
tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs; + + /* Generate free TX buf list */ + for (i = 0; i < num_tx_qpl_bufs - 1; i++) + tx->dqo.tx_qpl_buf_next[i] = i + 1; + tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1; + + atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1); + return 0; +} + +static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) +{ + struct gve_tx_ring *tx = &priv->tx[idx]; + struct device *hdev = &priv->pdev->dev; + int num_pending_packets; + size_t bytes; + int i; + + memset(tx, 0, sizeof(*tx)); + tx->q_num = idx; + tx->dev = &priv->pdev->dev; + tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); + atomic_set_release(&tx->dqo_compl.hw_tx_head, 0); + + /* Queue sizes must be a power of 2 */ + tx->mask = priv->tx_desc_cnt - 1; + tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ? + priv->options_dqo_rda.tx_comp_ring_entries - 1 : + tx->mask; + + /* The max number of pending packets determines the maximum number of + * descriptors which maybe written to the completion queue. + * + * We must set the number small enough to make sure we never overrun the + * completion queue. + */ + num_pending_packets = tx->dqo.complq_mask + 1; + + /* Reserve space for descriptor completions, which will be reported at + * most every GVE_TX_MIN_RE_INTERVAL packets. + */ + num_pending_packets -= + (tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL; + + /* Each packet may have at most 2 buffer completions if it receives both + * a miss and reinjection completion. + */ + num_pending_packets /= 2; + + tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX); + tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets, + sizeof(tx->dqo.pending_packets[0]), + GFP_KERNEL); + if (!tx->dqo.pending_packets) + goto err; + + /* Set up linked list of pending packets */ + for (i = 0; i < tx->dqo.num_pending_packets - 1; i++) + tx->dqo.pending_packets[i].next = i + 1; + + tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1; + atomic_set_release(&tx->dqo_compl.free_pending_packets, -1); + tx->dqo_compl.miss_completions.head = -1; + tx->dqo_compl.miss_completions.tail = -1; + tx->dqo_compl.timed_out_completions.head = -1; + tx->dqo_compl.timed_out_completions.tail = -1; + + bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); + tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); + if (!tx->dqo.tx_ring) + goto err; + + bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1); + tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes, + &tx->complq_bus_dqo, + GFP_KERNEL); + if (!tx->dqo.compl_ring) + goto err; + + tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources), + &tx->q_resources_bus, GFP_KERNEL); + if (!tx->q_resources) + goto err; + + if (gve_is_qpl(priv)) { + tx->dqo.qpl = gve_assign_tx_qpl(priv, idx); + if (!tx->dqo.qpl) + goto err; + + if (gve_tx_qpl_buf_init(tx)) + goto err; + } + + gve_tx_add_to_block(priv, idx); + + return 0; + +err: + gve_tx_free_ring_dqo(priv, idx); + return -ENOMEM; +} + +int gve_tx_alloc_rings_dqo(struct gve_priv *priv) +{ + int err = 0; + int i; + + for (i = 0; i < priv->tx_cfg.num_queues; i++) { + err = gve_tx_alloc_ring_dqo(priv, i); + if (err) { + netif_err(priv, drv, priv->dev, + "Failed to alloc tx ring=%d: err=%d\n", + i, err); + goto err; + } + } + + return 0; + +err: + for (i--; i >= 0; i--) + gve_tx_free_ring_dqo(priv, i); + + return err; +} + +void gve_tx_free_rings_dqo(struct gve_priv *priv) +{ + int i; + + for (i = 0; i < priv->tx_cfg.num_queues; i++) { + struct 
gve_tx_ring *tx = &priv->tx[i]; + + gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL); + netdev_tx_reset_queue(tx->netdev_txq); + gve_tx_clean_pending_packets(tx); + + gve_tx_free_ring_dqo(priv, i); + } +} + +/* Returns the number of slots available in the ring */ +static u32 num_avail_tx_slots(const struct gve_tx_ring *tx) +{ + u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask; + + return tx->mask - num_used; +} + +static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx, + int desc_count, int buf_count) +{ + return gve_has_pending_packet(tx) && + num_avail_tx_slots(tx) >= desc_count && + gve_has_free_tx_qpl_bufs(tx, buf_count); +} + +/* Stops the queue if available descriptors is less than 'count'. + * Return: 0 if stop is not required. + */ +static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, + int desc_count, int buf_count) +{ + if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) + return 0; + + /* Update cached TX head pointer */ + tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); + + if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) + return 0; + + /* No space, so stop the queue */ + tx->stop_queue++; + netif_tx_stop_queue(tx->netdev_txq); + + /* Sync with restarting queue in `gve_tx_poll_dqo()` */ + mb(); + + /* After stopping queue, check if we can transmit again in order to + * avoid TOCTOU bug. + */ + tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); + + if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) + return -EBUSY; + + netif_tx_start_queue(tx->netdev_txq); + tx->wake_queue++; + return 0; +} + +static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb, + struct gve_tx_metadata_dqo *metadata) +{ + memset(metadata, 0, sizeof(*metadata)); + metadata->version = GVE_TX_METADATA_VERSION_DQO; + + if (skb->l4_hash) { + u16 path_hash = skb->hash ^ (skb->hash >> 16); + + path_hash &= (1 << 15) - 1; + if (unlikely(path_hash == 0)) + path_hash = ~path_hash; + + metadata->path_hash = path_hash; + } +} + +static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx, + struct sk_buff *skb, u32 len, u64 addr, + s16 compl_tag, bool eop, bool is_gso) +{ + const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL; + + while (len > 0) { + struct gve_tx_pkt_desc_dqo *desc = + &tx->dqo.tx_ring[*desc_idx].pkt; + u32 cur_len = min_t(u32, len, GVE_TX_MAX_BUF_SIZE_DQO); + bool cur_eop = eop && cur_len == len; + + *desc = (struct gve_tx_pkt_desc_dqo){ + .buf_addr = cpu_to_le64(addr), + .dtype = GVE_TX_PKT_DESC_DTYPE_DQO, + .end_of_packet = cur_eop, + .checksum_offload_enable = checksum_offload_en, + .compl_tag = cpu_to_le16(compl_tag), + .buf_size = cur_len, + }; + + addr += cur_len; + len -= cur_len; + *desc_idx = (*desc_idx + 1) & tx->mask; + } +} + +/* Validates and prepares `skb` for TSO. + * + * Returns header length, or < 0 if invalid. + */ +static int gve_prep_tso(struct sk_buff *skb) +{ + struct tcphdr *tcp; + int header_len; + u32 paylen; + int err; + + /* Note: HW requires MSS (gso_size) to be <= 9728 and the total length + * of the TSO to be <= 262143. + * + * However, we don't validate these because: + * - Hypervisor enforces a limit of 9K MTU + * - Kernel will not produce a TSO larger than 64k + */ + + if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO)) + return -1; + + /* Needed because we will modify header. */ + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + tcp = tcp_hdr(skb); + + /* Remove payload length from checksum. 
*/ + paylen = skb->len - skb_transport_offset(skb); + + switch (skb_shinfo(skb)->gso_type) { + case SKB_GSO_TCPV4: + case SKB_GSO_TCPV6: + csum_replace_by_diff(&tcp->check, + (__force __wsum)htonl(paylen)); + + /* Compute length of segmentation header. */ + header_len = skb_tcp_all_headers(skb); + break; + default: + return -EINVAL; + } + + if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO)) + return -EINVAL; + + return header_len; +} + +static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc, + const struct sk_buff *skb, + const struct gve_tx_metadata_dqo *metadata, + int header_len) +{ + *desc = (struct gve_tx_tso_context_desc_dqo){ + .header_len = header_len, + .cmd_dtype = { + .dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO, + .tso = 1, + }, + .flex0 = metadata->bytes[0], + .flex5 = metadata->bytes[5], + .flex6 = metadata->bytes[6], + .flex7 = metadata->bytes[7], + .flex8 = metadata->bytes[8], + .flex9 = metadata->bytes[9], + .flex10 = metadata->bytes[10], + .flex11 = metadata->bytes[11], + }; + desc->tso_total_len = skb->len - header_len; + desc->mss = skb_shinfo(skb)->gso_size; +} + +static void +gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc, + const struct gve_tx_metadata_dqo *metadata) +{ + *desc = (struct gve_tx_general_context_desc_dqo){ + .flex0 = metadata->bytes[0], + .flex1 = metadata->bytes[1], + .flex2 = metadata->bytes[2], + .flex3 = metadata->bytes[3], + .flex4 = metadata->bytes[4], + .flex5 = metadata->bytes[5], + .flex6 = metadata->bytes[6], + .flex7 = metadata->bytes[7], + .flex8 = metadata->bytes[8], + .flex9 = metadata->bytes[9], + .flex10 = metadata->bytes[10], + .flex11 = metadata->bytes[11], + .cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO}, + }; +} + +static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, + struct sk_buff *skb, + struct gve_tx_pending_packet_dqo *pkt, + s16 completion_tag, + u32 *desc_idx, + bool is_gso) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + int i; + + /* Note: HW requires that the size of a non-TSO packet be within the + * range of [17, 9728]. + * + * We don't double check because + * - We limited `netdev->min_mtu` to ETH_MIN_MTU. + * - Hypervisor won't allow MTU larger than 9216. 
+ */ + + pkt->num_bufs = 0; + /* Map the linear portion of skb */ + { + u32 len = skb_headlen(skb); + dma_addr_t addr; + + addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) + goto err; + + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; + + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, + completion_tag, + /*eop=*/shinfo->nr_frags == 0, is_gso); + } + + for (i = 0; i < shinfo->nr_frags; i++) { + const skb_frag_t *frag = &shinfo->frags[i]; + bool is_eop = i == (shinfo->nr_frags - 1); + u32 len = skb_frag_size(frag); + dma_addr_t addr; + + addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) + goto err; + + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; + + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, + completion_tag, is_eop, is_gso); + } + + return 0; +err: + for (i = 0; i < pkt->num_bufs; i++) { + if (i == 0) { + dma_unmap_single(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); + } else { + dma_unmap_page(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); + } + } + pkt->num_bufs = 0; + return -1; +} + +/* Tx buffer i corresponds to + * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO + * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO + */ +static void gve_tx_buf_get_addr(struct gve_tx_ring *tx, + s16 index, + void **va, dma_addr_t *dma_addr) +{ + int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO); + int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO; + + *va = page_address(tx->dqo.qpl->pages[page_id]) + offset; + *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset; +} + +static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx, + struct sk_buff *skb, + struct gve_tx_pending_packet_dqo *pkt, + s16 completion_tag, + u32 *desc_idx, + bool is_gso) +{ + u32 copy_offset = 0; + dma_addr_t dma_addr; + u32 copy_len; + s16 index; + void *va; + + /* Break the packet into buffer size chunks */ + pkt->num_bufs = 0; + while (copy_offset < skb->len) { + index = gve_alloc_tx_qpl_buf(tx); + if (unlikely(index == -1)) + goto err; + + gve_tx_buf_get_addr(tx, index, &va, &dma_addr); + copy_len = min_t(u32, GVE_TX_BUF_SIZE_DQO, + skb->len - copy_offset); + skb_copy_bits(skb, copy_offset, va, copy_len); + + copy_offset += copy_len; + dma_sync_single_for_device(tx->dev, dma_addr, + copy_len, DMA_TO_DEVICE); + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, + copy_len, + dma_addr, + completion_tag, + copy_offset == skb->len, + is_gso); + + pkt->tx_qpl_buf_ids[pkt->num_bufs] = index; + ++tx->dqo_tx.alloc_tx_qpl_buf_cnt; + ++pkt->num_bufs; + } + + return 0; +err: + /* Should not be here if gve_has_free_tx_qpl_bufs() check is correct */ + gve_free_tx_qpl_bufs(tx, pkt); + return -ENOMEM; +} + +/* Returns 0 on success, or < 0 on error. + * + * Before this function is called, the caller must ensure + * gve_has_pending_packet(tx) returns true. 
+ */ +static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + const bool is_gso = skb_is_gso(skb); + u32 desc_idx = tx->dqo_tx.tail; + struct gve_tx_pending_packet_dqo *pkt; + struct gve_tx_metadata_dqo metadata; + s16 completion_tag; + + pkt = gve_alloc_pending_packet(tx); + pkt->skb = skb; + completion_tag = pkt - tx->dqo.pending_packets; + + gve_extract_tx_metadata_dqo(skb, &metadata); + if (is_gso) { + int header_len = gve_prep_tso(skb); + + if (unlikely(header_len < 0)) + goto err; + + gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, + skb, &metadata, header_len); + desc_idx = (desc_idx + 1) & tx->mask; + } + + gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, + &metadata); + desc_idx = (desc_idx + 1) & tx->mask; + + if (tx->dqo.qpl) { + if (gve_tx_add_skb_copy_dqo(tx, skb, pkt, + completion_tag, + &desc_idx, is_gso)) + goto err; + } else { + if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt, + completion_tag, + &desc_idx, is_gso)) + goto err; + } + + tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs; + + /* Commit the changes to our state */ + tx->dqo_tx.tail = desc_idx; + + /* Request a descriptor completion on the last descriptor of the + * packet if we are allowed to by the HW enforced interval. + */ + { + u32 last_desc_idx = (desc_idx - 1) & tx->mask; + u32 last_report_event_interval = + (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask; + + if (unlikely(last_report_event_interval >= + GVE_TX_MIN_RE_INTERVAL)) { + tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true; + tx->dqo_tx.last_re_idx = last_desc_idx; + } + } + + return 0; + +err: + pkt->skb = NULL; + gve_free_pending_packet(tx, pkt); + + return -1; +} + +static int gve_num_descs_per_buf(size_t size) +{ + return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO); +} + +static int gve_num_buffer_descs_needed(const struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + int num_descs; + int i; + + num_descs = gve_num_descs_per_buf(skb_headlen(skb)); + + for (i = 0; i < shinfo->nr_frags; i++) { + unsigned int frag_size = skb_frag_size(&shinfo->frags[i]); + + num_descs += gve_num_descs_per_buf(frag_size); + } + + return num_descs; +} + +/* Returns true if HW is capable of sending TSO represented by `skb`. + * + * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers. + * - The header is counted as one buffer for every single segment. + * - A buffer which is split between two segments is counted for both. + * - If a buffer contains both header and payload, it is counted as two buffers. + */ +static bool gve_can_send_tso(const struct sk_buff *skb) +{ + const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1; + const struct skb_shared_info *shinfo = skb_shinfo(skb); + const int header_len = skb_tcp_all_headers(skb); + const int gso_size = shinfo->gso_size; + int cur_seg_num_bufs; + int cur_seg_size; + int i; + + cur_seg_size = skb_headlen(skb) - header_len; + cur_seg_num_bufs = cur_seg_size > 0; + + for (i = 0; i < shinfo->nr_frags; i++) { + if (cur_seg_size >= gso_size) { + cur_seg_size %= gso_size; + cur_seg_num_bufs = cur_seg_size > 0; + } + + if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg)) + return false; + + cur_seg_size += skb_frag_size(&shinfo->frags[i]); + } + + return true; +} + +/* Attempt to transmit specified SKB. + * + * Returns 0 if the SKB was transmitted or dropped. + * Returns -1 if there is not currently enough space to transmit the SKB. 
+ */ +static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + int num_buffer_descs; + int total_num_descs; + + if (tx->dqo.qpl) { + if (skb_is_gso(skb)) + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto drop; + + /* We do not need to verify the number of buffers used per + * packet or per segment in case of TSO as with 2K size buffers + * none of the TX packet rules would be violated. + * + * gve_can_send_tso() checks that each TCP segment of gso_size is + * not distributed over more than 9 SKB frags.. + */ + num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO); + } else { + if (skb_is_gso(skb)) { + /* If TSO doesn't meet HW requirements, attempt to linearize the + * packet. + */ + if (unlikely(!gve_can_send_tso(skb) && + skb_linearize(skb) < 0)) { + net_err_ratelimited("%s: Failed to transmit TSO packet\n", + priv->dev->name); + goto drop; + } + + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto drop; + + num_buffer_descs = gve_num_buffer_descs_needed(skb); + } else { + num_buffer_descs = gve_num_buffer_descs_needed(skb); + + if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) { + if (unlikely(skb_linearize(skb) < 0)) + goto drop; + + num_buffer_descs = 1; + } + } + } + + /* Metadata + (optional TSO) + data descriptors. */ + total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs; + if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs + + GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP, + num_buffer_descs))) { + return -1; + } + + if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0)) + goto drop; + + netdev_tx_sent_queue(tx->netdev_txq, skb->len); + skb_tx_timestamp(skb); + return 0; + +drop: + tx->dropped_pkt++; + dev_kfree_skb_any(skb); + return 0; +} + +/* Transmit a given skb and ring the doorbell. */ +netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_tx_ring *tx; + + tx = &priv->tx[skb_get_queue_mapping(skb)]; + if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) { + /* We need to ring the txq doorbell -- we have stopped the Tx + * queue for want of resources, but prior calls to gve_tx() + * may have added descriptors without ringing the doorbell. 
+ */ + gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); + return NETDEV_TX_BUSY; + } + + if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) + return NETDEV_TX_OK; + + gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); + return NETDEV_TX_OK; +} + +static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list, + struct gve_tx_pending_packet_dqo *pending_packet) +{ + s16 old_tail, index; + + index = pending_packet - tx->dqo.pending_packets; + old_tail = list->tail; + list->tail = index; + if (old_tail == -1) + list->head = index; + else + tx->dqo.pending_packets[old_tail].next = index; + + pending_packet->next = -1; + pending_packet->prev = old_tail; +} + +static void remove_from_list(struct gve_tx_ring *tx, + struct gve_index_list *list, + struct gve_tx_pending_packet_dqo *pkt) +{ + s16 prev_index, next_index; + + prev_index = pkt->prev; + next_index = pkt->next; + + if (prev_index == -1) { + /* Node is head */ + list->head = next_index; + } else { + tx->dqo.pending_packets[prev_index].next = next_index; + } + if (next_index == -1) { + /* Node is tail */ + list->tail = prev_index; + } else { + tx->dqo.pending_packets[next_index].prev = prev_index; + } +} + +static void gve_unmap_packet(struct device *dev, + struct gve_tx_pending_packet_dqo *pkt) +{ + int i; + + /* SKB linear portion is guaranteed to be mapped */ + dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]), + dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE); + for (i = 1; i < pkt->num_bufs; i++) { + dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); + } + pkt->num_bufs = 0; +} + +/* Completion types and expected behavior: + * No Miss compl + Packet compl = Packet completed normally. + * Miss compl + Re-inject compl = Packet completed normally. + * No Miss compl + Re-inject compl = Skipped i.e. packet not completed. + * Miss compl + Packet compl = Skipped i.e. packet not completed. + */ +static void gve_handle_packet_completion(struct gve_priv *priv, + struct gve_tx_ring *tx, bool is_napi, + u16 compl_tag, u64 *bytes, u64 *pkts, + bool is_reinjection) +{ + struct gve_tx_pending_packet_dqo *pending_packet; + + if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { + net_err_ratelimited("%s: Invalid TX completion tag: %d\n", + priv->dev->name, (int)compl_tag); + return; + } + + pending_packet = &tx->dqo.pending_packets[compl_tag]; + + if (unlikely(is_reinjection)) { + if (unlikely(pending_packet->state == + GVE_PACKET_STATE_TIMED_OUT_COMPL)) { + net_err_ratelimited("%s: Re-injection completion: %d received after timeout.\n", + priv->dev->name, (int)compl_tag); + /* Packet was already completed as a result of timeout, + * so just remove from list and free pending packet. + */ + remove_from_list(tx, + &tx->dqo_compl.timed_out_completions, + pending_packet); + gve_free_pending_packet(tx, pending_packet); + return; + } + if (unlikely(pending_packet->state != + GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) { + /* No outstanding miss completion but packet allocated + * implies packet receives a re-injection completion + * without a prior miss completion. Return without + * completing the packet. + */ + net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n", + priv->dev->name, (int)compl_tag); + return; + } + remove_from_list(tx, &tx->dqo_compl.miss_completions, + pending_packet); + } else { + /* Packet is allocated but not a pending data completion. 
*/ + if (unlikely(pending_packet->state != + GVE_PACKET_STATE_PENDING_DATA_COMPL)) { + net_err_ratelimited("%s: No pending data completion: %d\n", + priv->dev->name, (int)compl_tag); + return; + } + } + tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs; + if (tx->dqo.qpl) + gve_free_tx_qpl_bufs(tx, pending_packet); + else + gve_unmap_packet(tx->dev, pending_packet); + + *bytes += pending_packet->skb->len; + (*pkts)++; + napi_consume_skb(pending_packet->skb, is_napi); + pending_packet->skb = NULL; + gve_free_pending_packet(tx, pending_packet); +} + +static void gve_handle_miss_completion(struct gve_priv *priv, + struct gve_tx_ring *tx, u16 compl_tag, + u64 *bytes, u64 *pkts) +{ + struct gve_tx_pending_packet_dqo *pending_packet; + + if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { + net_err_ratelimited("%s: Invalid TX completion tag: %d\n", + priv->dev->name, (int)compl_tag); + return; + } + + pending_packet = &tx->dqo.pending_packets[compl_tag]; + if (unlikely(pending_packet->state != + GVE_PACKET_STATE_PENDING_DATA_COMPL)) { + net_err_ratelimited("%s: Unexpected packet state: %d for completion tag : %d\n", + priv->dev->name, (int)pending_packet->state, + (int)compl_tag); + return; + } + + pending_packet->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL; + /* jiffies can wraparound but time comparisons can handle overflows. */ + pending_packet->timeout_jiffies = + jiffies + + msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT * + MSEC_PER_SEC); + add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet); + + *bytes += pending_packet->skb->len; + (*pkts)++; +} + +static void remove_miss_completions(struct gve_priv *priv, + struct gve_tx_ring *tx) +{ + struct gve_tx_pending_packet_dqo *pending_packet; + s16 next_index; + + next_index = tx->dqo_compl.miss_completions.head; + while (next_index != -1) { + pending_packet = &tx->dqo.pending_packets[next_index]; + next_index = pending_packet->next; + /* Break early because packets should timeout in order. */ + if (time_is_after_jiffies(pending_packet->timeout_jiffies)) + break; + + remove_from_list(tx, &tx->dqo_compl.miss_completions, + pending_packet); + /* Unmap/free TX buffers and free skb but do not unallocate packet i.e. + * the completion tag is not freed to ensure that the driver + * can take appropriate action if a corresponding valid + * completion is received later. + */ + if (tx->dqo.qpl) + gve_free_tx_qpl_bufs(tx, pending_packet); + else + gve_unmap_packet(tx->dev, pending_packet); + + /* This indicates the packet was dropped. */ + dev_kfree_skb_any(pending_packet->skb); + pending_packet->skb = NULL; + tx->dropped_pkt++; + net_err_ratelimited("%s: No reinjection completion was received for: %d.\n", + priv->dev->name, + (int)(pending_packet - tx->dqo.pending_packets)); + + pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL; + pending_packet->timeout_jiffies = + jiffies + + msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT * + MSEC_PER_SEC); + /* Maintain pending packet in another list so the packet can be + * unallocated at a later time. 
+ */ + add_to_list(tx, &tx->dqo_compl.timed_out_completions, + pending_packet); + } +} + +static void remove_timed_out_completions(struct gve_priv *priv, + struct gve_tx_ring *tx) +{ + struct gve_tx_pending_packet_dqo *pending_packet; + s16 next_index; + + next_index = tx->dqo_compl.timed_out_completions.head; + while (next_index != -1) { + pending_packet = &tx->dqo.pending_packets[next_index]; + next_index = pending_packet->next; + /* Break early because packets should timeout in order. */ + if (time_is_after_jiffies(pending_packet->timeout_jiffies)) + break; + + remove_from_list(tx, &tx->dqo_compl.timed_out_completions, + pending_packet); + gve_free_pending_packet(tx, pending_packet); + } +} + +int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, + struct napi_struct *napi) +{ + u64 reinject_compl_bytes = 0; + u64 reinject_compl_pkts = 0; + int num_descs_cleaned = 0; + u64 miss_compl_bytes = 0; + u64 miss_compl_pkts = 0; + u64 pkt_compl_bytes = 0; + u64 pkt_compl_pkts = 0; + + /* Limit in order to avoid blocking for too long */ + while (!napi || pkt_compl_pkts < napi->weight) { + struct gve_tx_compl_desc *compl_desc = + &tx->dqo.compl_ring[tx->dqo_compl.head]; + u16 type; + + if (compl_desc->generation == tx->dqo_compl.cur_gen_bit) + break; + + /* Prefetch the next descriptor. */ + prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) & + tx->dqo.complq_mask]); + + /* Do not read data until we own the descriptor */ + dma_rmb(); + type = compl_desc->type; + + if (type == GVE_COMPL_TYPE_DQO_DESC) { + /* This is the last descriptor fetched by HW plus one */ + u16 tx_head = le16_to_cpu(compl_desc->tx_head); + + atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head); + } else if (type == GVE_COMPL_TYPE_DQO_PKT) { + u16 compl_tag = le16_to_cpu(compl_desc->completion_tag); + if (compl_tag & GVE_ALT_MISS_COMPL_BIT) { + compl_tag &= ~GVE_ALT_MISS_COMPL_BIT; + gve_handle_miss_completion(priv, tx, compl_tag, + &miss_compl_bytes, + &miss_compl_pkts); + } else { + gve_handle_packet_completion(priv, tx, !!napi, + compl_tag, + &pkt_compl_bytes, + &pkt_compl_pkts, + false); + } + } else if (type == GVE_COMPL_TYPE_DQO_MISS) { + u16 compl_tag = le16_to_cpu(compl_desc->completion_tag); + + gve_handle_miss_completion(priv, tx, compl_tag, + &miss_compl_bytes, + &miss_compl_pkts); + } else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) { + u16 compl_tag = le16_to_cpu(compl_desc->completion_tag); + + gve_handle_packet_completion(priv, tx, !!napi, + compl_tag, + &reinject_compl_bytes, + &reinject_compl_pkts, + true); + } + + tx->dqo_compl.head = + (tx->dqo_compl.head + 1) & tx->dqo.complq_mask; + /* Flip the generation bit when we wrap around */ + tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0; + num_descs_cleaned++; + } + + netdev_tx_completed_queue(tx->netdev_txq, + pkt_compl_pkts + miss_compl_pkts, + pkt_compl_bytes + miss_compl_bytes); + + remove_miss_completions(priv, tx); + remove_timed_out_completions(priv, tx); + + u64_stats_update_begin(&tx->statss); + tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes; + tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts; + u64_stats_update_end(&tx->statss); + return num_descs_cleaned; +} + +bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean) +{ + struct gve_tx_compl_desc *compl_desc; + struct gve_tx_ring *tx = block->tx; + struct gve_priv *priv = block->priv; + + if (do_clean) { + int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx, + &block->napi); + + /* Sync with queue being stopped in `gve_maybe_stop_tx_dqo()` */ 
+ mb(); + + if (netif_tx_queue_stopped(tx->netdev_txq) && + num_descs_cleaned > 0) { + tx->wake_queue++; + netif_tx_wake_queue(tx->netdev_txq); + } + } + + /* Return true if we still have work. */ + compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head]; + return compl_desc->generation != tx->dqo_compl.cur_gen_bit; +} diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c new file mode 100644 index 0000000000..26e08d7532 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. + */ + +#include "gve.h" +#include "gve_adminq.h" +#include "gve_utils.h" + +void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx) +{ + struct gve_notify_block *block = + &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)]; + + block->tx = NULL; +} + +void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx) +{ + unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2, + num_online_cpus()); + int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + struct gve_tx_ring *tx = &priv->tx[queue_idx]; + + block->tx = tx; + tx->ntfy_id = ntfy_idx; + netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus), + queue_idx); +} + +void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx) +{ + struct gve_notify_block *block = + &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)]; + + block->rx = NULL; +} + +void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) +{ + u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + struct gve_rx_ring *rx = &priv->rx[queue_idx]; + + block->rx = rx; + rx->ntfy_id = ntfy_idx; +} + +struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, + struct gve_rx_slot_page_info *page_info, u16 len) +{ + void *va = page_info->page_address + page_info->page_offset + + page_info->pad; + struct sk_buff *skb; + + skb = napi_alloc_skb(napi, len); + if (unlikely(!skb)) + return NULL; + + __skb_put(skb, len); + skb_copy_to_linear_data_offset(skb, 0, va, len); + skb->protocol = eth_type_trans(skb, dev); + + return skb; +} + +void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info) +{ + page_info->pagecnt_bias--; + if (page_info->pagecnt_bias == 0) { + int pagecount = page_count(page_info->page); + + /* If we have run out of bias - set it back up to INT_MAX + * minus the existing refs. + */ + page_info->pagecnt_bias = INT_MAX - pagecount; + + /* Set pagecount back up to max. */ + page_ref_add(page_info->page, INT_MAX - pagecount); + } +} diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h new file mode 100644 index 0000000000..324fd98a61 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_utils.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2021 Google, Inc. 
+ */
+
+#ifndef _GVE_UTILS_H
+#define _GVE_UTILS_H
+
+#include <linux/etherdevice.h>
+
+#include "gve.h"
+
+void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+			    struct gve_rx_slot_page_info *page_info, u16 len);
+
+/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
+void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
+
+#endif /* _GVE_UTILS_H */
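As a reading aid for the DQO transmit path above: gve_alloc_pending_packet(), gve_free_pending_packet(), gve_alloc_tx_qpl_buf() and gve_free_tx_qpl_bufs() all share one pattern. The transmit side pops indices from a private free list and, when that list runs dry, steals the completion side's list in a single atomic exchange, while the completion side pushes freed indices back with a compare-and-swap loop. The program below is a minimal user-space sketch of that handoff, assuming C11 atomics in place of the kernel's atomic_t helpers; the names, sizes, and single-producer/single-consumer framing are illustrative only and are not part of the driver.

/* Minimal sketch of the index-based free-list handoff used by the DQO TX
 * path: a private list owned by the producer plus a shared list that the
 * consumer pushes to and the producer steals from when its own list is
 * empty. Illustrative only; not driver code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 8

static short next[NSLOTS];		/* next[i] links slot i to the following free slot */
static short producer_head;		/* private free list, -1 when empty */
static _Atomic short shared_head;	/* list the consumer pushes freed slots onto */

static void init_lists(void)
{
	int i;

	for (i = 0; i < NSLOTS - 1; i++)
		next[i] = i + 1;
	next[NSLOTS - 1] = -1;
	producer_head = 0;
	atomic_store(&shared_head, -1);
}

/* Producer: pop a slot; if the private list is empty, steal the shared list. */
static short alloc_slot(void)
{
	short index = producer_head;

	if (index == -1) {
		producer_head = atomic_exchange(&shared_head, -1);
		index = producer_head;
		if (index == -1)
			return -1;	/* genuinely out of slots */
	}
	producer_head = next[index];
	return index;
}

/* Consumer: push a completed slot back onto the shared list with a CAS loop. */
static void free_slot(short index)
{
	short old_head = atomic_load(&shared_head);

	do {
		next[index] = old_head;
	} while (!atomic_compare_exchange_weak(&shared_head, &old_head, index));
}

int main(void)
{
	int i;

	init_lists();
	/* Drain the producer's private list completely. */
	for (i = 0; i < NSLOTS; i++)
		alloc_slot();
	/* Consumer returns two slots onto the shared list. */
	free_slot(3);
	free_slot(5);
	/* The next allocation steals the shared list and succeeds. */
	printf("stolen slot: %d\n", alloc_slot());	/* prints 5 */
	printf("next slot:   %d\n", alloc_slot());	/* prints 3 */
	printf("empty now:   %d\n", alloc_slot());	/* prints -1 */
	return 0;
}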