author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:36 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:36 +0000
commit     50ba0232fd5312410f1b65247e774244f89a628e (patch)
tree       fd8f2fc78e9e548af0ff9590276602ee6125be00 /drivers/net/ethernet/amazon
parent     Releasing progress-linux version 6.7.12-1~progress7.99u1. (diff)
download   linux-50ba0232fd5312410f1b65247e774244f89a628e.tar.xz
           linux-50ba0232fd5312410f1b65247e774244f89a628e.zip
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/amazon')
-rw-r--r--  drivers/net/ethernet/amazon/ena/Makefile          2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c         2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c    50
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c    728
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h     99
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.c       470
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.h       151
7 files changed, 779 insertions, 723 deletions
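
Most of this merge is a mechanical split: the XDP datapath leaves ena_netdev.c for the new ena_xdp.c/ena_xdp.h pair. The ena_ethtool.c hunks also adopt the consolidated RSS ethtool API that arrived in this upstream range, where the indirection table, hash key and hash function travel in a single struct ethtool_rxfh_param instead of three separate pointer arguments, and set_rxfh gains a struct netlink_ext_ack * for extended error reporting. A minimal sketch of that callback shape as the hunks below use it (the example_* names and the trivial bodies are hypothetical, for illustration only):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/netlink.h>

    /* .get_rxfh now fills rxfh->indir / rxfh->key / rxfh->hfunc in place
     * of the old u32 *indir, u8 *key, u8 *hfunc parameters.
     */
    static int example_get_rxfh(struct net_device *netdev,
                                struct ethtool_rxfh_param *rxfh)
    {
            rxfh->hfunc = ETH_RSS_HASH_TOP;    /* Toeplitz */
            return 0;
    }

    /* .set_rxfh carries the same struct plus an extack for rich errors. */
    static int example_set_rxfh(struct net_device *netdev,
                                struct ethtool_rxfh_param *rxfh,
                                struct netlink_ext_ack *extack)
    {
            if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
                rxfh->hfunc != ETH_RSS_HASH_TOP) {
                    NL_SET_ERR_MSG_MOD(extack, "Unsupported hash function");
                    return -EOPNOTSUPP;
            }
            return 0;
    }
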
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile index f1f752a8f7..6ab6153651 100644 --- a/drivers/net/ethernet/amazon/ena/Makefile +++ b/drivers/net/ethernet/amazon/ena/Makefile @@ -5,4 +5,4 @@ obj-$(CONFIG_ENA_ETHERNET) += ena.o -ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o +ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 633b321d7f..4db6893729 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -362,7 +362,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; io_sq->bounce_buf_ctrl.next_to_use = 0; - size = io_sq->bounce_buf_ctrl.buffer_size * + size = (size_t)io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num; dev_node = dev_to_node(ena_dev->dmadev); diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index d671df4b76..0cb6cc1cef 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -7,6 +7,7 @@ #include <linux/pci.h> #include "ena_netdev.h" +#include "ena_xdp.h" struct ena_stats { char name[ETH_GSTRING_LEN]; @@ -262,17 +263,14 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) ena_stats->name); } - if (!is_xdp) { - /* RX stats, in XDP there isn't a RX queue - * counterpart - */ - for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { - ena_stats = &ena_stats_rx_strings[j]; + /* In XDP there isn't an RX queue counterpart */ + if (is_xdp) + continue; - ethtool_sprintf(data, - "queue_%u_rx_%s", i, - ena_stats->name); - } + for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { + ena_stats = &ena_stats_rx_strings[j]; + + ethtool_sprintf(data, "queue_%u_rx_%s", i, ena_stats->name); } } } @@ -299,13 +297,13 @@ static void ena_get_strings(struct ena_adapter *adapter, for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { ena_stats = &ena_stats_global_strings[i]; - ethtool_sprintf(&data, ena_stats->name); + ethtool_puts(&data, ena_stats->name); } if (eni_stats_needed) { for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { ena_stats = &ena_stats_eni_strings[i]; - ethtool_sprintf(&data, ena_stats->name); + ethtool_puts(&data, ena_stats->name); } } @@ -802,15 +800,15 @@ static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) return rc; } -static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int ena_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) { struct ena_adapter *adapter = netdev_priv(netdev); enum ena_admin_hash_functions ena_func; u8 func; int rc; - rc = ena_indirection_table_get(adapter, indir); + rc = ena_indirection_table_get(adapter, rxfh->indir); if (rc) return rc; @@ -825,7 +823,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return rc; } - rc = ena_com_get_hash_key(adapter->ena_dev, key); + rc = ena_com_get_hash_key(adapter->ena_dev, rxfh->key); if (rc) return rc; @@ -842,27 +840,27 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return -EOPNOTSUPP; } - if (hfunc) - *hfunc = func; + rxfh->hfunc = func; return 0; } -static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int ena_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { 
struct ena_adapter *adapter = netdev_priv(netdev); struct ena_com_dev *ena_dev = adapter->ena_dev; enum ena_admin_hash_functions func = 0; int rc; - if (indir) { - rc = ena_indirection_table_set(adapter, indir); + if (rxfh->indir) { + rc = ena_indirection_table_set(adapter, rxfh->indir); if (rc) return rc; } - switch (hfunc) { + switch (rxfh->hfunc) { case ETH_RSS_HASH_NO_CHANGE: func = ena_com_get_current_hash_function(ena_dev); break; @@ -874,12 +872,12 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, break; default: netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n", - hfunc); + rxfh->hfunc); return -EOPNOTSUPP; } - if (key || func) { - rc = ena_com_fill_hash_function(ena_dev, func, key, + if (rxfh->key || func) { + rc = ena_com_fill_hash_function(ena_dev, func, rxfh->key, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); if (unlikely(rc)) { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 4fa27c9a33..95ed32542e 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -19,8 +19,8 @@ #include <net/ip.h> #include "ena_netdev.h" -#include <linux/bpf_trace.h> #include "ena_pci_id_tbl.h" +#include "ena_xdp.h" MODULE_AUTHOR("Amazon.com, Inc. or its affiliates"); MODULE_DESCRIPTION(DEVICE_NAME); @@ -45,53 +45,6 @@ static void check_for_admin_com_state(struct ena_adapter *adapter); static void ena_destroy_device(struct ena_adapter *adapter, bool graceful); static int ena_restore_device(struct ena_adapter *adapter); -static void ena_init_io_rings(struct ena_adapter *adapter, - int first_index, int count); -static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index, - int count); -static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index, - int count); -static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid); -static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter, - int first_index, - int count); -static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid); -static void ena_free_tx_resources(struct ena_adapter *adapter, int qid); -static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget); -static void ena_destroy_all_tx_queues(struct ena_adapter *adapter); -static void ena_free_all_io_tx_resources(struct ena_adapter *adapter); -static void ena_napi_disable_in_range(struct ena_adapter *adapter, - int first_index, int count); -static void ena_napi_enable_in_range(struct ena_adapter *adapter, - int first_index, int count); -static int ena_up(struct ena_adapter *adapter); -static void ena_down(struct ena_adapter *adapter); -static void ena_unmask_interrupt(struct ena_ring *tx_ring, - struct ena_ring *rx_ring); -static void ena_update_ring_numa_node(struct ena_ring *tx_ring, - struct ena_ring *rx_ring); -static void ena_unmap_tx_buff(struct ena_ring *tx_ring, - struct ena_tx_buffer *tx_info); -static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, - int first_index, int count); -static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, - int first_index, int count); - -/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ -static void ena_increase_stat(u64 *statp, u64 cnt, - struct u64_stats_sync *syncp) -{ - u64_stats_update_begin(syncp); - (*statp) += cnt; - u64_stats_update_end(syncp); -} - -static void ena_ring_tx_doorbell(struct ena_ring *tx_ring) -{ - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); - 
ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp); -} - static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct ena_adapter *adapter = netdev_priv(dev); @@ -135,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu) return ret; } -static int ena_xmit_common(struct net_device *dev, - struct ena_ring *ring, - struct ena_tx_buffer *tx_info, - struct ena_com_tx_ctx *ena_tx_ctx, - u16 next_to_use, - u32 bytes) +int ena_xmit_common(struct ena_adapter *adapter, + struct ena_ring *ring, + struct ena_tx_buffer *tx_info, + struct ena_com_tx_ctx *ena_tx_ctx, + u16 next_to_use, + u32 bytes) { - struct ena_adapter *adapter = netdev_priv(dev); int rc, nb_hw_desc; if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq, ena_tx_ctx))) { - netif_dbg(adapter, tx_queued, dev, + netif_dbg(adapter, tx_queued, adapter->netdev, "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", ring->qid); ena_ring_tx_doorbell(ring); @@ -162,7 +114,7 @@ static int ena_xmit_common(struct net_device *dev, * ena_com_prepare_tx() are fatal and therefore require a device reset. */ if (unlikely(rc)) { - netif_err(adapter, tx_queued, dev, + netif_err(adapter, tx_queued, adapter->netdev, "Failed to prepare tx bufs\n"); ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp); @@ -178,6 +130,7 @@ static int ena_xmit_common(struct net_device *dev, u64_stats_update_end(&ring->syncp); tx_info->tx_descs = nb_hw_desc; + tx_info->total_tx_size = bytes; tx_info->last_jiffies = jiffies; tx_info->print_once = 0; @@ -186,467 +139,6 @@ static int ena_xmit_common(struct net_device *dev, return 0; } -/* This is the XDP napi callback. XDP queues use a separate napi callback - * than Rx/Tx queues. - */ -static int ena_xdp_io_poll(struct napi_struct *napi, int budget) -{ - struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); - u32 xdp_work_done, xdp_budget; - struct ena_ring *xdp_ring; - int napi_comp_call = 0; - int ret; - - xdp_ring = ena_napi->xdp_ring; - - xdp_budget = budget; - - if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) || - test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) { - napi_complete_done(napi, 0); - return 0; - } - - xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget); - - /* If the device is about to reset or down, avoid unmask - * the interrupt and return 0 so NAPI won't reschedule - */ - if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) { - napi_complete_done(napi, 0); - ret = 0; - } else if (xdp_budget > xdp_work_done) { - napi_comp_call = 1; - if (napi_complete_done(napi, xdp_work_done)) - ena_unmask_interrupt(xdp_ring, NULL); - ena_update_ring_numa_node(xdp_ring, NULL); - ret = xdp_work_done; - } else { - ret = xdp_budget; - } - - u64_stats_update_begin(&xdp_ring->syncp); - xdp_ring->tx_stats.napi_comp += napi_comp_call; - xdp_ring->tx_stats.tx_poll++; - u64_stats_update_end(&xdp_ring->syncp); - xdp_ring->tx_stats.last_napi_jiffies = jiffies; - - return ret; -} - -static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, - struct ena_tx_buffer *tx_info, - struct xdp_frame *xdpf, - struct ena_com_tx_ctx *ena_tx_ctx) -{ - struct ena_adapter *adapter = xdp_ring->adapter; - struct ena_com_buf *ena_buf; - int push_len = 0; - dma_addr_t dma; - void *data; - u32 size; - - tx_info->xdpf = xdpf; - data = tx_info->xdpf->data; - size = tx_info->xdpf->len; - - if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - /* Designate part of the packet for LLQ 
*/ - push_len = min_t(u32, size, xdp_ring->tx_max_header_size); - - ena_tx_ctx->push_header = data; - - size -= push_len; - data += push_len; - } - - ena_tx_ctx->header_len = push_len; - - if (size > 0) { - dma = dma_map_single(xdp_ring->dev, - data, - size, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(xdp_ring->dev, dma))) - goto error_report_dma_error; - - tx_info->map_linear_data = 0; - - ena_buf = tx_info->bufs; - ena_buf->paddr = dma; - ena_buf->len = size; - - ena_tx_ctx->ena_bufs = ena_buf; - ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1; - } - - return 0; - -error_report_dma_error: - ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1, - &xdp_ring->syncp); - netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n"); - - return -EINVAL; -} - -static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, - struct net_device *dev, - struct xdp_frame *xdpf, - int flags) -{ - struct ena_com_tx_ctx ena_tx_ctx = {}; - struct ena_tx_buffer *tx_info; - u16 next_to_use, req_id; - int rc; - - next_to_use = xdp_ring->next_to_use; - req_id = xdp_ring->free_ids[next_to_use]; - tx_info = &xdp_ring->tx_buffer_info[req_id]; - tx_info->num_of_bufs = 0; - - rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx); - if (unlikely(rc)) - return rc; - - ena_tx_ctx.req_id = req_id; - - rc = ena_xmit_common(dev, - xdp_ring, - tx_info, - &ena_tx_ctx, - next_to_use, - xdpf->len); - if (rc) - goto error_unmap_dma; - - /* trigger the dma engine. ena_ring_tx_doorbell() - * calls a memory barrier inside it. - */ - if (flags & XDP_XMIT_FLUSH) - ena_ring_tx_doorbell(xdp_ring); - - return rc; - -error_unmap_dma: - ena_unmap_tx_buff(xdp_ring, tx_info); - tx_info->xdpf = NULL; - return rc; -} - -static int ena_xdp_xmit(struct net_device *dev, int n, - struct xdp_frame **frames, u32 flags) -{ - struct ena_adapter *adapter = netdev_priv(dev); - struct ena_ring *xdp_ring; - int qid, i, nxmit = 0; - - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) - return -EINVAL; - - if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) - return -ENETDOWN; - - /* We assume that all rings have the same XDP program */ - if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) - return -ENXIO; - - qid = smp_processor_id() % adapter->xdp_num_queues; - qid += adapter->xdp_first_ring; - xdp_ring = &adapter->tx_ring[qid]; - - /* Other CPU ids might try to send thorugh this queue */ - spin_lock(&xdp_ring->xdp_tx_lock); - - for (i = 0; i < n; i++) { - if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0)) - break; - nxmit++; - } - - /* Ring doorbell to make device aware of the packets */ - if (flags & XDP_XMIT_FLUSH) - ena_ring_tx_doorbell(xdp_ring); - - spin_unlock(&xdp_ring->xdp_tx_lock); - - /* Return number of packets sent */ - return nxmit; -} - -static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) -{ - u32 verdict = ENA_XDP_PASS; - struct bpf_prog *xdp_prog; - struct ena_ring *xdp_ring; - struct xdp_frame *xdpf; - u64 *xdp_stat; - - xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); - - if (!xdp_prog) - goto out; - - verdict = bpf_prog_run_xdp(xdp_prog, xdp); - - switch (verdict) { - case XDP_TX: - xdpf = xdp_convert_buff_to_frame(xdp); - if (unlikely(!xdpf)) { - trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); - xdp_stat = &rx_ring->rx_stats.xdp_aborted; - verdict = ENA_XDP_DROP; - break; - } - - /* Find xmit queue */ - xdp_ring = rx_ring->xdp_ring; - - /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ - spin_lock(&xdp_ring->xdp_tx_lock); - - if (ena_xdp_xmit_frame(xdp_ring, 
rx_ring->netdev, xdpf, - XDP_XMIT_FLUSH)) - xdp_return_frame(xdpf); - - spin_unlock(&xdp_ring->xdp_tx_lock); - xdp_stat = &rx_ring->rx_stats.xdp_tx; - verdict = ENA_XDP_TX; - break; - case XDP_REDIRECT: - if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { - xdp_stat = &rx_ring->rx_stats.xdp_redirect; - verdict = ENA_XDP_REDIRECT; - break; - } - trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); - xdp_stat = &rx_ring->rx_stats.xdp_aborted; - verdict = ENA_XDP_DROP; - break; - case XDP_ABORTED: - trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); - xdp_stat = &rx_ring->rx_stats.xdp_aborted; - verdict = ENA_XDP_DROP; - break; - case XDP_DROP: - xdp_stat = &rx_ring->rx_stats.xdp_drop; - verdict = ENA_XDP_DROP; - break; - case XDP_PASS: - xdp_stat = &rx_ring->rx_stats.xdp_pass; - verdict = ENA_XDP_PASS; - break; - default: - bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict); - xdp_stat = &rx_ring->rx_stats.xdp_invalid; - verdict = ENA_XDP_DROP; - } - - ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); -out: - return verdict; -} - -static void ena_init_all_xdp_queues(struct ena_adapter *adapter) -{ - adapter->xdp_first_ring = adapter->num_io_queues; - adapter->xdp_num_queues = adapter->num_io_queues; - - ena_init_io_rings(adapter, - adapter->xdp_first_ring, - adapter->xdp_num_queues); -} - -static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter) -{ - u32 xdp_first_ring = adapter->xdp_first_ring; - u32 xdp_num_queues = adapter->xdp_num_queues; - int rc = 0; - - rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); - if (rc) - goto setup_err; - - rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues); - if (rc) - goto create_err; - - return 0; - -create_err: - ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); -setup_err: - return rc; -} - -/* Provides a way for both kernel and bpf-prog to know - * more about the RX-queue a given XDP frame arrived on. - */ -static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring) -{ - int rc; - - rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0); - - if (rc) { - netif_err(rx_ring->adapter, ifup, rx_ring->netdev, - "Failed to register xdp rx queue info. RX queue num %d rc: %d\n", - rx_ring->qid, rc); - goto err; - } - - rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, - NULL); - - if (rc) { - netif_err(rx_ring->adapter, ifup, rx_ring->netdev, - "Failed to register xdp rx queue info memory model. 
RX queue num %d rc: %d\n", - rx_ring->qid, rc); - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - } - -err: - return rc; -} - -static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring) -{ - xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq); - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); -} - -static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter, - struct bpf_prog *prog, - int first, int count) -{ - struct bpf_prog *old_bpf_prog; - struct ena_ring *rx_ring; - int i = 0; - - for (i = first; i < count; i++) { - rx_ring = &adapter->rx_ring[i]; - old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog); - - if (!old_bpf_prog && prog) { - ena_xdp_register_rxq_info(rx_ring); - rx_ring->rx_headroom = XDP_PACKET_HEADROOM; - } else if (old_bpf_prog && !prog) { - ena_xdp_unregister_rxq_info(rx_ring); - rx_ring->rx_headroom = NET_SKB_PAD; - } - } -} - -static void ena_xdp_exchange_program(struct ena_adapter *adapter, - struct bpf_prog *prog) -{ - struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog); - - ena_xdp_exchange_program_rx_in_range(adapter, - prog, - 0, - adapter->num_io_queues); - - if (old_bpf_prog) - bpf_prog_put(old_bpf_prog); -} - -static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter) -{ - bool was_up; - int rc; - - was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); - - if (was_up) - ena_down(adapter); - - adapter->xdp_first_ring = 0; - adapter->xdp_num_queues = 0; - ena_xdp_exchange_program(adapter, NULL); - if (was_up) { - rc = ena_up(adapter); - if (rc) - return rc; - } - return 0; -} - -static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf) -{ - struct ena_adapter *adapter = netdev_priv(netdev); - struct bpf_prog *prog = bpf->prog; - struct bpf_prog *old_bpf_prog; - int rc, prev_mtu; - bool is_up; - - is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); - rc = ena_xdp_allowed(adapter); - if (rc == ENA_XDP_ALLOWED) { - old_bpf_prog = adapter->xdp_bpf_prog; - if (prog) { - if (!is_up) { - ena_init_all_xdp_queues(adapter); - } else if (!old_bpf_prog) { - ena_down(adapter); - ena_init_all_xdp_queues(adapter); - } - ena_xdp_exchange_program(adapter, prog); - - if (is_up && !old_bpf_prog) { - rc = ena_up(adapter); - if (rc) - return rc; - } - xdp_features_set_redirect_target(netdev, false); - } else if (old_bpf_prog) { - xdp_features_clear_redirect_target(netdev); - rc = ena_destroy_and_free_all_xdp_queues(adapter); - if (rc) - return rc; - } - - prev_mtu = netdev->max_mtu; - netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu; - - if (!old_bpf_prog) - netif_info(adapter, drv, adapter->netdev, - "XDP program is set, changing the max_mtu from %d to %d", - prev_mtu, netdev->max_mtu); - - } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) { - netif_err(adapter, drv, adapter->netdev, - "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on", - netdev->mtu, ENA_XDP_MAX_MTU); - NL_SET_ERR_MSG_MOD(bpf->extack, - "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info"); - return -EINVAL; - } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) { - netif_err(adapter, drv, adapter->netdev, - "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. 
The current queue count (%d), the maximal queue count (%d)\n", - adapter->num_io_queues, adapter->max_num_io_queues); - NL_SET_ERR_MSG_MOD(bpf->extack, - "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info"); - return -EINVAL; - } - - return 0; -} - -/* This is the main xdp callback, it's used by the kernel to set/unset the xdp - * program as well as to query the current xdp program id. - */ -static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf) -{ - switch (bpf->command) { - case XDP_SETUP_PROG: - return ena_xdp_set(netdev, bpf); - default: - return -EINVAL; - } - return 0; -} - static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) { #ifdef CONFIG_RFS_ACCEL @@ -688,8 +180,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter, u64_stats_init(&ring->syncp); } -static void ena_init_io_rings(struct ena_adapter *adapter, - int first_index, int count) +void ena_init_io_rings(struct ena_adapter *adapter, + int first_index, int count) { struct ena_com_dev *ena_dev; struct ena_ring *txr, *rxr; @@ -820,9 +312,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) tx_ring->push_buf_intermediate_buf = NULL; } -static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter, - int first_index, - int count) +int ena_setup_tx_resources_in_range(struct ena_adapter *adapter, + int first_index, int count) { int i, rc = 0; @@ -845,8 +336,8 @@ err_setup_tx: return rc; } -static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, - int first_index, int count) +void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, + int first_index, int count) { int i; @@ -859,7 +350,7 @@ static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, * * Free all transmit software resources */ -static void ena_free_all_io_tx_resources(struct ena_adapter *adapter) +void ena_free_all_io_tx_resources(struct ena_adapter *adapter) { ena_free_all_io_tx_resources_in_range(adapter, 0, @@ -1169,8 +660,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter) ena_free_rx_bufs(adapter, i); } -static void ena_unmap_tx_buff(struct ena_ring *tx_ring, - struct ena_tx_buffer *tx_info) +void ena_unmap_tx_buff(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info) { struct ena_com_buf *ena_buf; u32 cnt; @@ -1205,8 +696,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring, static void ena_free_tx_bufs(struct ena_ring *tx_ring) { bool print_once = true; + bool is_xdp_ring; u32 i; + is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid); + for (i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; @@ -1226,10 +720,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) ena_unmap_tx_buff(tx_ring, tx_info); - dev_kfree_skb_any(tx_info->skb); + if (is_xdp_ring) + xdp_return_frame(tx_info->xdpf); + else + dev_kfree_skb_any(tx_info->skb); } - netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, - tx_ring->qid)); + + if (!is_xdp_ring) + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->qid)); } static void ena_free_all_tx_bufs(struct ena_adapter *adapter) @@ -1262,6 +761,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) for (i = 0; i < adapter->num_io_queues; i++) { ena_qid = ENA_IO_RXQ_IDX(i); cancel_work_sync(&adapter->ena_napi[i].dim.work); + ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } 
@@ -1272,8 +772,8 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter) ena_destroy_all_rx_queues(adapter); } -static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, - struct ena_tx_buffer *tx_info, bool is_xdp) +int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, + struct ena_tx_buffer *tx_info, bool is_xdp) { if (tx_info) netif_err(ring->adapter, @@ -1305,17 +805,6 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) return handle_invalid_req_id(tx_ring, req_id, tx_info, false); } -static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id) -{ - struct ena_tx_buffer *tx_info; - - tx_info = &xdp_ring->tx_buffer_info[req_id]; - if (likely(tx_info->xdpf)) - return 0; - - return handle_invalid_req_id(xdp_ring, req_id, tx_info, true); -} - static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) { struct netdev_queue *txq; @@ -1363,7 +852,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) "tx_poll: q %d skb %p completed\n", tx_ring->qid, skb); - tx_bytes += skb->len; + tx_bytes += tx_info->total_tx_size; dev_kfree_skb(skb); tx_pkts++; total_done += tx_info->tx_descs; @@ -1688,6 +1177,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u return ret; } + /* ena_clean_rx_irq - Cleanup RX irq * @rx_ring: RX ring to clean * @napi: napi handler @@ -1880,8 +1370,8 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) rx_ring->per_napi_packets = 0; } -static void ena_unmask_interrupt(struct ena_ring *tx_ring, - struct ena_ring *rx_ring) +void ena_unmask_interrupt(struct ena_ring *tx_ring, + struct ena_ring *rx_ring) { u32 rx_interval = tx_ring->smoothed_interval; struct ena_eth_io_intr_reg intr_reg; @@ -1913,8 +1403,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring, ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); } -static void ena_update_ring_numa_node(struct ena_ring *tx_ring, - struct ena_ring *rx_ring) +void ena_update_ring_numa_node(struct ena_ring *tx_ring, + struct ena_ring *rx_ring) { int cpu = get_cpu(); int numa_node; @@ -1949,67 +1439,6 @@ out: put_cpu(); } -static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) -{ - u32 total_done = 0; - u16 next_to_clean; - int tx_pkts = 0; - u16 req_id; - int rc; - - if (unlikely(!xdp_ring)) - return 0; - next_to_clean = xdp_ring->next_to_clean; - - while (tx_pkts < budget) { - struct ena_tx_buffer *tx_info; - struct xdp_frame *xdpf; - - rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq, - &req_id); - if (rc) { - if (unlikely(rc == -EINVAL)) - handle_invalid_req_id(xdp_ring, req_id, NULL, - true); - break; - } - - /* validate that the request id points to a valid xdp_frame */ - rc = validate_xdp_req_id(xdp_ring, req_id); - if (rc) - break; - - tx_info = &xdp_ring->tx_buffer_info[req_id]; - xdpf = tx_info->xdpf; - - tx_info->xdpf = NULL; - tx_info->last_jiffies = 0; - ena_unmap_tx_buff(xdp_ring, tx_info); - - netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, - "tx_poll: q %d skb %p completed\n", xdp_ring->qid, - xdpf); - - tx_pkts++; - total_done += tx_info->tx_descs; - - xdp_return_frame(xdpf); - xdp_ring->free_ids[next_to_clean] = req_id; - next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, - xdp_ring->ring_size); - } - - xdp_ring->next_to_clean = next_to_clean; - ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done); - ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq); - - netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, - "tx_poll: q %d done. 
total pkts: %d\n", - xdp_ring->qid, tx_pkts); - - return tx_pkts; -} - static int ena_io_poll(struct napi_struct *napi, int budget) { struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); @@ -2326,28 +1755,36 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter, for (i = first_index; i < first_index + count; i++) { netif_napi_del(&adapter->ena_napi[i].napi); - WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) && - adapter->ena_napi[i].xdp_ring); + WARN_ON(ENA_IS_XDP_INDEX(adapter, i) && + adapter->ena_napi[i].rx_ring); } } static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index, int count) { + int (*napi_handler)(struct napi_struct *napi, int budget); int i; for (i = first_index; i < first_index + count; i++) { struct ena_napi *napi = &adapter->ena_napi[i]; + struct ena_ring *rx_ring, *tx_ring; - netif_napi_add(adapter->netdev, &napi->napi, - ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll); + memset(napi, 0, sizeof(*napi)); - if (!ENA_IS_XDP_INDEX(adapter, i)) { - napi->rx_ring = &adapter->rx_ring[i]; - napi->tx_ring = &adapter->tx_ring[i]; - } else { - napi->xdp_ring = &adapter->tx_ring[i]; - } + rx_ring = &adapter->rx_ring[i]; + tx_ring = &adapter->tx_ring[i]; + + napi_handler = ena_io_poll; + if (ENA_IS_XDP_INDEX(adapter, i)) + napi_handler = ena_xdp_io_poll; + + netif_napi_add(adapter->netdev, &napi->napi, napi_handler); + + if (!ENA_IS_XDP_INDEX(adapter, i)) + napi->rx_ring = rx_ring; + + napi->tx_ring = tx_ring; napi->qid = i; } } @@ -2475,8 +1912,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) return rc; } -static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, - int first_index, int count) +int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, + int first_index, int count) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc, i; @@ -2556,12 +1993,15 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) if (rc) goto create_err; INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); + + ena_xdp_register_rxq_info(&adapter->rx_ring[i]); } return 0; create_err: while (i--) { + ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); cancel_work_sync(&adapter->ena_napi[i].dim.work); ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); } @@ -2686,7 +2126,7 @@ err_setup_tx: } } -static int ena_up(struct ena_adapter *adapter) +int ena_up(struct ena_adapter *adapter) { int io_queue_count, rc, i; @@ -2748,7 +2188,7 @@ err_req_irq: return rc; } -static void ena_down(struct ena_adapter *adapter) +void ena_down(struct ena_adapter *adapter) { int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; @@ -3179,7 +2619,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) /* set flags and meta data */ ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); - rc = ena_xmit_common(dev, + rc = ena_xmit_common(adapter, tx_ring, tx_info, &ena_tx_ctx, @@ -3259,8 +2699,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd strscpy(host_info->kernel_ver_str, utsname()->version, sizeof(host_info->kernel_ver_str) - 1); host_info->os_dist = 0; - strncpy(host_info->os_dist_str, utsname()->release, - sizeof(host_info->os_dist_str) - 1); + strscpy(host_info->os_dist_str, utsname()->release, + sizeof(host_info->os_dist_str)); host_info->driver_version = (DRV_MODULE_GEN_MAJOR) | (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | @@ -3347,6 +2787,7 @@ static void ena_get_stats64(struct net_device 
*netdev, { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_ring *rx_ring, *tx_ring; + u64 total_xdp_rx_drops = 0; unsigned int start; u64 rx_drops; u64 tx_drops; @@ -3355,8 +2796,8 @@ static void ena_get_stats64(struct net_device *netdev, if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) return; - for (i = 0; i < adapter->num_io_queues; i++) { - u64 bytes, packets; + for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { + u64 bytes, packets, xdp_rx_drops; tx_ring = &adapter->tx_ring[i]; @@ -3369,16 +2810,22 @@ static void ena_get_stats64(struct net_device *netdev, stats->tx_packets += packets; stats->tx_bytes += bytes; + /* In XDP there isn't an RX queue counterpart */ + if (ENA_IS_XDP_INDEX(adapter, i)) + continue; + rx_ring = &adapter->rx_ring[i]; do { start = u64_stats_fetch_begin(&rx_ring->syncp); packets = rx_ring->rx_stats.cnt; bytes = rx_ring->rx_stats.bytes; + xdp_rx_drops = rx_ring->rx_stats.xdp_drop; } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; + total_xdp_rx_drops += xdp_rx_drops; } do { @@ -3387,7 +2834,7 @@ static void ena_get_stats64(struct net_device *netdev, tx_drops = adapter->dev_stats.tx_drops; } while (u64_stats_fetch_retry(&adapter->syncp, start)); - stats->rx_dropped = rx_drops; + stats->rx_dropped = rx_drops + total_xdp_rx_drops; stats->tx_dropped = tx_drops; stats->multicast = 0; @@ -3982,10 +3429,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter) { struct ena_ring *tx_ring; struct ena_ring *rx_ring; - int i, budget, rc; + int qid, budget, rc; int io_queue_count; io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; + /* Make sure the driver doesn't turn the device in other process */ smp_rmb(); @@ -3998,27 +3446,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter) if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) return; - budget = ENA_MONITORED_TX_QUEUES; + budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES); - for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) { - tx_ring = &adapter->tx_ring[i]; - rx_ring = &adapter->rx_ring[i]; + qid = adapter->last_monitored_tx_qid; + + while (budget) { + qid = (qid + 1) % io_queue_count; + + tx_ring = &adapter->tx_ring[qid]; + rx_ring = &adapter->rx_ring[qid]; rc = check_missing_comp_in_tx_queue(adapter, tx_ring); if (unlikely(rc)) return; - rc = !ENA_IS_XDP_INDEX(adapter, i) ? + rc = !ENA_IS_XDP_INDEX(adapter, qid) ? check_for_rx_interrupt_queue(adapter, rx_ring) : 0; if (unlikely(rc)) return; budget--; - if (!budget) - break; } - adapter->last_monitored_tx_qid = i % io_queue_count; + adapter->last_monitored_tx_qid = qid; } /* trigger napi schedule after 2 consecutive detections */ diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 33c923e126..6d2cc20210 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -110,19 +110,6 @@ #define ENA_MMIO_DISABLE_REG_READ BIT(0) -/* The max MTU size is configured to be the ethernet frame size without - * the overhead of the ethernet header, which can have a VLAN header, and - * a frame check sequence (FCS). 
- * The buffer size we share with the device is defined to be ENA_PAGE_SIZE - */ - -#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \ - VLAN_HLEN - XDP_PACKET_HEADROOM - \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) - -#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \ - ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues)) - struct ena_irq { irq_handler_t handler; void *data; @@ -138,13 +125,18 @@ struct ena_napi { struct napi_struct napi; struct ena_ring *tx_ring; struct ena_ring *rx_ring; - struct ena_ring *xdp_ring; u32 qid; struct dim dim; }; struct ena_tx_buffer { - struct sk_buff *skb; + union { + struct sk_buff *skb; + /* XDP buffer structure which is used for sending packets in + * the xdp queues + */ + struct xdp_frame *xdpf; + }; /* num of ena desc for this specific skb * (includes data desc and metadata desc) */ @@ -152,16 +144,14 @@ struct ena_tx_buffer { /* num of buffers used by this skb */ u32 num_of_bufs; - /* XDP buffer structure which is used for sending packets in - * the xdp queues - */ - struct xdp_frame *xdpf; + /* Total size of all buffers in bytes */ + u32 total_tx_size; /* Indicate if bufs[0] map the linear data of the skb. */ u8 map_linear_data; /* Used for detect missing tx packets to limit the number of prints */ - u32 print_once; + u8 print_once; /* Save the last jiffies to detect missing tx packets * * sets to non zero value on ena_start_xmit and set to zero on @@ -421,47 +411,44 @@ static inline void ena_reset_device(struct ena_adapter *adapter, set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } -enum ena_xdp_errors_t { - ENA_XDP_ALLOWED = 0, - ENA_XDP_CURRENT_MTU_TOO_LARGE, - ENA_XDP_NO_ENOUGH_QUEUES, -}; +int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, + struct ena_tx_buffer *tx_info, bool is_xdp); -enum ENA_XDP_ACTIONS { - ENA_XDP_PASS = 0, - ENA_XDP_TX = BIT(0), - ENA_XDP_REDIRECT = BIT(1), - ENA_XDP_DROP = BIT(2) -}; - -#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT) - -static inline bool ena_xdp_present(struct ena_adapter *adapter) -{ - return !!adapter->xdp_bpf_prog; -} - -static inline bool ena_xdp_present_ring(struct ena_ring *ring) +/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ +static inline void ena_increase_stat(u64 *statp, u64 cnt, + struct u64_stats_sync *syncp) { - return !!ring->xdp_bpf_prog; + u64_stats_update_begin(syncp); + (*statp) += cnt; + u64_stats_update_end(syncp); } -static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter, - u32 queues) +static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring) { - return 2 * queues <= adapter->max_num_io_queues; -} - -static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter) -{ - enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED; - - if (adapter->netdev->mtu > ENA_XDP_MAX_MTU) - rc = ENA_XDP_CURRENT_MTU_TOO_LARGE; - else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) - rc = ENA_XDP_NO_ENOUGH_QUEUES; - - return rc; + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp); } +int ena_xmit_common(struct ena_adapter *adapter, + struct ena_ring *ring, + struct ena_tx_buffer *tx_info, + struct ena_com_tx_ctx *ena_tx_ctx, + u16 next_to_use, + u32 bytes); +void ena_unmap_tx_buff(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info); +void ena_init_io_rings(struct ena_adapter *adapter, + int first_index, int count); +int 
ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, + int first_index, int count); +int ena_setup_tx_resources_in_range(struct ena_adapter *adapter, + int first_index, int count); +void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, + int first_index, int count); +void ena_free_all_io_tx_resources(struct ena_adapter *adapter); +void ena_down(struct ena_adapter *adapter); +int ena_up(struct ena_adapter *adapter); +void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring); +void ena_update_ring_numa_node(struct ena_ring *tx_ring, + struct ena_ring *rx_ring); #endif /* !(ENA_H) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c new file mode 100644 index 0000000000..34d73c72f7 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c @@ -0,0 +1,470 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved. + */ + +#include "ena_xdp.h" + +static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id) +{ + struct ena_tx_buffer *tx_info; + + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (likely(tx_info->xdpf)) + return 0; + + return handle_invalid_req_id(tx_ring, req_id, tx_info, true); +} + +static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info, + struct xdp_frame *xdpf, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + struct ena_adapter *adapter = tx_ring->adapter; + struct ena_com_buf *ena_buf; + int push_len = 0; + dma_addr_t dma; + void *data; + u32 size; + + tx_info->xdpf = xdpf; + data = tx_info->xdpf->data; + size = tx_info->xdpf->len; + + if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* Designate part of the packet for LLQ */ + push_len = min_t(u32, size, tx_ring->tx_max_header_size); + + ena_tx_ctx->push_header = data; + + size -= push_len; + data += push_len; + } + + ena_tx_ctx->header_len = push_len; + + if (size > 0) { + dma = dma_map_single(tx_ring->dev, + data, + size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) + goto error_report_dma_error; + + tx_info->map_linear_data = 0; + + ena_buf = tx_info->bufs; + ena_buf->paddr = dma; + ena_buf->len = size; + + ena_tx_ctx->ena_bufs = ena_buf; + ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1; + } + + return 0; + +error_report_dma_error: + ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, + &tx_ring->syncp); + netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n"); + + return -EINVAL; +} + +int ena_xdp_xmit_frame(struct ena_ring *tx_ring, + struct ena_adapter *adapter, + struct xdp_frame *xdpf, + int flags) +{ + struct ena_com_tx_ctx ena_tx_ctx = {}; + struct ena_tx_buffer *tx_info; + u16 next_to_use, req_id; + int rc; + + next_to_use = tx_ring->next_to_use; + req_id = tx_ring->free_ids[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->num_of_bufs = 0; + + rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx); + if (unlikely(rc)) + goto err; + + ena_tx_ctx.req_id = req_id; + + rc = ena_xmit_common(adapter, + tx_ring, + tx_info, + &ena_tx_ctx, + next_to_use, + xdpf->len); + if (rc) + goto error_unmap_dma; + + /* trigger the dma engine. ena_ring_tx_doorbell() + * calls a memory barrier inside it. 
+ */ + if (flags & XDP_XMIT_FLUSH) + ena_ring_tx_doorbell(tx_ring); + + return rc; + +error_unmap_dma: + ena_unmap_tx_buff(tx_ring, tx_info); +err: + tx_info->xdpf = NULL; + + return rc; +} + +int ena_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct ena_adapter *adapter = netdev_priv(dev); + struct ena_ring *tx_ring; + int qid, i, nxmit = 0; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return -ENETDOWN; + + /* We assume that all rings have the same XDP program */ + if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) + return -ENXIO; + + qid = smp_processor_id() % adapter->xdp_num_queues; + qid += adapter->xdp_first_ring; + tx_ring = &adapter->tx_ring[qid]; + + /* Other CPU ids might try to send thorugh this queue */ + spin_lock(&tx_ring->xdp_tx_lock); + + for (i = 0; i < n; i++) { + if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0)) + break; + nxmit++; + } + + /* Ring doorbell to make device aware of the packets */ + if (flags & XDP_XMIT_FLUSH) + ena_ring_tx_doorbell(tx_ring); + + spin_unlock(&tx_ring->xdp_tx_lock); + + /* Return number of packets sent */ + return nxmit; +} + +static void ena_init_all_xdp_queues(struct ena_adapter *adapter) +{ + adapter->xdp_first_ring = adapter->num_io_queues; + adapter->xdp_num_queues = adapter->num_io_queues; + + ena_init_io_rings(adapter, + adapter->xdp_first_ring, + adapter->xdp_num_queues); +} + +int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter) +{ + u32 xdp_first_ring = adapter->xdp_first_ring; + u32 xdp_num_queues = adapter->xdp_num_queues; + int rc = 0; + + rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); + if (rc) + goto setup_err; + + rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues); + if (rc) + goto create_err; + + return 0; + +create_err: + ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); +setup_err: + return rc; +} + +/* Provides a way for both kernel and bpf-prog to know + * more about the RX-queue a given XDP frame arrived on. + */ +int ena_xdp_register_rxq_info(struct ena_ring *rx_ring) +{ + int rc; + + rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0); + + netif_dbg(rx_ring->adapter, ifup, rx_ring->netdev, "Registering RX info for queue %d", + rx_ring->qid); + if (rc) { + netif_err(rx_ring->adapter, ifup, rx_ring->netdev, + "Failed to register xdp rx queue info. RX queue num %d rc: %d\n", + rx_ring->qid, rc); + goto err; + } + + rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); + + if (rc) { + netif_err(rx_ring->adapter, ifup, rx_ring->netdev, + "Failed to register xdp rx queue info memory model. 
RX queue num %d rc: %d\n", + rx_ring->qid, rc); + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + } + +err: + return rc; +} + +void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring) +{ + netif_dbg(rx_ring->adapter, ifdown, rx_ring->netdev, + "Unregistering RX info for queue %d", + rx_ring->qid); + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq); + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +} + +void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter, + struct bpf_prog *prog, + int first, int count) +{ + struct bpf_prog *old_bpf_prog; + struct ena_ring *rx_ring; + int i = 0; + + for (i = first; i < count; i++) { + rx_ring = &adapter->rx_ring[i]; + old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog); + + if (!old_bpf_prog && prog) { + rx_ring->rx_headroom = XDP_PACKET_HEADROOM; + } else if (old_bpf_prog && !prog) { + rx_ring->rx_headroom = NET_SKB_PAD; + } + } +} + +static void ena_xdp_exchange_program(struct ena_adapter *adapter, + struct bpf_prog *prog) +{ + struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog); + + ena_xdp_exchange_program_rx_in_range(adapter, + prog, + 0, + adapter->num_io_queues); + + if (old_bpf_prog) + bpf_prog_put(old_bpf_prog); +} + +static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter) +{ + bool was_up; + int rc; + + was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); + + if (was_up) + ena_down(adapter); + + adapter->xdp_first_ring = 0; + adapter->xdp_num_queues = 0; + ena_xdp_exchange_program(adapter, NULL); + if (was_up) { + rc = ena_up(adapter); + if (rc) + return rc; + } + return 0; +} + +static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + struct bpf_prog *prog = bpf->prog; + struct bpf_prog *old_bpf_prog; + int rc, prev_mtu; + bool is_up; + + is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); + rc = ena_xdp_allowed(adapter); + if (rc == ENA_XDP_ALLOWED) { + old_bpf_prog = adapter->xdp_bpf_prog; + if (prog) { + if (!is_up) { + ena_init_all_xdp_queues(adapter); + } else if (!old_bpf_prog) { + ena_down(adapter); + ena_init_all_xdp_queues(adapter); + } + ena_xdp_exchange_program(adapter, prog); + + netif_dbg(adapter, drv, adapter->netdev, "Set a new XDP program\n"); + + if (is_up && !old_bpf_prog) { + rc = ena_up(adapter); + if (rc) + return rc; + } + xdp_features_set_redirect_target(netdev, false); + } else if (old_bpf_prog) { + xdp_features_clear_redirect_target(netdev); + netif_dbg(adapter, drv, adapter->netdev, "Removing XDP program\n"); + + rc = ena_destroy_and_free_all_xdp_queues(adapter); + if (rc) + return rc; + } + + prev_mtu = netdev->max_mtu; + netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu; + + if (!old_bpf_prog) + netif_info(adapter, drv, adapter->netdev, + "XDP program is set, changing the max_mtu from %d to %d", + prev_mtu, netdev->max_mtu); + + } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) { + netif_err(adapter, drv, adapter->netdev, + "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on", + netdev->mtu, ENA_XDP_MAX_MTU); + NL_SET_ERR_MSG_MOD(bpf->extack, + "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info"); + return -EINVAL; + } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) { + netif_err(adapter, drv, adapter->netdev, + "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. 
The current queue count (%d), the maximal queue count (%d)\n", + adapter->num_io_queues, adapter->max_num_io_queues); + NL_SET_ERR_MSG_MOD(bpf->extack, + "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info"); + return -EINVAL; + } + + return 0; +} + +/* This is the main xdp callback, it's used by the kernel to set/unset the xdp + * program as well as to query the current xdp program id. + */ +int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf) +{ + switch (bpf->command) { + case XDP_SETUP_PROG: + return ena_xdp_set(netdev, bpf); + default: + return -EINVAL; + } + return 0; +} + +static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget) +{ + u32 total_done = 0; + u16 next_to_clean; + int tx_pkts = 0; + u16 req_id; + int rc; + + if (unlikely(!tx_ring)) + return 0; + next_to_clean = tx_ring->next_to_clean; + + while (tx_pkts < budget) { + struct ena_tx_buffer *tx_info; + struct xdp_frame *xdpf; + + rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, + &req_id); + if (rc) { + if (unlikely(rc == -EINVAL)) + handle_invalid_req_id(tx_ring, req_id, NULL, true); + break; + } + + /* validate that the request id points to a valid xdp_frame */ + rc = validate_xdp_req_id(tx_ring, req_id); + if (rc) + break; + + tx_info = &tx_ring->tx_buffer_info[req_id]; + + tx_info->last_jiffies = 0; + + xdpf = tx_info->xdpf; + tx_info->xdpf = NULL; + ena_unmap_tx_buff(tx_ring, tx_info); + xdp_return_frame(xdpf); + + tx_pkts++; + total_done += tx_info->tx_descs; + tx_ring->free_ids[next_to_clean] = req_id; + next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, + tx_ring->ring_size); + + netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id); + } + + tx_ring->next_to_clean = next_to_clean; + ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); + ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); + + netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_poll: q %d done. total pkts: %d\n", + tx_ring->qid, tx_pkts); + + return tx_pkts; +} + +/* This is the XDP napi callback. XDP queues use a separate napi callback + * than Rx/Tx queues. 
+ */ +int ena_xdp_io_poll(struct napi_struct *napi, int budget) +{ + struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); + struct ena_ring *tx_ring; + u32 work_done; + int ret; + + tx_ring = ena_napi->tx_ring; + + if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || + test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { + napi_complete_done(napi, 0); + return 0; + } + + work_done = ena_clean_xdp_irq(tx_ring, budget); + + /* If the device is about to reset or down, avoid unmask + * the interrupt and return 0 so NAPI won't reschedule + */ + if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) { + napi_complete_done(napi, 0); + ret = 0; + } else if (budget > work_done) { + ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1, + &tx_ring->syncp); + if (napi_complete_done(napi, work_done)) + ena_unmask_interrupt(tx_ring, NULL); + + ena_update_ring_numa_node(tx_ring, NULL); + ret = work_done; + } else { + ret = budget; + } + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.tx_poll++; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->tx_stats.last_napi_jiffies = jiffies; + + return ret; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.h b/drivers/net/ethernet/amazon/ena/ena_xdp.h new file mode 100644 index 0000000000..cfd8272848 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_xdp.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* + * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved. + */ + +#ifndef ENA_XDP_H +#define ENA_XDP_H + +#include "ena_netdev.h" +#include <linux/bpf_trace.h> + +/* The max MTU size is configured to be the ethernet frame size without + * the overhead of the ethernet header, which can have a VLAN header, and + * a frame check sequence (FCS). 
+ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE + */ +#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \ + VLAN_HLEN - XDP_PACKET_HEADROOM - \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \ + ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues)) + +enum ENA_XDP_ACTIONS { + ENA_XDP_PASS = 0, + ENA_XDP_TX = BIT(0), + ENA_XDP_REDIRECT = BIT(1), + ENA_XDP_DROP = BIT(2) +}; + +#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT) + +int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter); +void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter, + struct bpf_prog *prog, + int first, int count); +int ena_xdp_io_poll(struct napi_struct *napi, int budget); +int ena_xdp_xmit_frame(struct ena_ring *tx_ring, + struct ena_adapter *adapter, + struct xdp_frame *xdpf, + int flags); +int ena_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags); +int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf); +int ena_xdp_register_rxq_info(struct ena_ring *rx_ring); +void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring); + +enum ena_xdp_errors_t { + ENA_XDP_ALLOWED = 0, + ENA_XDP_CURRENT_MTU_TOO_LARGE, + ENA_XDP_NO_ENOUGH_QUEUES, +}; + +static inline bool ena_xdp_present(struct ena_adapter *adapter) +{ + return !!adapter->xdp_bpf_prog; +} + +static inline bool ena_xdp_present_ring(struct ena_ring *ring) +{ + return !!ring->xdp_bpf_prog; +} + +static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter, + u32 queues) +{ + return 2 * queues <= adapter->max_num_io_queues; +} + +static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter) +{ + enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED; + + if (adapter->netdev->mtu > ENA_XDP_MAX_MTU) + rc = ENA_XDP_CURRENT_MTU_TOO_LARGE; + else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) + rc = ENA_XDP_NO_ENOUGH_QUEUES; + + return rc; +} + +static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) +{ + u32 verdict = ENA_XDP_PASS; + struct bpf_prog *xdp_prog; + struct ena_ring *xdp_ring; + struct xdp_frame *xdpf; + u64 *xdp_stat; + + xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); + + verdict = bpf_prog_run_xdp(xdp_prog, xdp); + + switch (verdict) { + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); + xdp_stat = &rx_ring->rx_stats.xdp_aborted; + verdict = ENA_XDP_DROP; + break; + } + + /* Find xmit queue */ + xdp_ring = rx_ring->xdp_ring; + + /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ + spin_lock(&xdp_ring->xdp_tx_lock); + + if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf, + XDP_XMIT_FLUSH)) + xdp_return_frame(xdpf); + + spin_unlock(&xdp_ring->xdp_tx_lock); + xdp_stat = &rx_ring->rx_stats.xdp_tx; + verdict = ENA_XDP_TX; + break; + case XDP_REDIRECT: + if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { + xdp_stat = &rx_ring->rx_stats.xdp_redirect; + verdict = ENA_XDP_REDIRECT; + break; + } + trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); + xdp_stat = &rx_ring->rx_stats.xdp_aborted; + verdict = ENA_XDP_DROP; + break; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); + xdp_stat = &rx_ring->rx_stats.xdp_aborted; + verdict = ENA_XDP_DROP; + break; + case XDP_DROP: + xdp_stat = &rx_ring->rx_stats.xdp_drop; + verdict = ENA_XDP_DROP; + 
break; + case XDP_PASS: + xdp_stat = &rx_ring->rx_stats.xdp_pass; + verdict = ENA_XDP_PASS; + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict); + xdp_stat = &rx_ring->rx_stats.xdp_invalid; + verdict = ENA_XDP_DROP; + } + + ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); + + return verdict; +} +#endif /* ENA_XDP_H */ |