author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
commit      8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree        8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/net/ethernet/xilinx
parent      Adding debian version 6.7.12-1. (diff)
download    linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.tar.xz
            linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.zip
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/xilinx')
-rw-r--r--   drivers/net/ethernet/xilinx/Kconfig                 |   1
-rw-r--r--   drivers/net/ethernet/xilinx/xilinx_axienet.h        |  35
-rw-r--r--   drivers/net/ethernet/xilinx/xilinx_axienet_main.c   | 667
3 files changed, 582 insertions, 121 deletions
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 0014729b88..35d96c633a 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -26,6 +26,7 @@ config XILINX_EMACLITE
 config XILINX_AXI_EMAC
 	tristate "Xilinx 10/100/1000 AXI Ethernet support"
 	depends on HAS_IOMEM
+	depends on XILINX_DMA
 	select PHYLINK
 	help
 	  This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 575ff9de89..807ead6785 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/if_vlan.h>
 #include <linux/phylink.h>
+#include <linux/skbuff.h>
 
 /* Packet size info */
 #define XAE_HDR_SIZE			14 /* Size of Ethernet header */
@@ -379,6 +380,22 @@ struct axidma_bd {
 #define XAE_NUM_MISC_CLOCKS 3
 
 /**
+ * struct skbuf_dma_descriptor - skb for each dma descriptor
+ * @sgl: Pointer for sglist.
+ * @desc: Pointer to dma descriptor.
+ * @dma_address: dma address of sglist.
+ * @skb: Pointer to SKB transferred using DMA
+ * @sg_len: number of entries in the sglist.
+ */
+struct skbuf_dma_descriptor {
+	struct scatterlist sgl[MAX_SKB_FRAGS + 1];
+	struct dma_async_tx_descriptor *desc;
+	dma_addr_t dma_address;
+	struct sk_buff *skb;
+	int sg_len;
+};
+
+/**
  * struct axienet_local - axienet private per device data
  * @ndev:	Pointer for net_device to which it will be attached.
  * @dev:	Pointer to device structure
@@ -435,6 +452,15 @@ struct axidma_bd {
  * @coalesce_usec_rx:	IRQ coalesce delay for RX
  * @coalesce_count_tx:	Store the irq coalesce on TX side.
  * @coalesce_usec_tx:	IRQ coalesce delay for TX
+ * @use_dmaengine: flag to check dmaengine framework usage.
+ * @tx_chan:	TX DMA channel.
+ * @rx_chan:	RX DMA channel.
+ * @tx_skb_ring: Pointer to TX skb ring buffer array.
+ * @rx_skb_ring: Pointer to RX skb ring buffer array.
+ * @tx_ring_head: TX skb ring buffer head index.
+ * @tx_ring_tail: TX skb ring buffer tail index.
+ * @rx_ring_head: RX skb ring buffer head index.
+ * @rx_ring_tail: RX skb ring buffer tail index.
  */
 struct axienet_local {
 	struct net_device *ndev;
@@ -499,6 +525,15 @@ struct axienet_local {
 	u32 coalesce_usec_rx;
 	u32 coalesce_count_tx;
 	u32 coalesce_usec_tx;
+	u8  use_dmaengine;
+	struct dma_chan *tx_chan;
+	struct dma_chan *rx_chan;
+	struct skbuf_dma_descriptor **tx_skb_ring;
+	struct skbuf_dma_descriptor **rx_skb_ring;
+	int tx_ring_head;
+	int tx_ring_tail;
+	int rx_ring_head;
+	int rx_ring_tail;
 };
 
 /**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index bf6e339904..aaf780fd4f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -38,6 +38,11 @@
 #include <linux/phy.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/circ_buf.h>
+#include <net/netdev_queues.h>
 
 #include "xilinx_axienet.h"
 
@@ -47,6 +52,9 @@
 #define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
 #define TX_BD_NUM_MAX			4096
 #define RX_BD_NUM_MAX			4096
+#define DMA_NUM_APP_WORDS		5
+#define LEN_APP				4
+#define RX_BUF_NUM_DEFAULT		128
 
 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
 #define DRIVER_NAME		"xaxienet"
@@ -55,6 +63,8 @@
 
 #define AXIENET_REGS_N		40
 
+static void axienet_rx_submit_desc(struct net_device *ndev);
+
 /* Match table for of_platform binding */
 static const struct of_device_id axienet_of_match[] = {
 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
@@ -120,6 +130,16 @@ static struct axienet_option axienet_options[] = {
 	{}
 };
 
+static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
+{
+	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
+}
+
+static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
+{
+	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
+}
+
 /**
  * axienet_dma_in32 - Memory mapped Axi DMA register read
  * @lp:		Pointer to axienet local structure
@@ -589,10 +609,6 @@ static int axienet_device_reset(struct net_device *ndev)
 	struct axienet_local *lp = netdev_priv(ndev);
 	int ret;
 
-	ret = __axienet_device_reset(lp);
-	if (ret)
-		return ret;
-
 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 	lp->options |= XAE_OPTION_VLAN;
 	lp->options &= (~XAE_OPTION_JUMBO);
@@ -606,11 +622,17 @@ static int axienet_device_reset(struct net_device *ndev)
 		lp->options |= XAE_OPTION_JUMBO;
 	}
 
-	ret = axienet_dma_bd_init(ndev);
-	if (ret) {
-		netdev_err(ndev, "%s: descriptor allocation failed\n",
-			   __func__);
-		return ret;
+	if (!lp->use_dmaengine) {
+		ret = __axienet_device_reset(lp);
+		if (ret)
+			return ret;
+
+		ret = axienet_dma_bd_init(ndev);
+		if (ret) {
+			netdev_err(ndev, "%s: descriptor allocation failed\n",
+				   __func__);
+			return ret;
+		}
 	}
 
 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -726,6 +748,128 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 }
 
 /**
+ * axienet_dma_tx_cb - DMA engine callback for TX channel.
+ * @data:	Pointer to the axienet_local structure.
+ * @result:	error reporting through dmaengine_result.
+ * This function is called by dmaengine driver for TX channel to notify
+ * that the transmit is done.
+ */
+static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
+{
+	struct skbuf_dma_descriptor *skbuf_dma;
+	struct axienet_local *lp = data;
+	struct netdev_queue *txq;
+	int len;
+
+	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
+	len = skbuf_dma->skb->len;
+	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
+	u64_stats_update_begin(&lp->tx_stat_sync);
+	u64_stats_add(&lp->tx_bytes, len);
+	u64_stats_add(&lp->tx_packets, 1);
+	u64_stats_update_end(&lp->tx_stat_sync);
+	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
+	dev_consume_skb_any(skbuf_dma->skb);
+	netif_txq_completed_wake(txq, 1, len,
+				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+				 2 * MAX_SKB_FRAGS);
+}
+
+/**
+ * axienet_start_xmit_dmaengine - Starts the transmission.
+ * @skb:	sk_buff pointer that contains data to be Txed.
+ * @ndev:	Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK on success or any non-space error.
+ *	   NETDEV_TX_BUSY when no free element is available in the
+ *	   TX skb ring buffer.
+ *
+ * This function is invoked to initiate transmission. It maps the
+ * skb, registers the dma callback and submits the dma transaction.
+ * Additionally, if checksum offloading is supported, it populates
+ * AXI Stream Control fields with appropriate values.
+ */
+static netdev_tx_t
+axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
+	struct axienet_local *lp = netdev_priv(ndev);
+	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
+	struct skbuf_dma_descriptor *skbuf_dma;
+	struct dma_device *dma_dev;
+	struct netdev_queue *txq;
+	u32 csum_start_off;
+	u32 csum_index_off;
+	int sg_len;
+	int ret;
+
+	dma_dev = lp->tx_chan->device;
+	sg_len = skb_shinfo(skb)->nr_frags + 1;
+	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
+		netif_stop_queue(ndev);
+		if (net_ratelimit())
+			netdev_warn(ndev, "TX ring unexpectedly full\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
+	if (!skbuf_dma)
+		goto xmit_error_drop_skb;
+
+	lp->tx_ring_head++;
+	sg_init_table(skbuf_dma->sgl, sg_len);
+	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
+	if (ret < 0)
+		goto xmit_error_drop_skb;
+
+	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
+	if (!ret)
+		goto xmit_error_drop_skb;
+
+	/* Fill up app fields for checksum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+			/* Tx Full Checksum Offload Enabled */
+			app_metadata[0] |= 2;
+		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+			csum_start_off = skb_transport_offset(skb);
+			csum_index_off = csum_start_off + skb->csum_offset;
+			/* Tx Partial Checksum Offload Enabled */
+			app_metadata[0] |= 1;
+			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
+		}
+	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
+	}
+
+	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
+						    sg_len, DMA_MEM_TO_DEV,
+						    DMA_PREP_INTERRUPT, (void *)app_metadata);
+	if (!dma_tx_desc)
+		goto xmit_error_unmap_sg;
+
+	skbuf_dma->skb = skb;
+	skbuf_dma->sg_len = sg_len;
+	dma_tx_desc->callback_param = lp;
+	dma_tx_desc->callback_result = axienet_dma_tx_cb;
+	dmaengine_submit(dma_tx_desc);
+	dma_async_issue_pending(lp->tx_chan);
+	txq = skb_get_tx_queue(lp->ndev, skb);
+	netdev_tx_sent_queue(txq, skb->len);
+
+	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+
+	return NETDEV_TX_OK;
+
+xmit_error_unmap_sg:
+	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
+xmit_error_drop_skb:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/**
  * axienet_tx_poll - Invoked once a transmit is completed by the
  * Axi DMA Tx channel.
  * @napi:	Pointer to NAPI structure.
@@ -892,6 +1036,42 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 }
 
 /**
+ * axienet_dma_rx_cb - DMA engine callback for RX channel.
+ * @data:	Pointer to the skbuf_dma_descriptor structure.
+ * @result:	error reporting through dmaengine_result.
+ * This function is called by the dmaengine driver for the RX channel
+ * to notify that a packet has been received.
+ */
+static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+{
+	struct skbuf_dma_descriptor *skbuf_dma;
+	size_t meta_len, meta_max_len, rx_len;
+	struct axienet_local *lp = data;
+	struct sk_buff *skb;
+	u32 *app_metadata;
+
+	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
+	skb = skbuf_dma->skb;
+	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
+						       &meta_max_len);
+	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
+			 DMA_FROM_DEVICE);
+	/* TODO: Derive app word index programmatically */
+	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
+	skb_put(skb, rx_len);
+	skb->protocol = eth_type_trans(skb, lp->ndev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	__netif_rx(skb);
+	u64_stats_update_begin(&lp->rx_stat_sync);
+	u64_stats_add(&lp->rx_packets, 1);
+	u64_stats_add(&lp->rx_bytes, rx_len);
+	u64_stats_update_end(&lp->rx_stat_sync);
+	axienet_rx_submit_desc(lp->ndev);
+	dma_async_issue_pending(lp->rx_chan);
+}
+
+/**
  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
  * @napi:	Pointer to NAPI structure.
  * @budget:	Max number of RX packets to process.
@@ -1125,40 +1305,158 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
 static void axienet_dma_err_handler(struct work_struct *work);
 
 /**
- * axienet_open - Driver open routine.
- * @ndev:	Pointer to net_device structure
+ * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
+ * Allocate an skbuff, map it into the scatterlist, obtain a descriptor
+ * and then add the callback information and submit the descriptor.
+ *
+ * @ndev:	net_device pointer
+ *
+ */
+static void axienet_rx_submit_desc(struct net_device *ndev)
+{
+	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct skbuf_dma_descriptor *skbuf_dma;
+	struct sk_buff *skb;
+	dma_addr_t addr;
+
+	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
+	if (!skbuf_dma)
+		return;
+
+	lp->rx_ring_head++;
+	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+	if (!skb)
+		return;
+
+	sg_init_table(skbuf_dma->sgl, 1);
+	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(lp->dev, addr))) {
+		if (net_ratelimit())
+			netdev_err(ndev, "DMA mapping error\n");
+		goto rx_submit_err_free_skb;
+	}
+	sg_dma_address(skbuf_dma->sgl) = addr;
+	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
+	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
+					      1, DMA_DEV_TO_MEM,
+					      DMA_PREP_INTERRUPT);
+	if (!dma_rx_desc)
+		goto rx_submit_err_unmap_skb;
+
+	skbuf_dma->skb = skb;
+	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
+	skbuf_dma->desc = dma_rx_desc;
+	dma_rx_desc->callback_param = lp;
+	dma_rx_desc->callback_result = axienet_dma_rx_cb;
+	dmaengine_submit(dma_rx_desc);
+
+	return;
+
+rx_submit_err_unmap_skb:
+	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
+rx_submit_err_free_skb:
+	dev_kfree_skb(skb);
+}
+
+/**
+ * axienet_init_dmaengine - init the dmaengine code.
+ * @ndev:	Pointer to net_device structure
  *
  * Return: 0, on success.
- *	    non-zero error value on failure
+ *	   non-zero error value on failure
  *
- * This is the driver open routine. It calls phylink_start to start the
- * PHY device.
- * It also allocates interrupt service routines, enables the interrupt lines
- * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
- * descriptors are initialized.
+ * This is the dmaengine initialization code.
  */
-static int axienet_open(struct net_device *ndev)
+static int axienet_init_dmaengine(struct net_device *ndev)
 {
-	int ret;
 	struct axienet_local *lp = netdev_priv(ndev);
+	struct skbuf_dma_descriptor *skbuf_dma;
+	int i, ret;
 
-	dev_dbg(&ndev->dev, "axienet_open()\n");
+	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+	if (IS_ERR(lp->tx_chan)) {
+		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
+		return PTR_ERR(lp->tx_chan);
+	}
 
-	/* When we do an Axi Ethernet reset, it resets the complete core
-	 * including the MDIO. MDIO must be disabled before resetting.
-	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
-	 */
-	axienet_lock_mii(lp);
-	ret = axienet_device_reset(ndev);
-	axienet_unlock_mii(lp);
+	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
+	if (IS_ERR(lp->rx_chan)) {
+		ret = PTR_ERR(lp->rx_chan);
+		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
+		goto err_dma_release_tx;
+	}
 
-	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
-	if (ret) {
-		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
-		return ret;
+	lp->tx_ring_tail = 0;
+	lp->tx_ring_head = 0;
+	lp->rx_ring_tail = 0;
+	lp->rx_ring_head = 0;
+	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
+				  GFP_KERNEL);
+	if (!lp->tx_skb_ring) {
+		ret = -ENOMEM;
+		goto err_dma_release_rx;
+	}
+	for (i = 0; i < TX_BD_NUM_MAX; i++) {
+		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
+		if (!skbuf_dma) {
+			ret = -ENOMEM;
+			goto err_free_tx_skb_ring;
+		}
+		lp->tx_skb_ring[i] = skbuf_dma;
 	}
 
-	phylink_start(lp->phylink);
+	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
+				  GFP_KERNEL);
+	if (!lp->rx_skb_ring) {
+		ret = -ENOMEM;
+		goto err_free_tx_skb_ring;
+	}
+	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
+		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
+		if (!skbuf_dma) {
+			ret = -ENOMEM;
+			goto err_free_rx_skb_ring;
+		}
+		lp->rx_skb_ring[i] = skbuf_dma;
+	}
+	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
+	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+		axienet_rx_submit_desc(ndev);
+	dma_async_issue_pending(lp->rx_chan);
+
+	return 0;
+
+err_free_rx_skb_ring:
+	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+		kfree(lp->rx_skb_ring[i]);
+	kfree(lp->rx_skb_ring);
+err_free_tx_skb_ring:
+	for (i = 0; i < TX_BD_NUM_MAX; i++)
+		kfree(lp->tx_skb_ring[i]);
+	kfree(lp->tx_skb_ring);
+err_dma_release_rx:
+	dma_release_channel(lp->rx_chan);
+err_dma_release_tx:
+	dma_release_channel(lp->tx_chan);
+	return ret;
+}
+
+/**
+ * axienet_init_legacy_dma - init the dma legacy code.
+ * @ndev:	Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ *	    non-zero error value on failure
+ *
+ * This is the dma initialization code. It also allocates interrupt
+ * service routines, enables the interrupt lines and ISR handling.
+ *
+ */
+static int axienet_init_legacy_dma(struct net_device *ndev)
+{
+	int ret;
+	struct axienet_local *lp = netdev_priv(ndev);
 
 	/* Enable worker thread for Axi DMA error handling */
 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
@@ -1193,14 +1491,77 @@ err_rx_irq:
 err_tx_irq:
 	napi_disable(&lp->napi_tx);
 	napi_disable(&lp->napi_rx);
-	phylink_stop(lp->phylink);
-	phylink_disconnect_phy(lp->phylink);
 	cancel_work_sync(&lp->dma_err_task);
 	dev_err(lp->dev, "request_irq() failed\n");
 	return ret;
 }
 
 /**
+ * axienet_open - Driver open routine.
+ * @ndev:	Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ *	    non-zero error value on failure
+ *
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
+ * It also allocates interrupt service routines, enables the interrupt lines
+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
+ * descriptors are initialized.
+ */
+static int axienet_open(struct net_device *ndev)
+{
+	int ret;
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	dev_dbg(&ndev->dev, "%s\n", __func__);
+
+	/* When we do an Axi Ethernet reset, it resets the complete core
+	 * including the MDIO. MDIO must be disabled before resetting.
+	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+	 */
+	axienet_lock_mii(lp);
+	ret = axienet_device_reset(ndev);
+	axienet_unlock_mii(lp);
+
+	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+	if (ret) {
+		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+		return ret;
+	}
+
+	phylink_start(lp->phylink);
+
+	if (lp->use_dmaengine) {
+		/* Enable interrupts for Axi Ethernet core (if defined) */
+		if (lp->eth_irq > 0) {
+			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+					  ndev->name, ndev);
+			if (ret)
+				goto err_phy;
+		}
+
+		ret = axienet_init_dmaengine(ndev);
+		if (ret < 0)
+			goto err_free_eth_irq;
+	} else {
+		ret = axienet_init_legacy_dma(ndev);
+		if (ret)
+			goto err_phy;
+	}
+
+	return 0;
+
+err_free_eth_irq:
+	if (lp->eth_irq > 0)
+		free_irq(lp->eth_irq, ndev);
+err_phy:
+	phylink_stop(lp->phylink);
+	phylink_disconnect_phy(lp->phylink);
+	return ret;
+}
+
+/**
  * axienet_stop - Driver stop routine.
  * @ndev:	Pointer to net_device structure
  *
@@ -1213,11 +1574,14 @@ err_tx_irq:
 static int axienet_stop(struct net_device *ndev)
 {
 	struct axienet_local *lp = netdev_priv(ndev);
+	int i;
 
 	dev_dbg(&ndev->dev, "axienet_close()\n");
 
-	napi_disable(&lp->napi_tx);
-	napi_disable(&lp->napi_rx);
+	if (!lp->use_dmaengine) {
+		napi_disable(&lp->napi_tx);
+		napi_disable(&lp->napi_rx);
+	}
 
 	phylink_stop(lp->phylink);
 	phylink_disconnect_phy(lp->phylink);
@@ -1225,18 +1589,33 @@ static int axienet_stop(struct net_device *ndev)
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
-	axienet_dma_stop(lp);
+	if (!lp->use_dmaengine) {
+		axienet_dma_stop(lp);
+		cancel_work_sync(&lp->dma_err_task);
+		free_irq(lp->tx_irq, ndev);
+		free_irq(lp->rx_irq, ndev);
+		axienet_dma_bd_release(ndev);
+	} else {
+		dmaengine_terminate_sync(lp->tx_chan);
+		dmaengine_synchronize(lp->tx_chan);
+		dmaengine_terminate_sync(lp->rx_chan);
+		dmaengine_synchronize(lp->rx_chan);
+
+		for (i = 0; i < TX_BD_NUM_MAX; i++)
+			kfree(lp->tx_skb_ring[i]);
+		kfree(lp->tx_skb_ring);
+		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+			kfree(lp->rx_skb_ring[i]);
+		kfree(lp->rx_skb_ring);
+
+		dma_release_channel(lp->rx_chan);
+		dma_release_channel(lp->tx_chan);
+	}
 
 	axienet_iow(lp, XAE_IE_OFFSET, 0);
 
-	cancel_work_sync(&lp->dma_err_task);
-
 	if (lp->eth_irq > 0)
 		free_irq(lp->eth_irq, ndev);
-	free_irq(lp->tx_irq, ndev);
-	free_irq(lp->rx_irq, ndev);
-
-	axienet_dma_bd_release(ndev);
 
 	return 0;
 }
@@ -1333,6 +1712,18 @@ static const struct net_device_ops axienet_netdev_ops = {
 #endif
 };
 
+static const struct net_device_ops axienet_netdev_dmaengine_ops = {
+	.ndo_open = axienet_open,
+	.ndo_stop = axienet_stop,
+	.ndo_start_xmit = axienet_start_xmit_dmaengine,
+	.ndo_get_stats64 = axienet_get_stats64,
+	.ndo_change_mtu	= axienet_change_mtu,
+	.ndo_set_mac_address = netdev_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_eth_ioctl = axienet_ioctl,
+	.ndo_set_rx_mode = axienet_set_multicast_list,
+};
+
 /**
  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
  * @ndev:	Pointer to net_device structure
@@ -1412,14 +1803,16 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
-	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
-	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
-	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
-	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
-	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
-	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
-	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
-	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+	if (!lp->use_dmaengine) {
+		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+	}
 }
 
 static void
@@ -1863,7 +2256,6 @@ static int axienet_probe(struct platform_device *pdev)
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
 	ndev->features = NETIF_F_SG;
-	ndev->netdev_ops = &axienet_netdev_ops;
 	ndev->ethtool_ops = &axienet_ethtool_ops;
 
 	/* MTU range: 64 - 9000 */
@@ -1880,9 +2272,6 @@ static int axienet_probe(struct platform_device *pdev)
 	u64_stats_init(&lp->rx_stat_sync);
 	u64_stats_init(&lp->tx_stat_sync);
 
-	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
-	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
-
 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
 	if (!lp->axi_clk) {
 		/* For backward compatibility, if named AXI clock is not present,
@@ -2008,82 +2397,118 @@ static int axienet_probe(struct platform_device *pdev)
 		goto cleanup_clk;
 	}
 
-	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
-	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
-	if (np) {
-		struct resource dmares;
+	if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
 
-		ret = of_address_to_resource(np, 0, &dmares);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"unable to get DMA resource\n");
+		if (np) {
+			struct resource dmares;
+
+			ret = of_address_to_resource(np, 0, &dmares);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"unable to get DMA resource\n");
+				of_node_put(np);
+				goto cleanup_clk;
+			}
+			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+							     &dmares);
+			lp->rx_irq = irq_of_parse_and_map(np, 1);
+			lp->tx_irq = irq_of_parse_and_map(np, 0);
+			of_node_put(np);
+			lp->eth_irq = platform_get_irq_optional(pdev, 0);
+		} else {
+			/* Check for these resources directly on the Ethernet node. */
+			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+			lp->rx_irq = platform_get_irq(pdev, 1);
+			lp->tx_irq = platform_get_irq(pdev, 0);
+			lp->eth_irq = platform_get_irq_optional(pdev, 2);
+		}
+		if (IS_ERR(lp->dma_regs)) {
+			dev_err(&pdev->dev, "could not map DMA regs\n");
+			ret = PTR_ERR(lp->dma_regs);
+			goto cleanup_clk;
+		}
+		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
+			dev_err(&pdev->dev, "could not determine irqs\n");
+			ret = -ENOMEM;
+			goto cleanup_clk;
+		}
-		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
-						     &dmares);
-		lp->rx_irq = irq_of_parse_and_map(np, 1);
-		lp->tx_irq = irq_of_parse_and_map(np, 0);
-		of_node_put(np);
-		lp->eth_irq = platform_get_irq_optional(pdev, 0);
-	} else {
-		/* Check for these resources directly on the Ethernet node. */
-		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
-		lp->rx_irq = platform_get_irq(pdev, 1);
-		lp->tx_irq = platform_get_irq(pdev, 0);
-		lp->eth_irq = platform_get_irq_optional(pdev, 2);
-	}
-	if (IS_ERR(lp->dma_regs)) {
-		dev_err(&pdev->dev, "could not map DMA regs\n");
-		ret = PTR_ERR(lp->dma_regs);
-		goto cleanup_clk;
-	}
-	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
-		dev_err(&pdev->dev, "could not determine irqs\n");
-		ret = -ENOMEM;
-		goto cleanup_clk;
-	}
-
-	/* Reset core now that clocks are enabled, prior to accessing MDIO */
-	ret = __axienet_device_reset(lp);
-	if (ret)
-		goto cleanup_clk;
+
+		/* Reset core now that clocks are enabled, prior to accessing MDIO */
+		ret = __axienet_device_reset(lp);
+		if (ret)
+			goto cleanup_clk;
+
+		/* Autodetect the need for 64-bit DMA pointers.
+		 * When the IP is configured for a bus width bigger than 32 bits,
+		 * writing the MSB registers is mandatory, even if they are all 0.
+		 * We can detect this case by writing all 1's to one such register
+		 * and see if that sticks: when the IP is configured for 32 bits
+		 * only, those registers are RES0.
+		 * Those MSB registers were introduced in IP v7.1, which we check first.
+		 */
+		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
 
-	/* Autodetect the need for 64-bit DMA pointers.
-	 * When the IP is configured for a bus width bigger than 32 bits,
-	 * writing the MSB registers is mandatory, even if they are all 0.
-	 * We can detect this case by writing all 1's to one such register
-	 * and see if that sticks: when the IP is configured for 32 bits
-	 * only, those registers are RES0.
-	 * Those MSB registers were introduced in IP v7.1, which we check first.
-	 */
-	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
-		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
-
-		iowrite32(0x0, desc);
-		if (ioread32(desc) == 0) {	/* sanity check */
-			iowrite32(0xffffffff, desc);
-			if (ioread32(desc) > 0) {
-				lp->features |= XAE_FEATURE_DMA_64BIT;
-				addr_width = 64;
-				dev_info(&pdev->dev,
-					 "autodetected 64-bit DMA range\n");
-			}
+			iowrite32(0x0, desc);
+			if (ioread32(desc) == 0) {	/* sanity check */
+				iowrite32(0xffffffff, desc);
+				if (ioread32(desc) > 0) {
+					lp->features |= XAE_FEATURE_DMA_64BIT;
+					addr_width = 64;
+					dev_info(&pdev->dev,
+						 "autodetected 64-bit DMA range\n");
+				}
+				iowrite32(0x0, desc);
+			}
+		}
+		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+			ret = -EINVAL;
+			goto cleanup_clk;
+		}
-		}
-	}
-	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
-		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
-		ret = -EINVAL;
-		goto cleanup_clk;
-	}
 
-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
-	if (ret) {
-		dev_err(&pdev->dev, "No suitable DMA available\n");
-		goto cleanup_clk;
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+		if (ret) {
+			dev_err(&pdev->dev, "No suitable DMA available\n");
+			goto cleanup_clk;
+		}
+		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
+	} else {
+		struct xilinx_vdma_config cfg;
+		struct dma_chan *tx_chan;
+
+		lp->eth_irq = platform_get_irq_optional(pdev, 0);
+		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
+			ret = lp->eth_irq;
+			goto cleanup_clk;
+		}
+		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+		if (IS_ERR(tx_chan)) {
+			ret = PTR_ERR(tx_chan);
+			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
+			goto cleanup_clk;
+		}
+
+		cfg.reset = 1;
+		/* As name says VDMA but it has support for DMA channel reset */
+		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Reset channel failed\n");
+			dma_release_channel(tx_chan);
+			goto cleanup_clk;
+		}
+
+		dma_release_channel(tx_chan);
+		lp->use_dmaengine = 1;
 	}
 
+	if (lp->use_dmaengine)
+		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
+	else
+		ndev->netdev_ops = &axienet_netdev_ops;
 	/* Check for Ethernet core IRQ (optional) */
 	if (lp->eth_irq <= 0)
 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
@@ -2099,8 +2524,8 @@ static int axienet_probe(struct platform_device *pdev)
 	}
 
 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
-	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
 
 	ret = axienet_mdio_setup(lp);
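
A note on the skb ring bookkeeping introduced by this patch: tx_ring_head/tx_ring_tail and their RX counterparts are free-running indices, axienet_get_tx_desc()/axienet_get_rx_desc() mask them with a power-of-two ring size, and CIRC_SPACE() from linux/circ_buf.h computes the free slots without ever wrapping the counters. Below is a minimal, standalone userspace sketch of the same pattern; the ring size and int payload are illustrative, not the driver's.

#include <stdio.h>

#define RING_SIZE 128	/* power of two, like RX_BUF_NUM_DEFAULT */
/* Same formula as include/linux/circ_buf.h: free slots, one kept open. */
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

static int ring[RING_SIZE];

int main(void)
{
	unsigned int head = 0, tail = 0;
	long sum = 0;

	for (int i = 0; i < 300; i++) {
		if (!CIRC_SPACE(head, tail, RING_SIZE))	/* full: consume one */
			sum += ring[tail++ & (RING_SIZE - 1)];
		ring[head++ & (RING_SIZE - 1)] = i;	/* produce */
	}
	while (tail != head)	/* drain the remainder */
		sum += ring[tail++ & (RING_SIZE - 1)];
	printf("sum=%ld space=%u\n", sum, CIRC_SPACE(head, tail, RING_SIZE));
	return 0;
}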
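The new TX path follows the standard dmaengine slave sequence: map the buffer, prepare a descriptor, attach a result callback, submit, then issue pending. Below is a hedged sketch of that sequence for one linear buffer; my_tx_one() and my_done() are illustrative names, not from the patch, and error handling is trimmed to the essentials.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Completion callback: invoked by the dmaengine when the transfer
 * finishes; result reports success or the error class.
 */
static void my_done(void *param, const struct dmaengine_result *result)
{
	struct completion *done = param;

	if (result->result == DMA_TRANS_NOERROR)
		complete(done);
}

/* Submit one mem-to-device transfer on an already requested channel. */
static int my_tx_one(struct device *dev, struct dma_chan *chan,
		     void *buf, size_t len, struct completion *done)
{
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	desc = dmaengine_prep_slave_single(chan, addr, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return -EIO;
	}
	desc->callback_result = my_done;	/* result-reporting callback */
	desc->callback_param = done;
	dmaengine_submit(desc);			/* queue on the channel */
	dma_async_issue_pending(chan);		/* start if the channel is idle */
	return 0;
}

The patch itself calls dma_dev->device_prep_slave_sg() rather than the dmaengine_prep_slave_sg() wrapper: the wrapper passes a NULL context, while the Xilinx DMA driver uses that context argument to carry the AXI4-Stream app words (app_metadata) holding the checksum-offload flags.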
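On receive there is no length register to read: the AXI DMA returns the frame length in its status/app words, which the dmaengine framework exposes as client descriptor metadata. A hedged sketch of the readout follows; my_rx_len() is a hypothetical helper, and the app word index 4 (LEN_APP) with the low 16-bit mask mirrors axienet_dma_rx_cb() above.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Read the received byte count from a completed DEV_TO_MEM descriptor
 * whose channel supports client metadata (as xilinx_dma does).
 */
static size_t my_rx_len(struct dma_async_tx_descriptor *desc)
{
	size_t meta_len, meta_max_len;
	u32 *app;

	app = dmaengine_desc_get_metadata_ptr(desc, &meta_len, &meta_max_len);
	if (IS_ERR(app))
		return 0;

	return app[4] & 0xFFFF;	/* app word LEN_APP: low 16 bits = length */
}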