author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/engleder
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/engleder')
-rw-r--r--  drivers/net/ethernet/engleder/Kconfig           |   40
-rw-r--r--  drivers/net/ethernet/engleder/Makefile          |   10
-rw-r--r--  drivers/net/ethernet/engleder/tsnep.h           |  260
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ethtool.c   |  484
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_hw.h        |  246
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c      | 2641
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ptp.c       |  246
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_rxnfc.c     |  307
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_selftests.c |  811
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_tc.c        |  466
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_xdp.c       |   85
11 files changed, 5596 insertions, 0 deletions
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
new file mode 100644
index 0000000000..3df6bf476a
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Engleder network device configuration
+#
+
+config NET_VENDOR_ENGLEDER
+ bool "Engleder devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Engleder devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ENGLEDER
+
+config TSNEP
+ tristate "TSN endpoint support"
+ depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
+ select PAGE_POOL
+ help
+ Support for the Engleder TSN endpoint Ethernet MAC IP Core.
+
+ To compile this driver as a module, choose M here. The module will be
+ called tsnep.
+
+config TSNEP_SELFTESTS
+ bool "TSN endpoint self test support"
+ default n
+ depends on TSNEP
+ help
+ This enables self test support within the TSN endpoint driver.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ENGLEDER
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
new file mode 100644
index 0000000000..b98135f65e
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Engleder Ethernet drivers
+#
+
+obj-$(CONFIG_TSNEP) += tsnep.o
+
+tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
+ tsnep_rxnfc.o tsnep_xdp.o
+tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
new file mode 100644
index 0000000000..f188fba021
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#ifndef _TSNEP_H
+#define _TSNEP_H
+
+#include "tsnep_hw.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/miscdevice.h>
+#include <net/xdp.h>
+
+#define TSNEP "tsnep"
+
+#define TSNEP_RING_SIZE 256
+#define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1)
+#define TSNEP_RING_RX_REFILL 16
+#define TSNEP_RING_RX_REUSE (TSNEP_RING_SIZE - TSNEP_RING_SIZE / 4)
+#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
+#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
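For illustration, assuming the common PAGE_SIZE of 4096 (the page size is architecture-dependent, so these numbers are only a sketch), the ring geometry works out as:

    TSNEP_RING_ENTRIES_PER_PAGE = 4096 / 256    = 16 descriptors per page
    TSNEP_RING_PAGE_COUNT       = 256 / 16      = 16 DMA pages per ring
    TSNEP_RING_RX_REUSE         = 256 - 256 / 4 = 192 entries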
+
+struct tsnep_gcl {
+ void __iomem *addr;
+
+ u64 base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+
+ struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT];
+ int count;
+
+ u64 change_limit;
+
+ u64 start_time;
+ bool change;
+};
+
+enum tsnep_rxnfc_filter_type {
+ TSNEP_RXNFC_ETHER_TYPE,
+};
+
+struct tsnep_rxnfc_filter {
+ enum tsnep_rxnfc_filter_type type;
+ union {
+ u16 ether_type;
+ };
+};
+
+struct tsnep_rxnfc_rule {
+ struct list_head list;
+ struct tsnep_rxnfc_filter filter;
+ int queue_index;
+ int location;
+};
+
+struct tsnep_tx_entry {
+ struct tsnep_tx_desc *desc;
+ struct tsnep_tx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+ bool owner_user_flag;
+
+ u32 properties;
+
+ u32 type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ bool zc;
+ };
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_tx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+ int queue_index;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+ struct xsk_buff_pool *xsk_pool;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+};
+
+struct tsnep_rx_entry {
+ struct tsnep_rx_desc *desc;
+ struct tsnep_rx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+
+ u32 properties;
+
+ union {
+ struct page *page;
+ struct xdp_buff *xdp;
+ };
+ size_t len;
+ dma_addr_t dma;
+};
+
+struct tsnep_rx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+ int queue_index;
+ int tx_queue_index;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_rx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+ struct page_pool *page_pool;
+ struct page **page_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp_batch;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+ u32 multicast;
+ u32 alloc_failed;
+
+ struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xdp_rxq_zc;
+};
+
+struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+ char name[IFNAMSIZ + 16];
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+
+ struct napi_struct napi;
+
+ int irq;
+ u32 irq_mask;
+ void __iomem *irq_delay_addr;
+ u8 irq_delay;
+};
+
+struct tsnep_adapter {
+ struct net_device *netdev;
+ u8 mac_address[ETH_ALEN];
+ struct mii_bus *mdiobus;
+ bool suppress_preamble;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ int msg_enable;
+
+ struct platform_device *pdev;
+ struct device *dmadev;
+ void __iomem *addr;
+
+ bool gate_control;
+ /* gate control lock */
+ struct mutex gate_control_lock;
+ bool gate_control_active;
+ struct tsnep_gcl gcl[2];
+ int next_gcl;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* ptp clock lock */
+ spinlock_t ptp_lock;
+
+ /* RX flow classification rules lock */
+ struct mutex rxnfc_lock;
+ struct list_head rxnfc_rules;
+ int rxnfc_count;
+ int rxnfc_max;
+
+ struct bpf_prog *xdp_prog;
+
+ int num_tx_queues;
+ struct tsnep_tx tx[TSNEP_MAX_QUEUES];
+ int num_rx_queues;
+ struct tsnep_rx rx[TSNEP_MAX_QUEUES];
+
+ int num_queues;
+ struct tsnep_queue queue[TSNEP_MAX_QUEUES];
+};
+
+extern const struct ethtool_ops tsnep_ethtool_ops;
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter);
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+int tsnep_tc_init(struct tsnep_adapter *adapter);
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter);
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id);
+
+#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
+int tsnep_ethtool_get_test_count(void);
+void tsnep_ethtool_get_test_strings(u8 *data);
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data);
+#else
+static inline int tsnep_ethtool_get_test_count(void)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ /* not enabled */
+}
+
+static inline void tsnep_ethtool_self_test(struct net_device *dev,
+ struct ethtool_test *eth_test,
+ u64 *data)
+{
+ /* not enabled */
+}
+#endif /* CONFIG_TSNEP_SELFTESTS */
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs);
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue);
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool);
+void tsnep_disable_xsk(struct tsnep_queue *queue);
+
+#endif /* _TSNEP_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
new file mode 100644
index 0000000000..65ec1abc94
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_dropped",
+ "rx_multicast",
+ "rx_alloc_failed",
+ "rx_phy_errors",
+ "rx_forwarded_phy_errors",
+ "rx_invalid_frame_errors",
+ "tx_packets",
+ "tx_bytes",
+ "tx_dropped",
+};
+
+struct tsnep_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_alloc_failed;
+ u64 rx_phy_errors;
+ u64 rx_forwarded_phy_errors;
+ u64 rx_invalid_frame_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64))
+
+static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_%d_packets",
+ "rx_%d_bytes",
+ "rx_%d_dropped",
+ "rx_%d_multicast",
+ "rx_%d_alloc_failed",
+ "rx_%d_no_descriptor_errors",
+ "rx_%d_buffer_too_small_errors",
+ "rx_%d_fifo_overflow_errors",
+ "rx_%d_invalid_frame_errors",
+};
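The "%d" placeholders above are templates; tsnep_ethtool_get_strings() further below fills in the queue index with snprintf(). Illustrative expansion for RX queue 0:

    snprintf(data, ETH_GSTRING_LEN, "rx_%d_packets", 0); /* -> "rx_0_packets" */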
+
+struct tsnep_rx_queue_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_alloc_failed;
+ u64 rx_no_descriptor_errors;
+ u64 rx_buffer_too_small_errors;
+ u64 rx_fifo_overflow_errors;
+ u64 rx_invalid_frame_errors;
+};
+
+#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \
+ sizeof(u64))
+
+static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "tx_%d_packets",
+ "tx_%d_bytes",
+ "tx_%d_dropped",
+};
+
+struct tsnep_tx_queue_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \
+ sizeof(u64))
+
+static void tsnep_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int tsnep_ethtool_get_regs_len(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int len;
+ int num_additional_queues;
+
+ len = TSNEP_MAC_SIZE;
+
+	/* the first queue pair is within TSNEP_MAC_SIZE; only queues beyond
+	 * the first pair extend the register length, by TSNEP_QUEUE_SIZE each
+	 */
+ num_additional_queues =
+ max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;
+ len += TSNEP_QUEUE_SIZE * num_additional_queues;
+
+ return len;
+}
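Worked example for the length computed above, assuming a device with four TX and four RX queues (queue counts are read from hardware, so this configuration is an assumption):

    num_additional_queues = max(4, 4) - 1 = 3
    len = TSNEP_MAC_SIZE + 3 * TSNEP_QUEUE_SIZE
        = 0x4000 + 3 * 0x1000 = 0x7000 bytes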
+
+static void tsnep_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ regs->version = 1;
+
+ memcpy_fromio(p, adapter->addr, regs->len);
+}
+
+static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings));
+ data += sizeof(tsnep_stats_strings);
+
+ for (i = 0; i < rx_count; i++) {
+ for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_rx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_tx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ tsnep_ethtool_get_test_strings(data);
+ break;
+ }
+}
+
+static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ struct tsnep_stats tsnep_stats;
+ struct tsnep_rx_queue_stats tsnep_rx_queue_stats;
+ struct tsnep_tx_queue_stats tsnep_tx_queue_stats;
+ u32 reg;
+ int i;
+
+ memset(&tsnep_stats, 0, sizeof(tsnep_stats));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ tsnep_stats.rx_packets += adapter->rx[i].packets;
+ tsnep_stats.rx_bytes += adapter->rx[i].bytes;
+ tsnep_stats.rx_dropped += adapter->rx[i].dropped;
+ tsnep_stats.rx_multicast += adapter->rx[i].multicast;
+ tsnep_stats.rx_alloc_failed += adapter->rx[i].alloc_failed;
+ }
+ reg = ioread32(adapter->addr + ECM_STAT);
+ tsnep_stats.rx_phy_errors =
+ (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ tsnep_stats.rx_forwarded_phy_errors =
+ (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ tsnep_stats.rx_invalid_frame_errors =
+ (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tsnep_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_stats.tx_dropped += adapter->tx[i].dropped;
+ }
+ memcpy(data, &tsnep_stats, sizeof(tsnep_stats));
+ data += TSNEP_STATS_COUNT;
+
+ for (i = 0; i < rx_count; i++) {
+ memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats));
+ tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets;
+ tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
+ tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
+ tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
+ tsnep_rx_queue_stats.rx_alloc_failed =
+ adapter->rx[i].alloc_failed;
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ tsnep_rx_queue_stats.rx_no_descriptor_errors =
+ (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ tsnep_rx_queue_stats.rx_buffer_too_small_errors =
+ (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ tsnep_rx_queue_stats.rx_fifo_overflow_errors =
+ (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ tsnep_rx_queue_stats.rx_invalid_frame_errors =
+ (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ memcpy(data, &tsnep_rx_queue_stats,
+ sizeof(tsnep_rx_queue_stats));
+ data += TSNEP_RX_QUEUE_STATS_COUNT;
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats));
+ tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped;
+ memcpy(data, &tsnep_tx_queue_stats,
+ sizeof(tsnep_tx_queue_stats));
+ data += TSNEP_TX_QUEUE_STATS_COUNT;
+ }
+}
+
+static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count;
+ int tx_count;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ rx_count = adapter->num_rx_queues;
+ tx_count = adapter->num_tx_queues;
+ return TSNEP_STATS_COUNT +
+ TSNEP_RX_QUEUE_STATS_COUNT * rx_count +
+ TSNEP_TX_QUEUE_STATS_COUNT * tx_count;
+ case ETH_SS_TEST:
+ return tsnep_ethtool_get_test_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
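For ETH_SS_STATS this count matches the layout written by tsnep_ethtool_get_ethtool_stats() above; e.g., for an assumed configuration with two RX and two TX queues:

    TSNEP_STATS_COUNT + TSNEP_RX_QUEUE_STATS_COUNT * 2 + TSNEP_TX_QUEUE_STATS_COUNT * 2
        = 11 + 9 * 2 + 3 * 2 = 35 counters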
+
+static int tsnep_ethtool_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->rxnfc_count;
+ cmd->data = adapter->rxnfc_max;
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return tsnep_rxnfc_get_rule(adapter, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return tsnep_rxnfc_add_rule(adapter, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return tsnep_rxnfc_del_rule(adapter, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void tsnep_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ ch->max_combined = adapter->num_queues;
+ ch->combined_count = adapter->num_queues;
+}
+
+static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static struct tsnep_queue *tsnep_get_queue_with_tx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].tx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static struct tsnep_queue *tsnep_get_queue_with_rx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].rx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static int tsnep_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue;
+
+ queue = tsnep_get_queue_with_rx(adapter, 0);
+ if (queue)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ queue = tsnep_get_queue_with_tx(adapter, 0);
+ if (queue)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+ int retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* RX coalesce has priority for queues with TX and RX */
+ if (adapter->queue[i].rx)
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->rx_coalesce_usecs);
+ else
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_ethtool_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_rx);
+
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_tx);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+ int retval;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_rx, ec->rx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ /* RX coalesce has priority for queues with TX and RX */
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx && !queue_with_tx->rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_tx, ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+const struct ethtool_ops tsnep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = tsnep_ethtool_get_drvinfo,
+ .get_regs_len = tsnep_ethtool_get_regs_len,
+ .get_regs = tsnep_ethtool_get_regs,
+ .get_msglevel = tsnep_ethtool_get_msglevel,
+ .set_msglevel = tsnep_ethtool_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .self_test = tsnep_ethtool_self_test,
+ .get_strings = tsnep_ethtool_get_strings,
+ .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
+ .get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_rxnfc = tsnep_ethtool_get_rxnfc,
+ .set_rxnfc = tsnep_ethtool_set_rxnfc,
+ .get_channels = tsnep_ethtool_get_channels,
+ .get_ts_info = tsnep_ethtool_get_ts_info,
+ .get_coalesce = tsnep_ethtool_get_coalesce,
+ .set_coalesce = tsnep_ethtool_set_coalesce,
+ .get_per_queue_coalesce = tsnep_ethtool_get_per_queue_coalesce,
+ .set_per_queue_coalesce = tsnep_ethtool_set_per_queue_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
new file mode 100644
index 0000000000..55e1caf193
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* Hardware definition of TSNEP and EtherCAT MAC device */
+
+#ifndef _TSNEP_HW_H
+#define _TSNEP_HW_H
+
+#include <linux/types.h>
+
+/* type */
+#define ECM_TYPE 0x0000
+#define ECM_REVISION_MASK 0x000000FF
+#define ECM_REVISION_SHIFT 0
+#define ECM_VERSION_MASK 0x0000FF00
+#define ECM_VERSION_SHIFT 8
+#define ECM_QUEUE_COUNT_MASK 0x00070000
+#define ECM_QUEUE_COUNT_SHIFT 16
+#define ECM_GATE_CONTROL 0x02000000
+
+/* system time */
+#define ECM_SYSTEM_TIME_LOW 0x0008
+#define ECM_SYSTEM_TIME_HIGH 0x000C
+
+/* clock */
+#define ECM_CLOCK_RATE 0x0010
+#define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF
+#define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000
+
+/* interrupt */
+#define ECM_INT_ENABLE 0x0018
+#define ECM_INT_ACTIVE 0x001C
+#define ECM_INT_ACKNOWLEDGE 0x001C
+#define ECM_INT_LINK 0x00000020
+#define ECM_INT_TX_0 0x00000100
+#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_TXRX_SHIFT 2
+#define ECM_INT_ALL 0x7FFFFFFF
+#define ECM_INT_DISABLE 0x80000000
+
+/* reset */
+#define ECM_RESET 0x0020
+#define ECM_RESET_COMMON 0x00000001
+#define ECM_RESET_CHANNEL 0x00000100
+#define ECM_RESET_TXRX 0x00010000
+
+/* counter */
+#define ECM_COUNTER_LOW 0x0028
+#define ECM_COUNTER_HIGH 0x002C
+
+/* interrupt delay */
+#define ECM_INT_DELAY 0x0030
+#define ECM_INT_DELAY_MASK 0xF0
+#define ECM_INT_DELAY_SHIFT 4
+#define ECM_INT_DELAY_BASE_US 16
+#define ECM_INT_DELAY_OFFSET 1
+
+/* control and status */
+#define ECM_STATUS 0x0080
+#define ECM_LINK_MODE_OFF 0x01000000
+#define ECM_LINK_MODE_100 0x02000000
+#define ECM_LINK_MODE_1000 0x04000000
+#define ECM_NO_LINK 0x01000000
+#define ECM_LINK_MODE_MASK 0x06000000
+
+/* management data */
+#define ECM_MD_CONTROL 0x0084
+#define ECM_MD_STATUS 0x0084
+#define ECM_MD_PREAMBLE 0x00000001
+#define ECM_MD_READ 0x00000004
+#define ECM_MD_WRITE 0x00000002
+#define ECM_MD_ADDR_MASK 0x000000F8
+#define ECM_MD_ADDR_SHIFT 3
+#define ECM_MD_PHY_ADDR_MASK 0x00001F00
+#define ECM_MD_PHY_ADDR_SHIFT 8
+#define ECM_MD_BUSY 0x00000001
+#define ECM_MD_DATA_MASK 0xFFFF0000
+#define ECM_MD_DATA_SHIFT 16
+
+/* statistic */
+#define ECM_STAT 0x00B0
+#define ECM_STAT_RX_ERR_MASK 0x000000FF
+#define ECM_STAT_RX_ERR_SHIFT 0
+#define ECM_STAT_INV_FRM_MASK 0x0000FF00
+#define ECM_STAT_INV_FRM_SHIFT 8
+#define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000
+#define ECM_STAT_FWD_RX_ERR_SHIFT 16
+
+/* tsnep */
+#define TSNEP_MAC_SIZE 0x4000
+#define TSNEP_QUEUE_SIZE 0x1000
+#define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \
+ (__n) == 0 ? \
+ 0 : \
+ TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); })
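Illustrative expansion of TSNEP_QUEUE(): queue 0 lives inside the MAC register block, while each further queue gets its own TSNEP_QUEUE_SIZE slice behind it:

    TSNEP_QUEUE(0) = 0x0000
    TSNEP_QUEUE(1) = 0x4000 + 0x1000 * 0 = 0x4000
    TSNEP_QUEUE(2) = 0x4000 + 0x1000 * 1 = 0x5000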
+#define TSNEP_MAX_QUEUES 8
+#define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware actually supports 16k */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_OFFSET 128
+
+/* tsnep register */
+#define TSNEP_INFO 0x0100
+#define TSNEP_INFO_TX_TIME 0x00010000
+#define TSNEP_CONTROL 0x0108
+#define TSNEP_CONTROL_TX_RESET 0x00000001
+#define TSNEP_CONTROL_TX_ENABLE 0x00000002
+#define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010
+#define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020
+#define TSNEP_CONTROL_RX_RESET 0x00000100
+#define TSNEP_CONTROL_RX_ENABLE 0x00000200
+#define TSNEP_CONTROL_RX_DISABLE 0x00000400
+#define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000
+#define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000
+#define TSNEP_TX_DESC_ADDR_LOW 0x0140
+#define TSNEP_TX_DESC_ADDR_HIGH 0x0144
+#define TSNEP_RX_DESC_ADDR_LOW 0x0180
+#define TSNEP_RX_DESC_ADDR_HIGH 0x0184
+#define TSNEP_RESET_OWNER_COUNTER 0x01
+#define TSNEP_RX_STATISTIC 0x0190
+#define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF
+#define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24
+#define TSNEP_RX_STATISTIC_NO_DESC 0x0190
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
+#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
+#define TSNEP_MAC_ADDRESS_LOW 0x0800
+#define TSNEP_MAC_ADDRESS_HIGH 0x0804
+#define TSNEP_RX_FILTER 0x0806
+#define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001
+#define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002
+#define TSNEP_GC 0x0808
+#define TSNEP_GC_ENABLE_A 0x00000002
+#define TSNEP_GC_ENABLE_B 0x00000004
+#define TSNEP_GC_DISABLE 0x00000008
+#define TSNEP_GC_ENABLE_TIMEOUT 0x00000010
+#define TSNEP_GC_ACTIVE_A 0x00000002
+#define TSNEP_GC_ACTIVE_B 0x00000004
+#define TSNEP_GC_CHANGE_AB 0x00000008
+#define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010
+#define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020
+#define TSNEP_GC_LIST_ERROR 0x00000080
+#define TSNEP_GC_OPEN 0x00FF0000
+#define TSNEP_GC_OPEN_SHIFT 16
+#define TSNEP_GC_NEXT_OPEN 0xFF000000
+#define TSNEP_GC_NEXT_OPEN_SHIFT 24
+#define TSNEP_GC_TIMEOUT 131072
+#define TSNEP_GC_TIME 0x080C
+#define TSNEP_GC_CHANGE 0x0810
+#define TSNEP_GCL_A 0x2000
+#define TSNEP_GCL_B 0x2800
+#define TSNEP_GCL_SIZE SZ_2K
+#define TSNEP_RX_ASSIGN 0x0840
+#define TSNEP_RX_ASSIGN_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_QUEUE_MASK 0x00000006
+#define TSNEP_RX_ASSIGN_QUEUE_SHIFT 1
+#define TSNEP_RX_ASSIGN_OFFSET 1
+#define TSNEP_RX_ASSIGN_ETHER_TYPE 0x0880
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET 2
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT 2
+
+/* tsnep gate control list operation */
+struct tsnep_gcl_operation {
+ u32 properties;
+ u32 interval;
+};
+
+#define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation))
+#define TSNEP_GCL_MASK 0x000000FF
+#define TSNEP_GCL_INSERT 0x20000000
+#define TSNEP_GCL_CHANGE 0x40000000
+#define TSNEP_GCL_LAST 0x80000000
+#define TSNEP_GCL_MIN_INTERVAL 32
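With two u32 members, sizeof(struct tsnep_gcl_operation) is 8 bytes, so each of the two gate control lists holds:

    TSNEP_GCL_COUNT = TSNEP_GCL_SIZE / 8 = 2048 / 8 = 256 operations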
+
+/* tsnep TX/RX descriptor */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_SIZE_DATA_AFTER 2048
+#define TSNEP_DESC_OFFSET 128
+#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000
+#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30
+#define TSNEP_DESC_LENGTH_MASK 0x00003FFF
+#define TSNEP_DESC_INTERRUPT_FLAG 0x00040000
+#define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000
+#define TSNEP_DESC_NO_LINK_FLAG 0x01000000
+
+/* tsnep TX descriptor */
+struct tsnep_tx_desc {
+ __le32 properties;
+ __le32 more_properties;
+ __le32 reserved[2];
+ __le64 next;
+ __le64 tx;
+};
+
+#define TSNEP_TX_DESC_OWNER_MASK 0xE0000000
+#define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000
+#define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000
+#define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000
+
+/* tsnep TX descriptor writeback */
+struct tsnep_tx_desc_wb {
+ __le32 properties;
+ __le32 reserved1;
+ __le64 counter;
+ __le64 timestamp;
+ __le32 dma_delay;
+ __le32 reserved2;
+};
+
+#define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18
+#define TSNEP_TX_DESC_DMA_DELAY_NS 64
+
+/* tsnep RX descriptor */
+struct tsnep_rx_desc {
+ __le32 properties;
+ __le32 reserved[3];
+ __le64 next;
+ __le64 rx;
+};
+
+#define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC
+
+/* tsnep RX descriptor writeback */
+struct tsnep_rx_desc_wb {
+ __le32 properties;
+ __le32 reserved[7];
+};
+
+/* tsnep RX inline meta */
+struct tsnep_rx_inline {
+ __le64 counter;
+ __le64 timestamp;
+};
+
+#define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline))
+
+#endif /* _TSNEP_HW_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
new file mode 100644
index 0000000000..08e113e785
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -0,0 +1,2641 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* TSN endpoint Ethernet MAC driver
+ *
+ * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
+ * communication. It is designed for endpoints within TSN (Time Sensitive
+ * Networking) networks; e.g., for PLCs in industrial automation.
+ *
+ * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
+ * by the driver.
+ *
+ * More information can be found here:
+ * - www.embedded-experts.at/tsn
+ * - www.engleder-embedded.com
+ */
+
+#include "tsnep.h"
+#include "tsnep_hw.h"
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
+#include <net/xdp_sock_drv.h>
+
+#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+/* XSK buffer shall store at least a Q-in-Q frame */
+#define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \
+ ETH_FRAME_LEN + ETH_FCS_LEN + \
+ VLAN_HLEN * 2, 4))
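Worked values for a typical 64-bit build (NET_SKB_PAD, NET_IP_ALIGN and PAGE_SIZE are configuration-dependent, so the numbers are assumptions for illustration; XDP_PACKET_HEADROOM is 256):

    TSNEP_RX_OFFSET       = max(NET_SKB_PAD, 256) + 2 = 258
    TSNEP_HEADROOM        = ALIGN(258, 4) = 260
    TSNEP_XSK_RX_BUF_SIZE = ALIGN(16 + 1514 + 4 + 2 * 4, 4) = 1544

where 16 is TSNEP_RX_INLINE_METADATA_SIZE and 1514 + 4 is an Ethernet frame including FCS.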
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
+#else
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
+#endif
+#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
+
+#define TSNEP_COALESCE_USECS_DEFAULT 64
+#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
+ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
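Plugging in the ECM_INT_DELAY_* values from tsnep_hw.h:

    TSNEP_COALESCE_USECS_MAX = (0xF0 >> 4) * 16 + 16 - 1 = 15 * 16 + 15 = 255 us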
+
+#define TSNEP_TX_TYPE_SKB BIT(0)
+#define TSNEP_TX_TYPE_SKB_FRAG BIT(1)
+#define TSNEP_TX_TYPE_XDP_TX BIT(2)
+#define TSNEP_TX_TYPE_XDP_NDO BIT(3)
+#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
+#define TSNEP_TX_TYPE_XSK BIT(4)
+
+#define TSNEP_XDP_TX BIT(0)
+#define TSNEP_XDP_REDIRECT BIT(1)
+
+static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ mask |= ECM_INT_DISABLE;
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static irqreturn_t tsnep_irq(int irq, void *arg)
+{
+ struct tsnep_adapter *adapter = arg;
+ u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
+
+ /* acknowledge interrupt */
+ if (active != 0)
+ iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
+
+ /* handle link interrupt */
+ if ((active & ECM_INT_LINK) != 0)
+ phy_mac_interrupt(adapter->netdev->phydev);
+
+ /* handle TX/RX queue 0 interrupt */
+ if ((active & adapter->queue[0].irq_mask) != 0) {
+ if (napi_schedule_prep(&adapter->queue[0].napi)) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&adapter->queue[0].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+{
+ struct tsnep_queue *queue = arg;
+
+ /* handle TX/RX queue interrupt */
+ if (napi_schedule_prep(&queue->napi)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&queue->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
+{
+ if (usecs > TSNEP_COALESCE_USECS_MAX)
+ return -ERANGE;
+
+ usecs /= ECM_INT_DELAY_BASE_US;
+ usecs <<= ECM_INT_DELAY_SHIFT;
+ usecs &= ECM_INT_DELAY_MASK;
+
+ queue->irq_delay &= ~ECM_INT_DELAY_MASK;
+ queue->irq_delay |= usecs;
+ iowrite8(queue->irq_delay, queue->irq_delay_addr);
+
+ return 0;
+}
+
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
+{
+ u32 usecs;
+
+ usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
+ usecs >>= ECM_INT_DELAY_SHIFT;
+ usecs *= ECM_INT_DELAY_BASE_US;
+
+ return usecs;
+}
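Because the delay is stored in ECM_INT_DELAY_BASE_US (16 us) units, tsnep_set_irq_coalesce() rounds down and a set/get round trip is lossy; an illustrative sequence:

    tsnep_set_irq_coalesce(queue, 100); /* stored as 100 / 16 = 6 units */
    tsnep_get_irq_coalesce(queue);      /* returns 6 * 16 = 96, not 100 */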
+
+static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ md = ECM_MD_READ;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
+}
+
+static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ md = ECM_MD_WRITE;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
+{
+ u32 mode;
+
+ switch (adapter->phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+}
+
+static void tsnep_phy_link_status_change(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ if (phydev->link)
+ tsnep_set_link_mode(adapter);
+
+ phy_print_status(netdev->phydev);
+}
+
+static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
+{
+ int retval;
+
+ retval = phy_loopback(adapter->phydev, enable);
+
+	/* PHY link state change is not signaled while loopback is enabled; it
+	 * would only delay a working loopback anyway, so ensure that loopback
+	 * works immediately by setting the link mode directly
+	 */
+ if (!retval && enable)
+ tsnep_set_link_mode(adapter);
+
+ return retval;
+}
+
+static int tsnep_phy_open(struct tsnep_adapter *adapter)
+{
+ struct phy_device *phydev;
+ struct ethtool_eee ethtool_eee;
+ int retval;
+
+ retval = phy_connect_direct(adapter->netdev, adapter->phydev,
+ tsnep_phy_link_status_change,
+ adapter->phy_mode);
+ if (retval)
+ return retval;
+ phydev = adapter->netdev->phydev;
+
+ /* MAC supports only 100Mbps|1000Mbps full duplex
+ * SPE (Single Pair Ethernet) is also an option but not implemented yet
+ */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* disable EEE autoneg, EEE not supported by TSNEP */
+ memset(&ethtool_eee, 0, sizeof(ethtool_eee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+
+ adapter->phydev->irq = PHY_MAC_INTERRUPT;
+ phy_start(adapter->phydev);
+
+ return 0;
+}
+
+static void tsnep_phy_close(struct tsnep_adapter *adapter)
+{
+ phy_stop(adapter->netdev->phydev);
+ phy_disconnect(adapter->netdev->phydev);
+}
+
+static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ int i;
+
+ memset(tx->entry, 0, sizeof(tx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (tx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
+ tx->page_dma[i]);
+ tx->page[i] = NULL;
+ tx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_tx_ring_create(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct tsnep_tx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ tx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
+ GFP_KERNEL);
+ if (!tx->page[i]) {
+ retval = -ENOMEM;
+ goto alloc_failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_tx_desc_wb *)
+ (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_tx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ entry->owner_user_flag = false;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &tx->entry[i];
+ next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_tx_ring_cleanup(tx);
+ return retval;
+}
+
+static void tsnep_tx_init(struct tsnep_tx *tx)
+{
+ dma_addr_t dma;
+
+ dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
+ tx->write = 0;
+ tx->read = 0;
+ tx->owner_counter = 1;
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+}
+
+static void tsnep_tx_enable(struct tsnep_tx *tx)
+{
+ struct netdev_queue *nq;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock_bh(nq);
+ netif_tx_wake_queue(nq);
+ __netif_tx_unlock_bh(nq);
+}
+
+static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
+{
+ struct netdev_queue *nq;
+ u32 val;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock_bh(nq);
+ netif_tx_stop_queue(nq);
+ __netif_tx_unlock_bh(nq);
+
+ /* wait until TX is done in hardware */
+ readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+ 1000000);
+
+ /* wait until TX is also done in software */
+ while (READ_ONCE(tx->read) != tx->write) {
+ napi_schedule(napi);
+ napi_synchronize(napi);
+ }
+}
+
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ bool last)
+{
+ struct tsnep_tx_entry *entry = &tx->entry[index];
+
+ entry->properties = 0;
+ /* xdpf and zc are union with skb */
+ if (entry->skb) {
+ entry->properties = length & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
+
+ /* toggle user flag to prevent false acknowledge
+ *
+ * Only the first fragment is acknowledged. For all other
+ * fragments no acknowledge is done and the last written owner
+ * counter stays in the writeback descriptor. Therefore, it is
+ * possible that the last written owner counter is identical to
+ * the new incremented owner counter and a false acknowledge is
+ * detected before the real acknowledge has been done by
+ * hardware.
+ *
+ * The user flag is used to prevent this situation. The user
+ * flag is copied to the writeback descriptor by the hardware
+		 * and is used as additional acknowledge data. By toggling the
+ * user flag only for the first fragment (which is
+ * acknowledged), it is guaranteed that the last acknowledge
+ * done for this descriptor has used a different user flag and
+ * cannot be detected as false acknowledge.
+ */
+ entry->owner_user_flag = !entry->owner_user_flag;
+ }
+ if (last)
+ entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
+ if (index == tx->increment_owner_counter) {
+ tx->owner_counter++;
+ if (tx->owner_counter == 4)
+ tx->owner_counter = 1;
+ tx->increment_owner_counter--;
+ if (tx->increment_owner_counter < 0)
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+ if (entry->owner_user_flag)
+ entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
+ entry->desc->more_properties =
+ __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_tx_desc_available(struct tsnep_tx *tx)
+{
+ if (tx->read <= tx->write)
+ return TSNEP_RING_SIZE - tx->write + tx->read - 1;
+ else
+ return tx->read - tx->write - 1;
+}
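One descriptor is always left unused so that read == write unambiguously means an empty ring; illustrative values with TSNEP_RING_SIZE = 256:

    read = 5,  write = 10  ->  256 - 10 + 5 - 1 = 250 free descriptors
    read = 10, write = 5   ->  10 - 5 - 1       = 4 free descriptors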
+
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ unsigned int len;
+ dma_addr_t dma;
+ int map_len = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
+
+ if (!i) {
+ len = skb_headlen(skb);
+ dma = dma_map_single(dmadev, skb->data, len,
+ DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB;
+ } else {
+ len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+ dma = skb_frag_dma_map(dmadev,
+ &skb_shinfo(skb)->frags[i - 1],
+ 0, len, DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG;
+ }
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
+ }
+
+ return map_len;
+}
+
+static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ int map_len = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(index + i) & TSNEP_RING_MASK];
+
+ if (entry->len) {
+ if (entry->type & TSNEP_TX_TYPE_SKB)
+ dma_unmap_single(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ else if (entry->type &
+ (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
+ dma_unmap_page(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ map_len += entry->len;
+ entry->len = 0;
+ }
+ }
+
+ return map_len;
+}
+
+static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ struct tsnep_tx *tx)
+{
+ int count = 1;
+ struct tsnep_tx_entry *entry;
+ int length;
+ int i;
+ int retval;
+
+ if (skb_shinfo(skb)->nr_frags > 0)
+ count += skb_shinfo(skb)->nr_frags;
+
+ if (tsnep_tx_desc_available(tx) < count) {
+		/* ring full; this should not happen because the queue is
+		 * stopped below when it runs low on descriptors
+ */
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = &tx->entry[tx->write];
+ entry->skb = skb;
+
+ retval = tsnep_tx_map(skb, tx, count);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+
+ tx->dropped++;
+
+ return NETDEV_TX_OK;
+ }
+ length = retval;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
+ i == count - 1);
+ tx->write = (tx->write + count) & TSNEP_RING_MASK;
+
+ skb_tx_timestamp(skb);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ /* ring can get full with next frame */
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
+ struct skb_shared_info *shinfo, int count, u32 type)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct page *page;
+ skb_frag_t *frag;
+ unsigned int len;
+ int map_len = 0;
+ dma_addr_t dma;
+ void *data;
+ int i;
+
+ frag = NULL;
+ len = xdpf->len;
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
+ if (type & TSNEP_TX_TYPE_XDP_NDO) {
+ data = unlikely(frag) ? skb_frag_address(frag) :
+ xdpf->data;
+ dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->type = TSNEP_TX_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag) :
+ virt_to_page(xdpf->data);
+ dma = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma += skb_frag_off(frag);
+ else
+ dma += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dmadev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ entry->type = TSNEP_TX_TYPE_XDP_TX;
+ }
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
+
+ if (i + 1 < count) {
+ frag = &shinfo->frags[i];
+ len = skb_frag_size(frag);
+ }
+ }
+
+ return map_len;
+}
+
+/* This function requires __netif_tx_lock is held by the caller. */
+static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
+ struct tsnep_tx *tx, u32 type)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct tsnep_tx_entry *entry;
+ int count, length, retval, i;
+
+ count = 1;
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ count += shinfo->nr_frags;
+
+	/* ensure that the TX ring is not filled up by XDP; MAX_SKB_FRAGS + 1
+	 * descriptors always stay available for the normal TX path, which
+	 * stops the queue there if necessary
+ */
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
+ return false;
+
+ entry = &tx->entry[tx->write];
+ entry->xdpf = xdpf;
+
+ retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ entry->xdpf = NULL;
+
+ tx->dropped++;
+
+ return false;
+ }
+ length = retval;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
+ i == count - 1);
+ tx->write = (tx->write + count) & TSNEP_RING_MASK;
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ return true;
+}
+
+static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
+{
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+}
+
+static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
+ struct xdp_buff *xdp,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ bool xmit;
+
+ if (unlikely(!xdpf))
+ return false;
+
+ __netif_tx_lock(tx_nq, smp_processor_id());
+
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ if (xmit)
+ txq_trans_cond_update(tx_nq);
+
+ __netif_tx_unlock(tx_nq);
+
+ return xmit;
+}
+
+static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
+{
+ struct tsnep_tx_entry *entry;
+ dma_addr_t dma;
+
+ entry = &tx->entry[tx->write];
+ entry->zc = true;
+
+ dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
+ xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
+
+ entry->type = TSNEP_TX_TYPE_XSK;
+ entry->len = xdpd->len;
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ return xdpd->len;
+}
+
+static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
+ struct tsnep_tx *tx)
+{
+ int length;
+
+ length = tsnep_xdp_tx_map_zc(xdpd, tx);
+
+ tsnep_tx_activate(tx, tx->write, length, true);
+ tx->write = (tx->write + 1) & TSNEP_RING_MASK;
+}
+
+static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
+{
+ int desc_available = tsnep_tx_desc_available(tx);
+ struct xdp_desc *descs = tx->xsk_pool->tx_descs;
+ int batch, i;
+
+	/* ensure that the TX ring is not filled up by XDP; MAX_SKB_FRAGS + 1
+	 * descriptors always stay available for the normal TX path, which
+	 * stops the queue there if necessary
+ */
+ if (desc_available <= (MAX_SKB_FRAGS + 1))
+ return;
+ desc_available -= MAX_SKB_FRAGS + 1;
+
+ batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
+ for (i = 0; i < batch; i++)
+ tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
+
+ if (batch) {
+ /* descriptor properties shall be valid before hardware is
+ * notified
+ */
+ dma_wmb();
+
+ tsnep_xdp_xmit_flush(tx);
+ }
+}
+
+static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+{
+ struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
+ int xsk_frames = 0;
+ int budget = 128;
+ int length;
+ int count;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ do {
+ if (tx->read == tx->write)
+ break;
+
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) !=
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ count = 1;
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ skb_shinfo(entry->skb)->nr_frags > 0)
+ count += skb_shinfo(entry->skb)->nr_frags;
+ else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
+ xdp_frame_has_frags(entry->xdpf))
+ count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
+
+ length = tsnep_tx_unmap(tx, tx->read, count);
+
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ (__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 timestamp;
+
+ if (skb_shinfo(entry->skb)->tx_flags &
+ SKBTX_HW_TSTAMP_USE_CYCLES)
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->counter);
+ else
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->timestamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(timestamp);
+
+ skb_tstamp_tx(entry->skb, &hwtstamps);
+ }
+
+ if (entry->type & TSNEP_TX_TYPE_SKB)
+ napi_consume_skb(entry->skb, napi_budget);
+ else if (entry->type & TSNEP_TX_TYPE_XDP)
+ xdp_return_frame_rx_napi(entry->xdpf);
+ else
+ xsk_frames++;
+ /* xdpf and zc are union with skb */
+ entry->skb = NULL;
+
+ tx->read = (tx->read + count) & TSNEP_RING_MASK;
+
+ tx->packets++;
+ tx->bytes += length + ETH_FCS_LEN;
+
+ budget--;
+ } while (likely(budget));
+
+ if (tx->xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(tx->xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ tsnep_xdp_xmit_zc(tx);
+ }
+
+ if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+ netif_tx_queue_stopped(nq)) {
+ netif_tx_wake_queue(nq);
+ }
+
+ __netif_tx_unlock(nq);
+
+ return budget != 0;
+}
+
+static bool tsnep_tx_pending(struct tsnep_tx *tx)
+{
+ struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
+ bool pending = false;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (tx->read != tx->write) {
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) ==
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ pending = true;
+ }
+
+ __netif_tx_unlock(nq);
+
+ return pending;
+}
+
+static int tsnep_tx_open(struct tsnep_tx *tx)
+{
+ int retval;
+
+ retval = tsnep_tx_ring_create(tx);
+ if (retval)
+ return retval;
+
+ tsnep_tx_init(tx);
+
+ return 0;
+}
+
+static void tsnep_tx_close(struct tsnep_tx *tx)
+{
+ tsnep_tx_ring_cleanup(tx);
+}
+
+static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ if (!rx->xsk_pool && entry->page)
+ page_pool_put_full_page(rx->page_pool, entry->page,
+ false);
+ if (rx->xsk_pool && entry->xdp)
+ xsk_buff_free(entry->xdp);
+ /* xdp is union with page */
+ entry->page = NULL;
+ }
+
+ if (rx->page_pool)
+ page_pool_destroy(rx->page_pool);
+
+ memset(rx->entry, 0, sizeof(rx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (rx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
+ rx->page_dma[i]);
+ rx->page[i] = NULL;
+ rx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_rx_ring_create(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ struct page_pool_params pp_params = { 0 };
+ struct tsnep_rx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ rx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
+ GFP_KERNEL);
+ if (!rx->page[i]) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_rx_desc_wb *)
+ (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_rx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = TSNEP_RING_SIZE;
+ pp_params.nid = dev_to_node(dmadev);
+ pp_params.dev = dmadev;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+ pp_params.offset = TSNEP_RX_OFFSET;
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool)) {
+ retval = PTR_ERR(rx->page_pool);
+ rx->page_pool = NULL;
+ goto failed;
+ }
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_init(struct tsnep_rx *rx)
+{
+ dma_addr_t dma;
+
+ dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
+ rx->write = 0;
+ rx->read = 0;
+ rx->owner_counter = 1;
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+}
+
+static void tsnep_rx_enable(struct tsnep_rx *rx)
+{
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+}
+
+static void tsnep_rx_disable(struct tsnep_rx *rx)
+{
+ u32 val;
+
+ iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
+ readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
+ 1000000);
+}
+
+static int tsnep_rx_desc_available(struct tsnep_rx *rx)
+{
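+ /* one ring slot is always left unused, so that read == write
+ * unambiguously means an empty ring
+ */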
+ if (rx->read <= rx->write)
+ return TSNEP_RING_SIZE - rx->write + rx->read - 1;
+ else
+ return rx->read - rx->write - 1;
+}
+
+static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
+{
+ struct page **page;
+
+ /* the last entry of page_buffer is always zero, because the ring
+ * cannot be filled completely
+ */
+ page = rx->page_buffer;
+ while (*page) {
+ page_pool_put_full_page(rx->page_pool, *page, false);
+ *page = NULL;
+ page++;
+ }
+}
+
+static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
+{
+ int i;
+
+ /* allocate for all ring entries except the last one, because the
+ * ring cannot be filled completely
+ */
+ for (i = 0; i < TSNEP_RING_SIZE - 1; i++) {
+ rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
+ if (!rx->page_buffer[i]) {
+ tsnep_rx_free_page_buffer(rx);
+
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+ struct page *page)
+{
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
+}
+
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+ tsnep_rx_set_page(rx, entry, page);
+
+ return 0;
+}
+
+static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+ tsnep_rx_set_page(rx, entry, read->page);
+ read->page = NULL;
+}
+
+static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ /* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiples of 4 */
+ entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
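+ /* the owner counter cycles through the values 1..3 and is incremented
+ * once per ring pass, at a position that moves back by one slot every
+ * pass; a write-back left over from a previous pass therefore carries
+ * a different counter value and is not mistaken for a new one
+ */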
+ if (index == rx->increment_owner_counter) {
+ rx->owner_counter++;
+ if (rx->owner_counter == 4)
+ rx->owner_counter = 1;
+ rx->increment_owner_counter--;
+ if (rx->increment_owner_counter < 0)
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ bool alloc_failed = false;
+ int i, index;
+
+ for (i = 0; i < count && !alloc_failed; i++) {
+ index = (rx->write + i) & TSNEP_RING_MASK;
+
+ if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
+ rx->alloc_failed++;
+ alloc_failed = true;
+
+ /* reuse only if no other allocation was successful */
+ if (i == 0 && reuse)
+ tsnep_rx_reuse_buffer(rx, index);
+ else
+ break;
+ }
+
+ tsnep_rx_activate(rx, index);
+ }
+
+ if (i)
+ rx->write = (rx->write + i) & TSNEP_RING_MASK;
+
+ return i;
+}
+
+static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
+{
+ int desc_refilled;
+
+ desc_refilled = tsnep_rx_alloc(rx, count, reuse);
+ if (desc_refilled)
+ tsnep_rx_enable(rx);
+
+ return desc_refilled;
+}
+
+static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+ struct xdp_buff *xdp)
+{
+ entry->xdp = xdp;
+ entry->len = TSNEP_XSK_RX_BUF_SIZE;
+ entry->dma = xsk_buff_xdp_get_dma(entry->xdp);
+ entry->desc->rx = __cpu_to_le64(entry->dma);
+}
+
+static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+ tsnep_rx_set_xdp(rx, entry, read->xdp);
+ read->xdp = NULL;
+}
+
+static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ u32 allocated;
+ int i;
+
+ allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
+ for (i = 0; i < allocated; i++) {
+ int index = (rx->write + i) & TSNEP_RING_MASK;
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
+ tsnep_rx_activate(rx, index);
+ }
+ if (i == 0) {
+ rx->alloc_failed++;
+
+ if (reuse) {
+ tsnep_rx_reuse_buffer_zc(rx, rx->write);
+ tsnep_rx_activate(rx, rx->write);
+ }
+ }
+
+ if (i)
+ rx->write = (rx->write + i) & TSNEP_RING_MASK;
+
+ return i;
+}
+
+static void tsnep_rx_free_zc(struct tsnep_rx *rx)
+{
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ if (entry->xdp)
+ xsk_buff_free(entry->xdp);
+ entry->xdp = NULL;
+ }
+}
+
+static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ int desc_refilled;
+
+ desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
+ if (desc_refilled)
+ tsnep_rx_enable(rx);
+
+ return desc_refilled;
+}
+
+static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ unsigned int length;
+ unsigned int sync;
+ u32 act;
+
+ length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
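+ /* remember the original length before the program runs; the XDP
+ * program may shrink the frame with bpf_xdp_adjust_tail(), and the
+ * XDP_DROP path below needs the maximum of both lengths for the
+ * DMA sync
+ */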
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ /* due to xdp_adjust_tail, the DMA sync for_device must cover the
+ * maximum length the CPU may have touched
+ */
+ sync = xdp->data_end - xdp->data_hard_start -
+ XDP_PACKET_HEADROOM;
+ sync = max(sync, length);
+ page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
+ sync, true);
+ return true;
+ }
+}
+
+static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq,
+ struct tsnep_tx *tx)
+{
+ u32 act;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* XDP_REDIRECT is the main action for zero-copy */
+ if (likely(act == XDP_REDIRECT)) {
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ xsk_buff_free(xdp);
+ return true;
+ }
+}
+
+static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ if (status & TSNEP_XDP_TX) {
+ __netif_tx_lock(tx_nq, smp_processor_id());
+ tsnep_xdp_xmit_flush(tx);
+ __netif_tx_unlock(tx_nq);
+ }
+
+ if (status & TSNEP_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = napi_build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to point to the received data */
+ skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - ETH_FCS_LEN);
+
+ if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)(page_address(page) +
+ TSNEP_RX_OFFSET);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->netdev_data = rx_inline;
+ }
+
+ skb_record_rx_queue(skb, rx->queue_index);
+ skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+ return skb;
+}
+
+static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
+ struct page *page, int length)
+{
+ struct sk_buff *skb;
+
+ skb = tsnep_build_skb(rx, page, length);
+ if (skb) {
+ skb_mark_for_recycle(skb);
+
+ rx->packets++;
+ rx->bytes += length;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
+
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ rx->dropped++;
+ }
+}
+
+static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ enum dma_data_direction dma_dir;
+ struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ int done = 0;
+ int length;
+
+ desc_available = tsnep_rx_desc_available(rx);
+ dma_dir = page_pool_get_dma_dir(rx->page_pool);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
+ }
+
+ while (likely(done < budget) && (rx->read != rx->write)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+ done++;
+
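+ /* refill in batches of at least TSNEP_RING_RX_REFILL descriptors,
+ * which keeps the ring from running empty during a long poll and
+ * presumably limits the number of refill MMIO writes
+ */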
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill(rx, desc_available,
+ reuse);
+ if (!entry->page) {
+ /* the buffer has been reused for refill to prevent an
+ * empty RX ring, so it cannot be used for RX
+ * processing
+ */
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ dma_sync_single_range_for_cpu(dmadev, entry->dma,
+ TSNEP_RX_OFFSET, length, dma_dir);
+
+ /* RX metadata with timestamps is placed in front of the actual
+ * data; subtract the metadata size to get the length of the
+ * actual data and treat the metadata size as the offset of the
+ * actual data during RX processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ if (prog) {
+ bool consume;
+
+ xdp_prepare_buff(&xdp, page_address(entry->page),
+ XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
+ length - ETH_FCS_LEN, false);
+
+ consume = tsnep_xdp_run_prog(rx, prog, &xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->page = NULL;
+
+ continue;
+ }
+ }
+
+ tsnep_rx_page(rx, napi, entry->page, length);
+ entry->page = NULL;
+ }
+
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
+ if (desc_available)
+ tsnep_rx_refill(rx, desc_available, false);
+
+ return done;
+}
+
+static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ struct page *page;
+ int done = 0;
+ int length;
+
+ desc_available = tsnep_rx_desc_available(rx);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+ }
+
+ while (likely(done < budget) && (rx->read != rx->write)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+ done++;
+
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill_zc(rx, desc_available,
+ reuse);
+ if (!entry->xdp) {
+ /* the buffer has been reused for refill to prevent an
+ * empty RX ring, so it cannot be used for RX
+ * processing
+ */
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ prefetch(entry->xdp->data);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
+ xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+
+ /* RX metadata with timestamps is placed in front of the actual
+ * data; subtract the metadata size to get the length of the
+ * actual data and treat the metadata size as the offset of the
+ * actual data during RX processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ if (prog) {
+ bool consume;
+
+ entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
+ entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;
+
+ consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->xdp = NULL;
+
+ continue;
+ }
+ }
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (page) {
+ memcpy(page_address(page) + TSNEP_RX_OFFSET,
+ entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE,
+ length + TSNEP_RX_INLINE_METADATA_SIZE);
+ tsnep_rx_page(rx, napi, page, length);
+ } else {
+ rx->dropped++;
+ }
+ xsk_buff_free(entry->xdp);
+ entry->xdp = NULL;
+ }
+
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
+ if (desc_available)
+ desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
+
+ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+
+ return done;
+ }
+
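+ /* without need wakeup support, returning the full budget keeps NAPI
+ * polling, so that the failed buffer allocations are retried
+ */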
+ return desc_available ? budget : done;
+}
+
+static bool tsnep_rx_pending(struct tsnep_rx *rx)
+{
+ struct tsnep_rx_entry *entry;
+
+ if (rx->read != rx->write) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) ==
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ return true;
+ }
+
+ return false;
+}
+
+static int tsnep_rx_open(struct tsnep_rx *rx)
+{
+ int desc_available;
+ int retval;
+
+ retval = tsnep_rx_ring_create(rx);
+ if (retval)
+ return retval;
+
+ tsnep_rx_init(rx);
+
+ desc_available = tsnep_rx_desc_available(rx);
+ if (rx->xsk_pool)
+ retval = tsnep_rx_alloc_zc(rx, desc_available, false);
+ else
+ retval = tsnep_rx_alloc(rx, desc_available, false);
+ if (retval != desc_available) {
+ retval = -ENOMEM;
+
+ goto alloc_failed;
+ }
+
+ /* prealloc pages to prevent allocation failures when XSK pool is
+ * disabled at runtime
+ */
+ if (rx->xsk_pool) {
+ retval = tsnep_rx_alloc_page_buffer(rx);
+ if (retval)
+ goto alloc_failed;
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_close(struct tsnep_rx *rx)
+{
+ if (rx->xsk_pool)
+ tsnep_rx_free_page_buffer(rx);
+
+ tsnep_rx_ring_cleanup(rx);
+}
+
+static void tsnep_rx_reopen(struct tsnep_rx *rx)
+{
+ struct page **page = rx->page_buffer;
+ int i;
+
+ tsnep_rx_init(rx);
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ /* defined initial values for properties are required for
+ * correct owner counter checking
+ */
+ entry->desc->properties = 0;
+ entry->desc_wb->properties = 0;
+
+ /* prevent allocation failures by reusing kept pages */
+ if (*page) {
+ tsnep_rx_set_page(rx, entry, *page);
+ tsnep_rx_activate(rx, rx->write);
+ rx->write++;
+
+ *page = NULL;
+ page++;
+ }
+ }
+}
+
+static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
+{
+ struct page **page = rx->page_buffer;
+ u32 allocated;
+ int i;
+
+ tsnep_rx_init(rx);
+
+ /* allocate buffers for all ring entries except the last one, because
+ * the ring cannot be filled completely; as many buffers as possible
+ * are enough, because a wakeup is signaled once new buffers are
+ * available
+ */
+ allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
+ TSNEP_RING_SIZE - 1);
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ /* keep pages to prevent allocation failures when xsk is
+ * disabled
+ */
+ if (entry->page) {
+ *page = entry->page;
+ entry->page = NULL;
+
+ page++;
+ }
+
+ /* defined initial values for properties are required for
+ * correct owner counter checking
+ */
+ entry->desc->properties = 0;
+ entry->desc_wb->properties = 0;
+
+ if (allocated) {
+ tsnep_rx_set_xdp(rx, entry,
+ rx->xdp_batch[allocated - 1]);
+ tsnep_rx_activate(rx, rx->write);
+ rx->write++;
+
+ allocated--;
+ }
+ }
+
+ /* set the need wakeup flag immediately if the ring is not filled
+ * completely; the first poll would be too late, because the need
+ * wakeup signaling would be delayed indefinitely
+ */
+ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+ int desc_available = tsnep_rx_desc_available(rx);
+
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ }
+}
+
+static bool tsnep_pending(struct tsnep_queue *queue)
+{
+ if (queue->tx && tsnep_tx_pending(queue->tx))
+ return true;
+
+ if (queue->rx && tsnep_rx_pending(queue->rx))
+ return true;
+
+ return false;
+}
+
+static int tsnep_poll(struct napi_struct *napi, int budget)
+{
+ struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
+ napi);
+ bool complete = true;
+ int done = 0;
+
+ if (queue->tx)
+ complete = tsnep_tx_poll(queue->tx, budget);
+
+ /* handle case where we are called by netpoll with a budget of 0 */
+ if (unlikely(budget <= 0))
+ return budget;
+
+ if (queue->rx) {
+ done = queue->rx->xsk_pool ?
+ tsnep_rx_poll_zc(queue->rx, napi, budget) :
+ tsnep_rx_poll(queue->rx, napi, budget);
+ if (done >= budget)
+ complete = false;
+ }
+
+ /* if not all work has been completed, return budget and keep polling */
+ if (!complete)
+ return budget;
+
+ if (likely(napi_complete_done(napi, done))) {
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
+ /* reschedule if work is already pending to prevent stale packets
+ * which are transmitted or received after polling but before the
+ * interrupt is enabled
+ */
+ if (tsnep_pending(queue)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(napi);
+ }
+ }
+
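+ /* return a value below budget, otherwise the NAPI core would assume
+ * that polling is not complete although napi_complete_done() has
+ * already been called
+ */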
+ return min(done, budget - 1);
+}
+
+static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+{
+ const char *name = netdev_name(queue->adapter->netdev);
+ irq_handler_t handler;
+ void *dev;
+ int retval;
+
+ if (first) {
+ sprintf(queue->name, "%s-mac", name);
+ handler = tsnep_irq;
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
+ name, queue->rx->queue_index);
+ else if (queue->tx)
+ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
+ name, queue->tx->queue_index);
+ else
+ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
+ name, queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+
+ retval = request_irq(queue->irq, handler, 0, queue->name, dev);
+ if (retval) {
+ /* if name is empty, then interrupt won't be freed */
+ memset(queue->name, 0, sizeof(queue->name));
+ }
+
+ return retval;
+}
+
+static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
+{
+ void *dev;
+
+ if (!strlen(queue->name))
+ return;
+
+ if (first)
+ dev = queue->adapter;
+ else
+ dev = queue;
+
+ free_irq(queue->irq, dev);
+ memset(queue->name, 0, sizeof(queue->name));
+}
+
+static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+
+ tsnep_free_irq(queue, first);
+
+ if (rx) {
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
+ xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
+ }
+
+ netif_napi_del(&queue->napi);
+}
+
+static int tsnep_queue_open(struct tsnep_adapter *adapter,
+ struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+ struct tsnep_tx *tx = queue->tx;
+ int retval;
+
+ netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
+
+ if (rx) {
+ /* choose TX queue for XDP_TX */
+ if (tx)
+ rx->tx_queue_index = tx->queue_index;
+ else if (rx->queue_index < adapter->num_tx_queues)
+ rx->tx_queue_index = rx->queue_index;
+ else
+ rx->tx_queue_index = 0;
+
+ /* prepare both memory models to eliminate possible registration
+ * errors when the memory model is switched between page pool and
+ * XSK pool at runtime
+ */
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (retval)
+ goto failed;
+ if (rx->xsk_pool)
+ xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
+ }
+
+ retval = tsnep_request_irq(queue, first);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n", queue->irq);
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_queue_close(queue, first);
+
+ return retval;
+}
+
+static void tsnep_queue_enable(struct tsnep_queue *queue)
+{
+ napi_enable(&queue->napi);
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
+ if (queue->tx)
+ tsnep_tx_enable(queue->tx);
+
+ if (queue->rx)
+ tsnep_rx_enable(queue->rx);
+}
+
+static void tsnep_queue_disable(struct tsnep_queue *queue)
+{
+ if (queue->tx)
+ tsnep_tx_disable(queue->tx, &queue->napi);
+
+ napi_disable(&queue->napi);
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+
+ /* disable RX after NAPI polling has been disabled, because RX can be
+ * enabled during NAPI polling
+ */
+ if (queue->rx)
+ tsnep_rx_disable(queue->rx);
+}
+
+static int tsnep_netdev_open(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i, retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].tx) {
+ retval = tsnep_tx_open(adapter->queue[i].tx);
+ if (retval)
+ goto failed;
+ }
+ if (adapter->queue[i].rx) {
+ retval = tsnep_rx_open(adapter->queue[i].rx);
+ if (retval)
+ goto failed;
+ }
+
+ retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
+ if (retval)
+ goto failed;
+ }
+
+ retval = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (retval)
+ goto failed;
+ retval = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (retval)
+ goto failed;
+
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ goto phy_failed;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ tsnep_queue_enable(&adapter->queue[i]);
+
+ return 0;
+
+phy_failed:
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+failed:
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_queue_close(&adapter->queue[i], i == 0);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+ return retval;
+}
+
+static int tsnep_netdev_close(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_queue_disable(&adapter->queue[i]);
+
+ tsnep_queue_close(&adapter->queue[i], i == 0);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+
+ return 0;
+}
+
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
+{
+ bool running = netif_running(queue->adapter->netdev);
+ u32 frame_size;
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < TSNEP_XSK_RX_BUF_SIZE)
+ return -EOPNOTSUPP;
+
+ queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
+ sizeof(*queue->rx->page_buffer),
+ GFP_KERNEL);
+ if (!queue->rx->page_buffer)
+ return -ENOMEM;
+ queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
+ sizeof(*queue->rx->xdp_batch),
+ GFP_KERNEL);
+ if (!queue->rx->xdp_batch) {
+ kfree(queue->rx->page_buffer);
+ queue->rx->page_buffer = NULL;
+
+ return -ENOMEM;
+ }
+
+ xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
+
+ if (running)
+ tsnep_queue_disable(queue);
+
+ queue->tx->xsk_pool = pool;
+ queue->rx->xsk_pool = pool;
+
+ if (running) {
+ tsnep_rx_reopen_xsk(queue->rx);
+ tsnep_queue_enable(queue);
+ }
+
+ return 0;
+}
+
+void tsnep_disable_xsk(struct tsnep_queue *queue)
+{
+ bool running = netif_running(queue->adapter->netdev);
+
+ if (running)
+ tsnep_queue_disable(queue);
+
+ tsnep_rx_free_zc(queue->rx);
+
+ queue->rx->xsk_pool = NULL;
+ queue->tx->xsk_pool = NULL;
+
+ if (running) {
+ tsnep_rx_reopen(queue->rx);
+ tsnep_queue_enable(queue);
+ }
+
+ kfree(queue->rx->xdp_batch);
+ queue->rx->xdp_batch = NULL;
+ kfree(queue->rx->page_buffer);
+ queue->rx->page_buffer = NULL;
+}
+
+static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+
+ if (queue_mapping >= adapter->num_tx_queues)
+ queue_mapping = 0;
+
+ return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
+}
+
+static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
+ return tsnep_ptp_ioctl(netdev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void tsnep_netdev_set_multicast(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 rx_filter = 0;
+
+ /* configured MAC address and broadcasts are never filtered */
+ if (netdev->flags & IFF_PROMISC) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
+ } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ }
+ iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
+}
+
+static void tsnep_netdev_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ stats->tx_packets += adapter->tx[i].packets;
+ stats->tx_bytes += adapter->tx[i].bytes;
+ stats->tx_dropped += adapter->tx[i].dropped;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ stats->rx_packets += adapter->rx[i].packets;
+ stats->rx_bytes += adapter->rx[i].bytes;
+ stats->rx_dropped += adapter->rx[i].dropped;
+ stats->multicast += adapter->rx[i].multicast;
+
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_fifo_errors += val;
+ val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_frame_errors += val;
+ }
+
+ reg = ioread32(adapter->addr + ECM_STAT);
+ val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+ val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_crc_errors += val;
+ val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+}
+
+static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
+{
+ iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ iowrite16(*(u16 *)(addr + sizeof(u32)),
+ adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+
+ ether_addr_copy(adapter->mac_address, addr);
+ netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
+ addr);
+}
+
+static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *sock_addr = addr;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, sock_addr);
+ if (retval)
+ return retval;
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
+ tsnep_mac_set_address(adapter, sock_addr->sa_data);
+
+ return 0;
+}
+
+static int tsnep_netdev_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ bool enable;
+ int retval = 0;
+
+ if (changed & NETIF_F_LOOPBACK) {
+ enable = !!(features & NETIF_F_LOOPBACK);
+ retval = tsnep_phy_loopback(adapter, enable);
+ }
+
+ return retval;
+}
+
+static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
+ u64 timestamp;
+
+ if (cycles)
+ timestamp = __le64_to_cpu(rx_inline->counter);
+ else
+ timestamp = __le64_to_cpu(rx_inline->timestamp);
+
+ return ns_to_ktime(timestamp);
+}
+
+static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
+ bpf->xsk.queue_id);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
+{
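+ /* fold the CPU number into the range of available TX queues; the
+ * mask relies on TSNEP_MAX_QUEUES being a power of two, the loop
+ * then reduces the result below the actual queue count
+ */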
+ if (cpu >= TSNEP_MAX_QUEUES)
+ cpu &= TSNEP_MAX_QUEUES - 1;
+
+ while (cpu >= adapter->num_tx_queues)
+ cpu -= adapter->num_tx_queues;
+
+ return &adapter->tx[cpu];
+}
+
+static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **xdp, u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ u32 cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct tsnep_tx *tx;
+ int nxmit;
+ bool xmit;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ tx = tsnep_xdp_get_tx(adapter, cpu);
+ nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock(nq, cpu);
+
+ for (nxmit = 0; nxmit < n; nxmit++) {
+ xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
+ TSNEP_TX_TYPE_XDP_NDO);
+ if (!xmit)
+ break;
+
+ /* avoid transmit queue timeout since we share it with the slow
+ * path
+ */
+ txq_trans_cond_update(nq);
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ tsnep_xdp_xmit_flush(tx);
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
+static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id,
+ u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ struct tsnep_queue *queue;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+
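+ /* if NAPI is already scheduled, marking it as missed is sufficient,
+ * because it will reschedule itself; otherwise schedule it now
+ */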
+ if (!napi_if_scheduled_mark_missed(&queue->napi))
+ napi_schedule(&queue->napi);
+
+ return 0;
+}
+
+static const struct net_device_ops tsnep_netdev_ops = {
+ .ndo_open = tsnep_netdev_open,
+ .ndo_stop = tsnep_netdev_close,
+ .ndo_start_xmit = tsnep_netdev_xmit_frame,
+ .ndo_eth_ioctl = tsnep_netdev_ioctl,
+ .ndo_set_rx_mode = tsnep_netdev_set_multicast,
+ .ndo_get_stats64 = tsnep_netdev_get_stats64,
+ .ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_set_features = tsnep_netdev_set_features,
+ .ndo_get_tstamp = tsnep_netdev_get_tstamp,
+ .ndo_setup_tc = tsnep_tc_setup,
+ .ndo_bpf = tsnep_netdev_bpf,
+ .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
+ .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
+};
+
+static int tsnep_mac_init(struct tsnep_adapter *adapter)
+{
+ int retval;
+
+ /* initialize RX filtering; at least the configured MAC address and
+ * broadcasts are not filtered
+ */
+ iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
+
+ /* try to get MAC address in the following order:
+ * - device tree
+ * - valid MAC address already set
+ * - MAC address register if valid
+ * - random MAC address
+ */
+ retval = of_get_mac_address(adapter->pdev->dev.of_node,
+ adapter->mac_address);
+ if (retval == -EPROBE_DEFER)
+ return retval;
+ if (retval && !is_valid_ether_addr(adapter->mac_address)) {
+ *(u32 *)adapter->mac_address =
+ ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ *(u16 *)(adapter->mac_address + sizeof(u32)) =
+ ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+ if (!is_valid_ether_addr(adapter->mac_address))
+ eth_random_addr(adapter->mac_address);
+ }
+
+ tsnep_mac_set_address(adapter, adapter->mac_address);
+ eth_hw_addr_set(adapter->netdev, adapter->mac_address);
+
+ return 0;
+}
+
+static int tsnep_mdio_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *np = adapter->pdev->dev.of_node;
+ int retval;
+
+ if (np) {
+ np = of_get_child_by_name(np, "mdio");
+ if (!np)
+ return 0;
+
+ adapter->suppress_preamble =
+ of_property_read_bool(np, "suppress-preamble");
+ }
+
+ adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+ if (!adapter->mdiobus) {
+ retval = -ENOMEM;
+
+ goto out;
+ }
+
+ adapter->mdiobus->priv = (void *)adapter;
+ adapter->mdiobus->parent = &adapter->pdev->dev;
+ adapter->mdiobus->read = tsnep_mdiobus_read;
+ adapter->mdiobus->write = tsnep_mdiobus_write;
+ adapter->mdiobus->name = TSNEP "-mdiobus";
+ snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+ adapter->pdev->name);
+
+ /* do not scan the broadcast address (PHY address 0) */
+ adapter->mdiobus->phy_mask = 0x0000001;
+
+ retval = of_mdiobus_register(adapter->mdiobus, np);
+
+out:
+ of_node_put(np);
+
+ return retval;
+}
+
+static int tsnep_phy_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *phy_node;
+ int retval;
+
+ retval = of_get_phy_mode(adapter->pdev->dev.of_node,
+ &adapter->phy_mode);
+ if (retval)
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
+ 0);
+ adapter->phydev = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!adapter->phydev && adapter->mdiobus)
+ adapter->phydev = phy_find_first(adapter->mdiobus);
+ if (!adapter->phydev)
+ return -EIO;
+
+ return 0;
+}
+
+static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
+{
+ u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ char name[8];
+ int i;
+ int retval;
+
+ /* one TX/RX queue pair for netdev is mandatory */
+ if (platform_irq_count(adapter->pdev) == 1)
+ retval = platform_get_irq(adapter->pdev, 0);
+ else
+ retval = platform_get_irq_byname(adapter->pdev, "mac");
+ if (retval < 0)
+ return retval;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->queue[0].adapter = adapter;
+ adapter->queue[0].irq = retval;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].tx->adapter = adapter;
+ adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
+ adapter->queue[0].tx->queue_index = 0;
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].rx->adapter = adapter;
+ adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
+ adapter->queue[0].rx->queue_index = 0;
+ adapter->queue[0].irq_mask = irq_mask;
+ adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[0],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
+
+ adapter->netdev->irq = adapter->queue[0].irq;
+
+ /* add additional TX/RX queue pairs only if a dedicated interrupt is
+ * available
+ */
+ for (i = 1; i < queue_count; i++) {
+ sprintf(name, "txrx-%d", i);
+ retval = platform_get_irq_byname_optional(adapter->pdev, name);
+ if (retval < 0)
+ break;
+
+ adapter->num_tx_queues++;
+ adapter->num_rx_queues++;
+ adapter->num_queues++;
+ adapter->queue[i].adapter = adapter;
+ adapter->queue[i].irq = retval;
+ adapter->queue[i].tx = &adapter->tx[i];
+ adapter->queue[i].tx->adapter = adapter;
+ adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
+ adapter->queue[i].tx->queue_index = i;
+ adapter->queue[i].rx = &adapter->rx[i];
+ adapter->queue[i].rx->adapter = adapter;
+ adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
+ adapter->queue[i].rx->queue_index = i;
+ adapter->queue[i].irq_mask =
+ irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ adapter->queue[i].irq_delay_addr =
+ adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_probe(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *io;
+ u32 type;
+ int revision;
+ int version;
+ int queue_count;
+ int retval;
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct tsnep_adapter),
+ TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
+ if (!netdev)
+ return -ENODEV;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ platform_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->dmadev = &pdev->dev;
+ adapter->netdev = netdev;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
+
+ mutex_init(&adapter->gate_control_lock);
+ mutex_init(&adapter->rxnfc_lock);
+ INIT_LIST_HEAD(&adapter->rxnfc_rules);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(adapter->addr))
+ return PTR_ERR(adapter->addr);
+ netdev->mem_start = io->start;
+ netdev->mem_end = io->end;
+
+ type = ioread32(adapter->addr + ECM_TYPE);
+ revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
+ version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
+ adapter->gate_control = type & ECM_GATE_CONTROL;
+ adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ retval = tsnep_queue_init(adapter, queue_count);
+ if (retval)
+ return retval;
+
+ retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(64));
+ if (retval) {
+ dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
+ return retval;
+ }
+
+ retval = tsnep_mac_init(adapter);
+ if (retval)
+ return retval;
+
+ retval = tsnep_mdio_init(adapter);
+ if (retval)
+ goto mdio_init_failed;
+
+ retval = tsnep_phy_init(adapter);
+ if (retval)
+ goto phy_init_failed;
+
+ retval = tsnep_ptp_init(adapter);
+ if (retval)
+ goto ptp_init_failed;
+
+ retval = tsnep_tc_init(adapter);
+ if (retval)
+ goto tc_init_failed;
+
+ retval = tsnep_rxnfc_init(adapter);
+ if (retval)
+ goto rxnfc_init_failed;
+
+ netdev->netdev_ops = &tsnep_netdev_ops;
+ netdev->ethtool_ops = &tsnep_ethtool_ops;
+ netdev->features = NETIF_F_SG;
+ netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ retval = register_netdev(netdev);
+ if (retval)
+ goto register_failed;
+
+ dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
+ revision);
+ if (adapter->gate_control)
+ dev_info(&adapter->pdev->dev, "gate control detected\n");
+
+ return 0;
+
+register_failed:
+ tsnep_rxnfc_cleanup(adapter);
+rxnfc_init_failed:
+ tsnep_tc_cleanup(adapter);
+tc_init_failed:
+ tsnep_ptp_cleanup(adapter);
+ptp_init_failed:
+phy_init_failed:
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+mdio_init_failed:
+ return retval;
+}
+
+static int tsnep_remove(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
+
+ unregister_netdev(adapter->netdev);
+
+ tsnep_rxnfc_cleanup(adapter);
+
+ tsnep_tc_cleanup(adapter);
+
+ tsnep_ptp_cleanup(adapter);
+
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ return 0;
+}
+
+static const struct of_device_id tsnep_of_match[] = {
+ { .compatible = "engleder,tsnep", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsnep_of_match);
+
+static struct platform_driver tsnep_driver = {
+ .driver = {
+ .name = TSNEP,
+ .of_match_table = tsnep_of_match,
+ },
+ .probe = tsnep_probe,
+ .remove = tsnep_remove,
+};
+module_platform_driver(tsnep_driver);
+
+MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
+MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
new file mode 100644
index 0000000000..54fbf01268
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
+{
+ u32 high_before;
+ u32 low;
+ u32 high;
+
+ /* read high dword twice to detect overrun */
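+ /* if the low dword wraps around between the two reads, the high dword
+ * changes and the loop retries, so low and high always belong to the
+ * same instant
+ */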
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ *time = (((u64)high) << 32) | ((u64)low);
+}
+
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!ifr)
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
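+ /* the hardware timestamps every received frame, so any specific PTP
+ * filter request can only be upgraded to HWTSTAMP_FILTER_ALL
+ */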
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&adapter->hwtstamp_config, &config,
+ sizeof(adapter->hwtstamp_config));
+ }
+
+ if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ bool negative = false;
+ u64 rate_offset;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ negative = true;
+ }
+
+ /* convert the 16-bit fractional ppm value to a 32-bit binary
+ * fraction, divide by 1000000 to eliminate ppm and multiply by 8 to
+ * compensate for the 8 ns clock cycle time; this simplifies to a
+ * shift by 13 and a division by 15625, because 2^16 * 8 / 1000000 =
+ * 2^13 / 15625
+ */
+ rate_offset = scaled_ppm;
+ rate_offset <<= 16 - 3;
+ rate_offset = div_u64(rate_offset, 15625);
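+ /* worked example: scaled_ppm = 65536 is exactly 1 ppm and yields
+ * rate_offset = (65536 << 13) / 15625 = 34359, which matches 1 ppm of
+ * the 8 ns cycle time in 2^-32 ns units (8 ns * 1e-6 * 2^32 = 34359.7)
+ */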
+
+ rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK;
+ if (negative)
+ rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN;
+ iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE);
+
+ return 0;
+}
+
+static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ tsnep_get_system_time(adapter, &system_time);
+
+ system_time += delta;
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 system_time;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ system_time = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(system_time);
+
+ return 0;
+}
+
+static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 counter;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_COUNTER_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ } while (high != high_before);
+ counter = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(counter);
+
+ return 0;
+}
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter)
+{
+ int retval = 0;
+
+ adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP);
+ adapter->ptp_clock_info.owner = THIS_MODULE;
+ /* at most 2^-1 ns adjustment per clock cycle at 8 ns clock cycle
+ * time; stay slightly below that, because only bits below 2^-1 ns
+ * are supported
+ */
+ adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1);
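+ /* 0.5 ns per 8 ns cycle is 1/16 of the clock rate, i.e. 62500000 ppb,
+ * so max_adj = 500000000 / 8 - 1 = 62499999 ppb stays just below it
+ */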
+ adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine;
+ adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
+ adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
+ adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
+ adapter->ptp_clock_info.getcyclesx64 = tsnep_ptp_getcyclesx64;
+
+ spin_lock_init(&adapter->ptp_lock);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ netdev_err(adapter->netdev, "ptp_clock_register failed\n");
+
+ retval = PTR_ERR(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ } else if (adapter->ptp_clock) {
+ netdev_info(adapter->netdev, "PHC added\n");
+ }
+
+ return retval;
+}
+
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter)
+{
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ netdev_info(adapter->netdev, "PHC removed\n");
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_rxnfc.c b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
new file mode 100644
index 0000000000..9ac2a0cf38
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static void tsnep_enable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ u8 rx_assign;
+ void __iomem *addr;
+
+ rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
+ rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
+ TSNEP_RX_ASSIGN_QUEUE_MASK;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
+ TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
+ iowrite16(rule->filter.ether_type, addr);
+
+ /* enable rule after all settings are done */
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(rx_assign, addr);
+}
+
+static void tsnep_disable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ void __iomem *addr;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(0, addr);
+}
+
+static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
+ int location)
+{
+ struct tsnep_rxnfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+static void tsnep_add_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct tsnep_rxnfc_rule *pred, *cur;
+
+ tsnep_enable_rule(adapter, rule);
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
+ adapter->rxnfc_count++;
+}
+
+static void tsnep_delete_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ tsnep_disable_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->rxnfc_count--;
+
+ kfree(rule);
+}
+
+static void tsnep_flush_rules(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+}
+
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct tsnep_rxnfc_rule *rule = NULL;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->queue_index;
+
+ if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct tsnep_rxnfc_rule *rule;
+ int count = 0;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (count == cmd->rule_cnt) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -EMSGSIZE;
+ }
+
+ rule_locs[count] = rule->location;
+ count++;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *tmp;
+ int location = 0;
+
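+ /* the rule list is kept sorted by location (see tsnep_add_rule), so
+ * the first index that does not match its position in the list is a
+ * free location
+ */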
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (tmp->location == location)
+ location++;
+ else
+ return location;
+ }
+
+ if (location >= adapter->rxnfc_max)
+ return -ENOSPC;
+
+ return location;
+}
+
+static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
+{
+ INIT_LIST_HEAD(&rule->list);
+
+ rule->queue_index = fsp->ring_cookie;
+ rule->location = fsp->location;
+
+ rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
+ rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
+}
+
+static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct net_device *dev = adapter->netdev;
+ struct tsnep_rxnfc_rule *tmp;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "rule already exists\n");
+
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+}
+
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule, *old_rule;
+ int retval;
+
+ /* only EtherType is supported */
+ if (fsp->flow_type != ETHER_FLOW ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
+ fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "only ethernet protocol is supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->ring_cookie >
+ (TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
+ netdev_dbg(netdev, "invalid action\n");
+
+ return -EINVAL;
+ }
+
+ if (fsp->location != RX_CLS_LOC_ANY &&
+ fsp->location >= adapter->rxnfc_max) {
+ netdev_dbg(netdev, "invalid location\n");
+
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ if (fsp->location == RX_CLS_LOC_ANY) {
+ retval = tsnep_rxnfc_find_location(adapter);
+ if (retval < 0)
+ goto failed;
+ fsp->location = retval;
+ }
+
+ tsnep_rxnfc_init_rule(rule, fsp);
+
+ retval = tsnep_rxnfc_check_rule(adapter, rule);
+ if (retval)
+ goto failed;
+
+ old_rule = tsnep_get_rule(adapter, fsp->location);
+ if (old_rule)
+ tsnep_delete_rule(adapter, old_rule);
+
+ tsnep_add_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&adapter->rxnfc_lock);
+ kfree(rule);
+ return retval;
+}
+
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ /* disable all rules */
+ for (i = 0; i < adapter->rxnfc_max;
+ i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
+ iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
+
+ return 0;
+}
+
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
+{
+ tsnep_flush_rules(adapter);
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c
new file mode 100644
index 0000000000..8a9145f931
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_selftests.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+enum tsnep_test {
+ TSNEP_TEST_ENABLE = 0,
+ TSNEP_TEST_TAPRIO,
+ TSNEP_TEST_TAPRIO_CHANGE,
+ TSNEP_TEST_TAPRIO_EXTENSION,
+};
+
+static const char tsnep_test_strings[][ETH_GSTRING_LEN] = {
+ "Enable timeout (offline)",
+ "TAPRIO (offline)",
+ "TAPRIO change (offline)",
+ "TAPRIO extension (offline)",
+};
+
+#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN)
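+
+/* All tests below run offline only; an illustrative invocation (interface
+ * name is an example):
+ *   ethtool -t eth0 offline
+ * Each result slot reports 0 on success and 1 on failure.
+ */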
+
+static bool enable_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE))
+ return false;
+
+ return true;
+}
+
+static bool gc_timeout_signaled(struct tsnep_adapter *adapter)
+{
+ if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL)
+ return true;
+
+ return false;
+}
+
+static bool ack_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL))
+ return false;
+ return true;
+}
+
+static bool enable_gc(struct tsnep_adapter *adapter, bool a)
+{
+ u8 enable;
+ u8 active;
+
+ if (a) {
+ enable = TSNEP_GC_ENABLE_A;
+ active = TSNEP_GC_ACTIVE_A;
+ } else {
+ enable = TSNEP_GC_ENABLE_B;
+ active = TSNEP_GC_ACTIVE_B;
+ }
+
+ iowrite8(enable, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & active))
+ return false;
+
+ return true;
+}
+
+static bool disable_gc(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B))
+ return false;
+
+ return true;
+}
+
+static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay)
+{
+ u64 before, after;
+ u32 time;
+ bool enabled;
+
+ if (!disable_gc(adapter))
+ return false;
+
+ before = ktime_get_ns();
+
+ if (!enable_gc_timeout(adapter))
+ return false;
+
+ /* with a start time after the timeout, the timeout guarantees that
+ * enable is blocked if it happens too late
+ */
+ time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ time += TSNEP_GC_TIMEOUT;
+ iowrite32(time, adapter->addr + TSNEP_GC_TIME);
+
+ ndelay(delay);
+
+ enabled = enable_gc(adapter, a);
+ after = ktime_get_ns();
+
+ if (delay > TSNEP_GC_TIMEOUT) {
+ /* timeout must have blocked enable */
+ if (enabled)
+ return false;
+ } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) {
+ /* timeout must not have blocked enable */
+ if (!enabled)
+ return false;
+ }
+
+ if (enabled) {
+ if (gc_timeout_signaled(adapter))
+ return false;
+ } else {
+ if (!gc_timeout_signaled(adapter))
+ return false;
+ if (!ack_gc_timeout(adapter))
+ return false;
+ }
+
+ if (!disable_gc(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, true, i))
+ return false;
+ }
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, false, i))
+ return false;
+ }
+
+ return true;
+}
+
+static void delay_base_time(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ u64 system_time;
+ u64 base_time = ktime_to_ns(qopt->base_time);
+ u64 n;
+
+ tsnep_get_system_time(adapter, &system_time);
+ system_time += ms * 1000000;
+ n = div64_u64(system_time - base_time, qopt->cycle_time);
+
+ qopt->base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+}
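+
+/* Worked example with illustrative numbers: base_time = 0,
+ * cycle_time = 100000 ns, system time = 1234567 ns and ms = 2 give
+ * n = (1234567 + 2000000) / 100000 = 32, so the new base_time is
+ * (32 + 1) * 100000 = 3300000 ns - the first cycle start at least
+ * 2 ms in the future.
+ */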
+
+static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time,
+ u64 *system_time)
+{
+ u32 time_high_before;
+ u32 time_low;
+ u32 time_high;
+ u32 gc_time_before;
+
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
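+ /* take a consistent snapshot: re-read until neither the high word of
+ * the system time nor the gate control time changed while the low word
+ * and the gate control state were read
+ */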
+ do {
+ time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ *gc = ioread32(adapter->addr + TSNEP_GC);
+
+ gc_time_before = *gc_time;
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ time_high_before = time_high;
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while ((time_high != time_high_before) ||
+ (*gc_time != gc_time_before));
+
+ *system_time = (((u64)time_high) << 32) | ((u64)time_low);
+}
+
+static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next)
+{
+ u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time);
+ u64 cycle_start = gcl->base_time + gcl->cycle_time * n;
+ int i;
+
+ *next = cycle_start;
+ for (i = 0; i < gcl->count; i++) {
+ *next += gcl->operation[i].interval;
+ if (*next > system_time)
+ break;
+ }
+
+ return i;
+}
+
+static bool check_gate(struct tsnep_adapter *adapter)
+{
+ u32 gc_time;
+ u32 gc;
+ u64 system_time;
+ struct tsnep_gcl *curr;
+ struct tsnep_gcl *prev;
+ u64 next_time;
+ u8 gate_open;
+ u8 next_gate_open;
+
+ get_gate_state(adapter, &gc, &gc_time, &system_time);
+
+ if (gc & TSNEP_GC_ACTIVE_A) {
+ curr = &adapter->gcl[0];
+ prev = &adapter->gcl[1];
+ } else if (gc & TSNEP_GC_ACTIVE_B) {
+ curr = &adapter->gcl[1];
+ prev = &adapter->gcl[0];
+ } else {
+ return false;
+ }
+ if (curr->start_time <= system_time) {
+ /* GCL is already active */
+ int index;
+
+ index = get_operation(curr, system_time, &next_time);
+ gate_open = curr->operation[index].properties & TSNEP_GCL_MASK;
+ if (index == curr->count - 1)
+ index = 0;
+ else
+ index++;
+ next_gate_open =
+ curr->operation[index].properties & TSNEP_GCL_MASK;
+ } else if (curr->change) {
+ /* operation of previous GCL is active */
+ int index;
+ u64 start_before;
+ u64 n;
+
+ index = get_operation(prev, system_time, &next_time);
+ next_time = curr->start_time;
+ start_before = prev->base_time;
+ n = div64_u64(curr->start_time - start_before,
+ prev->cycle_time);
+ start_before += n * prev->cycle_time;
+ if (curr->start_time == start_before)
+ start_before -= prev->cycle_time;
+ if (((start_before + prev->cycle_time_extension) >=
+ curr->start_time) &&
+ (curr->start_time - prev->cycle_time_extension <=
+ system_time)) {
+ /* extend */
+ index = prev->count - 1;
+ }
+ gate_open = prev->operation[index].properties & TSNEP_GCL_MASK;
+ next_gate_open =
+ curr->operation[0].properties & TSNEP_GCL_MASK;
+ } else {
+ /* GCL is waiting for start */
+ next_time = curr->start_time;
+ gate_open = 0xFF;
+ next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK;
+ }
+
+ if (gc_time != (next_time & 0xFFFFFFFF)) {
+ dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n",
+ gc_time, next_time);
+ return false;
+ }
+ if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT),
+ gate_open);
+ return false;
+ }
+ if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) !=
+ next_gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control next open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT),
+ next_gate_open);
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms)
+{
+ ktime_t start = ktime_get();
+
+ do {
+ if (!check_gate(adapter))
+ return false;
+ } while (ktime_ms_delta(ktime_get(), start) < ms);
+
+ return true;
+}
+
+static bool enable_check_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ int retval;
+
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt);
+ if (retval)
+ return false;
+
+ if (!check_gate_duration(adapter, ms))
+ return false;
+
+ return true;
+}
+
+static bool disable_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload qopt;
+ int retval;
+
+ memset(&qopt, 0, sizeof(qopt));
+ qopt.cmd = TAPRIO_CMD_DESTROY;
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt);
+ if (retval)
+ return false;
+
+ return true;
+}
+
+static bool run_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ if (!enable_check_taprio(adapter, qopt, ms))
+ return false;
+
+ if (!disable_taprio(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x02;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x03;
+ qopt->entries[1].interval = 800000;
+ qopt->entries[2].gate_mask = 0x07;
+ qopt->entries[2].interval = 240000;
+ qopt->entries[3].gate_mask = 0x01;
+ qopt->entries[3].interval = 80000;
+ qopt->entries[4].gate_mask = 0x04;
+ qopt->entries[4].interval = 70000;
+ qopt->entries[5].gate_mask = 0x06;
+ qopt->entries[5].interval = 60000;
+ qopt->entries[6].gate_mask = 0x0F;
+ qopt->entries[6].interval = 50000;
+ qopt->num_entries = 7;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 411854;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x17;
+ qopt->entries[0].interval = 23842;
+ qopt->entries[1].gate_mask = 0x16;
+ qopt->entries[1].interval = 13482;
+ qopt->entries[2].gate_mask = 0x15;
+ qopt->entries[2].interval = 49428;
+ qopt->entries[3].gate_mask = 0x14;
+ qopt->entries[3].interval = 38189;
+ qopt->entries[4].gate_mask = 0x13;
+ qopt->entries[4].interval = 92321;
+ qopt->entries[5].gate_mask = 0x12;
+ qopt->entries[5].interval = 71239;
+ qopt->entries[6].gate_mask = 0x11;
+ qopt->entries[6].interval = 69932;
+ qopt->entries[7].gate_mask = 0x10;
+ qopt->entries[7].interval = 53421;
+ qopt->num_entries = 8;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ delay_base_time(adapter, qopt, 12);
+ qopt->cycle_time = 125000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x27;
+ qopt->entries[0].interval = 15000;
+ qopt->entries[1].gate_mask = 0x26;
+ qopt->entries[1].interval = 15000;
+ qopt->entries[2].gate_mask = 0x25;
+ qopt->entries[2].interval = 12500;
+ qopt->entries[3].gate_mask = 0x24;
+ qopt->entries[3].interval = 17500;
+ qopt->entries[4].gate_mask = 0x23;
+ qopt->entries[4].interval = 10000;
+ qopt->entries[5].gate_mask = 0x22;
+ qopt->entries[5].interval = 11000;
+ qopt->entries[6].gate_mask = 0x21;
+ qopt->entries[6].interval = 9000;
+ qopt->entries[7].gate_mask = 0x20;
+ qopt->entries[7].interval = 10000;
+ qopt->entries[8].gate_mask = 0x20;
+ qopt->entries[8].interval = 12500;
+ qopt->entries[9].gate_mask = 0x20;
+ qopt->entries[9].interval = 12500;
+ qopt->num_entries = 10;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x30;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x31;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to identical */
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ delay_base_time(adapter, qopt, 17);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to same cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x42;
+ qopt->entries[1].gate_mask = 0x43;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x54;
+ qopt->entries[0].interval = 33333;
+ qopt->entries[1].gate_mask = 0x55;
+ qopt->entries[1].interval = 66667;
+ delay_base_time(adapter, qopt, 23);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x66;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x67;
+ qopt->entries[1].interval = 25000;
+ qopt->entries[2].gate_mask = 0x68;
+ qopt->entries[2].interval = 25000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to multiple of cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 200000;
+ qopt->entries[0].gate_mask = 0x79;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x7A;
+ qopt->entries[1].interval = 150000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->entries[0].gate_mask = 0x7B;
+ qopt->entries[0].interval = 125000;
+ qopt->entries[1].gate_mask = 0x7C;
+ qopt->entries[1].interval = 250000;
+ qopt->entries[2].gate_mask = 0x7D;
+ qopt->entries[2].interval = 375000;
+ qopt->entries[3].gate_mask = 0x7E;
+ qopt->entries[3].interval = 250000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 333333;
+ qopt->entries[0].gate_mask = 0x8F;
+ qopt->entries[0].interval = 166666;
+ qopt->entries[1].gate_mask = 0x80;
+ qopt->entries[1].interval = 166667;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 62500;
+ qopt->entries[0].gate_mask = 0x81;
+ qopt->entries[0].interval = 31250;
+ qopt->entries[1].gate_mask = 0x82;
+ qopt->entries[1].interval = 15625;
+ qopt->entries[2].gate_mask = 0x83;
+ qopt->entries[2].interval = 15625;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->entries[0].gate_mask = 0x84;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0x85;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0x86;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0x87;
+ qopt->entries[3].interval = 100000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1700000;
+ qopt->entries[0].gate_mask = 0x88;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x89;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0x8A;
+ qopt->entries[2].interval = 600000;
+ qopt->entries[3].gate_mask = 0x8B;
+ qopt->entries[3].interval = 100000;
+ qopt->entries[4].gate_mask = 0x8C;
+ qopt->entries[4].interval = 500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 6);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 50000;
+ qopt->entries[0].gate_mask = 0x90;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x91;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase */
+ qopt->base_time = ktime_set(0, 50000);
+ qopt->entries[0].gate_mask = 0x92;
+ qopt->entries[0].interval = 33000;
+ qopt->entries[1].gate_mask = 0x93;
+ qopt->entries[1].interval = 67000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x94;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x95;
+ qopt->entries[1].interval = 600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 700000);
+ qopt->cycle_time = 2000000;
+ qopt->cycle_time_extension = 1900000;
+ qopt->entries[0].gate_mask = 0x96;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x97;
+ qopt->entries[1].interval = 1600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x98;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x99;
+ qopt->entries[1].interval = 600000;
+ qopt->entries[2].gate_mask = 0x9A;
+ qopt->entries[2].interval = 500000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 100000);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 300000;
+ qopt->entries[0].gate_mask = 0x9B;
+ qopt->entries[0].interval = 150000;
+ qopt->entries[1].gate_mask = 0x9C;
+ qopt->entries[1].interval = 350000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0xAD;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0xAE;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0xAF;
+ qopt->entries[2].interval = 300000;
+ qopt->num_entries = 3;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->cycle_time_extension = 100000;
+ qopt->entries[0].gate_mask = 0xA0;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0xA1;
+ qopt->entries[1].interval = 200000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 499999;
+ qopt->entries[0].gate_mask = 0xB2;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0xB3;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0xB4;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0xB5;
+ qopt->entries[3].interval = 200000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 6000000;
+ qopt->cycle_time_extension = 5999999;
+ qopt->entries[0].gate_mask = 0xC6;
+ qopt->entries[0].interval = 1000000;
+ qopt->entries[1].gate_mask = 0xC7;
+ qopt->entries[1].interval = 1000000;
+ qopt->entries[2].gate_mask = 0xC8;
+ qopt->entries[2].interval = 1000000;
+ qopt->entries[3].gate_mask = 0xC9;
+ qopt->entries[3].interval = 1500000;
+ qopt->entries[4].gate_mask = 0xCA;
+ qopt->entries[4].interval = 1500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+int tsnep_ethtool_get_test_count(void)
+{
+ return TSNEP_TEST_COUNT;
+}
+
+void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings));
+}
+
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ eth_test->len = TSNEP_TEST_COUNT;
+
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE) {
+ /* no tests are done online */
+ data[TSNEP_TEST_ENABLE] = 0;
+ data[TSNEP_TEST_TAPRIO] = 0;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+
+ return;
+ }
+
+ if (tsnep_test_gc_enable(adapter)) {
+ data[TSNEP_TEST_ENABLE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_ENABLE] = 1;
+ }
+
+ if (tsnep_test_taprio(adapter)) {
+ data[TSNEP_TEST_TAPRIO] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO] = 1;
+ }
+
+ if (tsnep_test_taprio_change(adapter)) {
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 1;
+ }
+
+ if (tsnep_test_taprio_extension(adapter)) {
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 1;
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
new file mode 100644
index 0000000000..745b191a55
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+/* reserve the last operation for the additional operation needed at a list change */
+#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
+
+static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u64 cycle_time;
+
+ if (!qopt->cycle_time)
+ return -ERANGE;
+ if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
+ return -EINVAL;
+ cycle_time = 0;
+ for (i = 0; i < qopt->num_entries; i++) {
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+ if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
+ return -EINVAL;
+ if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
+ return -EINVAL;
+ cycle_time += qopt->entries[i].interval;
+ }
+ if (qopt->cycle_time != cycle_time)
+ return -EINVAL;
+ if (qopt->cycle_time_extension >= qopt->cycle_time)
+ return -EINVAL;
+
+ return 0;
+}
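+
+/* An illustrative schedule that passes this validation, configured from
+ * user space (interface name, handle and mapping are examples):
+ *   tc qdisc replace dev eth0 parent root handle 100 taprio \
+ *       num_tc 2 map 0 1 queues 1@0 1@1 base-time 0 \
+ *       sched-entry S 0x1 300000 sched-entry S 0x2 700000 \
+ *       flags 0x2
+ * flags 0x2 requests full offload, which ends up in tsnep_taprio() below
+ * with cycle_time = 1000000 ns and two TC_TAPRIO_CMD_SET_GATES entries.
+ */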
+
+static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
+ u32 properties, u32 interval, bool flush)
+{
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties = properties;
+ gcl->operation[index].interval = interval;
+
+ iowrite32(properties, addr);
+ iowrite32(interval, addr + sizeof(u32));
+
+ if (flush) {
+ /* flush write with read access */
+ ioread32(addr);
+ }
+}
+
+static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
+{
+ u64 duration;
+ int count;
+
+ /* the change needs to be triggered one or two operations before the
+ * start of the new gate control list
+ * - the change is triggered at the start of an operation (minimum one
+ * operation)
+ * - an operation with adjusted interval is inserted on demand to
+ * exactly meet the start of the new gate control list (optional)
+ *
+ * additionally, properties are read directly after the start of the
+ * previous operation
+ *
+ * therefore, three operations need to be considered for the limit
+ */
+ duration = 0;
+ count = 3;
+ while (count) {
+ duration += gcl->operation[index].interval;
+
+ index--;
+ if (index < 0)
+ index = gcl->count - 1;
+
+ count--;
+ }
+
+ return duration;
+}
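+
+/* Worked example with illustrative numbers: for a list with intervals
+ * {20000, 30000, 50000} ns, the change duration at index 0 sums the
+ * three operations ending at index 0 (wrapping around the list):
+ * 20000 + 50000 + 30000 = 100000 ns.
+ */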
+
+static void tsnep_write_gcl(struct tsnep_gcl *gcl,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u32 properties;
+ u64 extend;
+ u64 cut;
+
+ gcl->base_time = ktime_to_ns(qopt->base_time);
+ gcl->cycle_time = qopt->cycle_time;
+ gcl->cycle_time_extension = qopt->cycle_time_extension;
+
+ for (i = 0; i < qopt->num_entries; i++) {
+ properties = qopt->entries[i].gate_mask;
+ if (i == (qopt->num_entries - 1))
+ properties |= TSNEP_GCL_LAST;
+
+ tsnep_write_gcl_operation(gcl, i, properties,
+ qopt->entries[i].interval, true);
+ }
+ gcl->count = qopt->num_entries;
+
+ /* calculate change limit; i.e., the time needed between enable and
+ * start of new gate control list
+ */
+
+ /* case 1: extend cycle time for change
+ * - change duration of last operation
+ * - cycle time extension
+ */
+ extend = tsnep_change_duration(gcl, gcl->count - 1);
+ extend += gcl->cycle_time_extension;
+
+ /* case 2: cut cycle time for change
+ * - maximum change duration
+ */
+ cut = 0;
+ for (i = 0; i < gcl->count; i++)
+ cut = max(cut, tsnep_change_duration(gcl, i));
+
+ /* use maximum, because the actual case (extend or cut) can be
+ * determined only after limit is known (chicken-and-egg problem)
+ */
+ gcl->change_limit = max(extend, cut);
+}
+
+static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ if (start <= limit) {
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += (n + 1) * gcl->cycle_time;
+ }
+
+ return start;
+}
+
+static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += n * gcl->cycle_time;
+ if (start == limit)
+ start -= gcl->cycle_time;
+
+ return start;
+}
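+
+/* Example with illustrative numbers: base_time = 1000 and cycle_time = 500
+ * give tsnep_gcl_start_after(1700) == 2000 (first cycle start strictly
+ * after the limit) and tsnep_gcl_start_before(2000) == 1500 (last cycle
+ * start strictly before the limit).
+ */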
+
+static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
+ bool insert)
+{
+ /* previous operation triggers change and properties are evaluated at
+ * start of operation
+ */
+ if (index == 0)
+ index = gcl->count - 1;
+ else
+ index = index - 1;
+ change -= gcl->operation[index].interval;
+
+ /* optionally change to new list with additional operation in between */
+ if (insert) {
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties |= TSNEP_GCL_INSERT;
+ iowrite32(gcl->operation[index].properties, addr);
+ }
+
+ return change;
+}
+
+static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
+{
+ int i;
+ u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
+ void __iomem *addr;
+
+ /* search for insert operation and reset properties */
+ for (i = 0; i < gcl->count; i++) {
+ if (gcl->operation[i].properties & ~mask) {
+ addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * i;
+
+ gcl->operation[i].properties &= mask;
+ iowrite32(gcl->operation[i].properties, addr);
+
+ break;
+ }
+ }
+}
+
+static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
+ u64 change, u32 interval)
+{
+ u32 properties;
+
+ properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
+ /* change to new list directly after inserted operation */
+ properties |= TSNEP_GCL_CHANGE;
+
+ /* the last operation of the list is reserved for the insert operation */
+ tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
+ interval, false);
+
+ return tsnep_set_gcl_change(gcl, ref, change, true);
+}
+
+static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
+{
+ int ref = gcl->count - 1;
+ u32 interval = gcl->operation[ref].interval + extension;
+
+ start -= gcl->operation[ref].interval;
+
+ return tsnep_insert_gcl_operation(gcl, ref, start, interval);
+}
+
+static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
+{
+ u64 sum = 0;
+ int i;
+
+ /* find the operation which shall be cut */
+ for (i = 0; i < gcl->count; i++) {
+ u64 sum_tmp = sum + gcl->operation[i].interval;
+ u64 interval;
+
+ /* sum up operations as long as cycle time is not exceeded */
+ if (sum_tmp > cycle_time)
+ break;
+
+ /* remaining interval must be big enough for hardware */
+ interval = cycle_time - sum_tmp;
+ if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
+ break;
+
+ sum = sum_tmp;
+ }
+ if (sum == cycle_time) {
+ /* no need to cut operation itself or whole cycle
+ * => change exactly at operation
+ */
+ return tsnep_set_gcl_change(gcl, i, start + sum, false);
+ }
+ return tsnep_insert_gcl_operation(gcl, i, start + sum,
+ cycle_time - sum);
+}
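+
+/* Example with illustrative numbers: intervals {40000, 60000} ns and a
+ * cut cycle_time of 70000 ns: operation 0 still fits (sum 40000 ns),
+ * operation 1 would exceed the cut cycle, so an operation of
+ * 70000 - 40000 = 30000 ns is inserted and the change triggers after it.
+ */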
+
+static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
+ struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
+{
+ u64 system_time;
+ u64 timeout;
+ u64 limit;
+
+ /* estimate the timeout limit after the timeout is enabled; the actual
+ * timeout limit in hardware will be earlier than the estimate, so we
+ * are on the safe side
+ */
+ tsnep_get_system_time(adapter, &system_time);
+ timeout = system_time + TSNEP_GC_TIMEOUT;
+
+ if (curr)
+ limit = timeout + curr->change_limit;
+ else
+ limit = timeout;
+
+ gcl->start_time = tsnep_gcl_start_after(gcl, limit);
+
+ /* the gate control time register is only 32 bits wide => the time must
+ * be in the near future (the driver does not support the far future)
+ */
+ if ((gcl->start_time - system_time) >= U32_MAX)
+ return -EAGAIN;
+
+ if (curr) {
+ /* change gate control list */
+ u64 last;
+ u64 change;
+
+ last = tsnep_gcl_start_before(curr, gcl->start_time);
+ if ((last + curr->cycle_time) == gcl->start_time)
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+ else if (((gcl->start_time - last) <=
+ curr->cycle_time_extension) ||
+ ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
+ change = tsnep_extend_gcl(curr, last,
+ gcl->start_time - last);
+ else
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+
+ WARN_ON(change <= timeout);
+ gcl->change = true;
+ iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
+ } else {
+ /* start gate control list */
+ WARN_ON(gcl->start_time <= timeout);
+ gcl->change = false;
+ iowrite32(gcl->start_time & 0xFFFFFFFF,
+ adapter->addr + TSNEP_GC_TIME);
+ }
+
+ return 0;
+}
+
+static int tsnep_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tsnep_gcl *gcl;
+ struct tsnep_gcl *curr;
+ int retval;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ if (qopt->cmd == TAPRIO_CMD_DESTROY) {
+ /* disable gate control if active */
+ mutex_lock(&adapter->gate_control_lock);
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+ } else if (qopt->cmd != TAPRIO_CMD_REPLACE) {
+ return -EOPNOTSUPP;
+ }
+
+ retval = tsnep_validate_gcl(qopt);
+ if (retval)
+ return retval;
+
+ mutex_lock(&adapter->gate_control_lock);
+
+ gcl = &adapter->gcl[adapter->next_gcl];
+ tsnep_write_gcl(gcl, qopt);
+
+ /* select current gate control list if active */
+ if (adapter->gate_control_active) {
+ if (adapter->next_gcl == 0)
+ curr = &adapter->gcl[1];
+ else
+ curr = &adapter->gcl[0];
+ } else {
+ curr = NULL;
+ }
+
+ for (;;) {
+ /* start the timeout which discards a late enable; this helps to ensure
+ * that the start/change time is still in the future at enable
+ */
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+
+ retval = tsnep_enable_gcl(adapter, gcl, curr);
+ if (retval) {
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return retval;
+ }
+
+ /* enable gate control list */
+ if (adapter->next_gcl == 0)
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+ else
+ iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
+
+ /* done if timeout did not happen */
+ if (!(ioread32(adapter->addr + TSNEP_GC) &
+ TSNEP_GC_TIMEOUT_SIGNAL))
+ break;
+
+ /* timeout is acknowledged with any enable */
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+
+ if (curr)
+ tsnep_clean_gcl(curr);
+
+ /* retry because of timeout */
+ }
+
+ adapter->gate_control_active = true;
+
+ if (adapter->next_gcl == 0)
+ adapter->next_gcl = 1;
+ else
+ adapter->next_gcl = 0;
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+}
+
+static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return tsnep_tc_query_caps(adapter, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return tsnep_taprio(adapter, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_init(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return 0;
+
+ /* open all gates */
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
+
+ adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
+ adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
+
+ return 0;
+}
+
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return;
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c
new file mode 100644
index 0000000000..c0513848c5
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_xdp.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include <linux/if_vlan.h>
+#include <net/xdp_sock_drv.h>
+
+#include "tsnep.h"
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *old_prog;
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
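+
+/* Illustrative attachment from user space (names are examples):
+ *   ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
+ * The exchanged previous program, if any, is released above.
+ */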
+
+static int tsnep_xdp_enable_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct tsnep_queue *queue;
+ int retval;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+ if (queue->rx->queue_index != queue_id ||
+ queue->tx->queue_index != queue_id) {
+ netdev_err(adapter->netdev,
+ "XSK support only for TX/RX queue pairs\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ retval = xsk_pool_dma_map(pool, adapter->dmadev,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (retval) {
+ netdev_err(adapter->netdev, "failed to map XSK pool\n");
+
+ return retval;
+ }
+
+ retval = tsnep_enable_xsk(queue, pool);
+ if (retval) {
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_xdp_disable_pool(struct tsnep_adapter *adapter, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ struct tsnep_queue *queue;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+ if (!pool)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+
+ tsnep_disable_xsk(queue);
+
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+ return 0;
+}
+
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ return pool ? tsnep_xdp_enable_pool(adapter, pool, queue_id) :
+ tsnep_xdp_disable_pool(adapter, queue_id);
+}
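+
+/* Illustrative zero-copy AF_XDP usage (tool and names are examples), e.g.
+ * with the xdpsock utility from the xdp-project's bpf-examples:
+ *   xdpsock -i eth0 -q 0 -z
+ * binds an XSK buffer pool to queue pair 0, which ends up in
+ * tsnep_xdp_enable_pool() above.
+ */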