author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/net/ethernet/freescale/enetc
parent     Initial commit. (diff)
download   linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz
           linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip

Adding upstream version 5.10.209. (tag: upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/freescale/enetc')
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig             57
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile            18
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c            2006
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h             420
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c        213
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c     773
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h          947
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c        180
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_msg.c         164
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c    110
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c         1278
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.h           60
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ptp.c         152
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c        1608
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c          248
15 files changed, 8234 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 000000000..d99ea0f4e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC
+	tristate "ENETC PF driver"
+	depends on PCI && PCI_MSI
+	select FSL_ENETC_MDIO
+	select PHYLINK
+	select PCS_LYNX
+	select DIMLIB
+	help
+	  This driver supports NXP ENETC gigabit Ethernet controller PCIe
+	  physical function (PF) devices, managing ENETC Ports at a privileged
+	  level.
+
+	  If compiled as module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+	tristate "ENETC VF driver"
+	depends on PCI && PCI_MSI
+	select FSL_ENETC_MDIO
+	select PHYLINK
+	select DIMLIB
+	help
+	  This driver supports NXP ENETC gigabit Ethernet controller PCIe
+	  virtual function (VF) devices enabled by the ENETC PF driver.
+
+	  If compiled as module (M), the module name is fsl-enetc-vf.
+
+config FSL_ENETC_MDIO
+	tristate "ENETC MDIO driver"
+	depends on PCI
+	help
+	  This driver supports the NXP ENETC Central MDIO controller as a PCIe
+	  physical function (PF) device.
+
+	  If compiled as module (M), the module name is fsl-enetc-mdio.
+
+config FSL_ENETC_PTP_CLOCK
+	tristate "ENETC PTP clock driver"
+	depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
+	default y
+	help
+	  This driver adds support for using the ENETC 1588 timer
+	  as a PTP clock. This clock is only useful if your PTP
+	  programs are getting hardware time stamps on the PTP Ethernet
+	  packets using the SO_TIMESTAMPING API.
+
+	  If compiled as module (M), the module name is fsl-enetc-ptp.
+
+config FSL_ENETC_QOS
+	bool "ENETC hardware Time-Sensitive Networking support"
+	depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
+	help
+	  ENETC supports Time-Sensitive Networking (TSN) capabilities such as
+	  802.1Qbv, 802.1Qci and 802.1Qbu. These capabilities can be enabled
+	  or disabled from user space via tc (QoS) commands and are handled
+	  by the QoS driver on the kernel side. Currently only taprio
+	  (802.1Qbv) and the Credit Based Shaper (802.1Qav) are supported.
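The FSL_ENETC_PTP_CLOCK help text above refers to the SO_TIMESTAMPING API. The following is an illustrative userspace sketch, not part of this patch: it enables hardware receive timestamping on an interface and reads the raw hardware timestamp of one received packet. The interface name "eno0" and UDP port 319 (PTP event messages) are placeholder assumptions, and SIOCSHWTSTAMP requires CAP_NET_ADMIN.

	#include <stdio.h>
	#include <time.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <netinet/in.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_OFF,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr = { .ifr_name = "eno0" }; /* placeholder netdev */
		int flags = SOF_TIMESTAMPING_RX_HARDWARE |
			    SOF_TIMESTAMPING_RAW_HARDWARE;
		char data[256], ctrl[256];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
		};
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = htons(319), /* PTP event messages */
			.sin_addr.s_addr = htonl(INADDR_ANY),
		};
		struct cmsghdr *cmsg;
		int fd;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return 1;

		/* ask the driver to timestamp received frames in hardware */
		ifr.ifr_data = (void *)&cfg;
		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			return 1;

		/* ask the stack to deliver raw hardware timestamps to us */
		setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
		bind(fd, (struct sockaddr *)&addr, sizeof(addr));

		if (recvmsg(fd, &msg, 0) < 0)
			return 1;

		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET &&
			    cmsg->cmsg_type == SCM_TIMESTAMPING) {
				/* struct scm_timestamping: ts[2] is the raw hw stamp */
				struct timespec *ts = (struct timespec *)CMSG_DATA(cmsg);

				printf("hw ts: %lld.%09ld\n",
				       (long long)ts[2].tv_sec, ts[2].tv_nsec);
			}
		}
		return 0;
	}

In this driver, the SIOCSHWTSTAMP request is serviced by enetc_hwtstamp_set() in enetc.c below, which maps every rx_filter other than HWTSTAMP_FILTER_NONE to HWTSTAMP_FILTER_ALL.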
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 000000000..74f7ac253
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-y := enetc_pf.o $(common-objs)
+fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
+
+obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 000000000..5f9603d4c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,2006 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_sched.h>
+
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS	13
+#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+			      int active_offloads);
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_bdr *tx_ring;
+	int count;
+
+	tx_ring = priv->tx_ring[skb->queue_mapping];
+
+	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+		if (unlikely(skb_linearize(skb)))
+			goto drop_packet_err;
+
+	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+		netif_stop_subqueue(ndev, tx_ring->index);
+		return NETDEV_TX_BUSY;
+	}
+
+	enetc_lock_mdio();
+	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
+	enetc_unlock_mdio();
+
+	if (unlikely(!count))
+		goto drop_packet_err;
+
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+		netif_stop_subqueue(ndev, tx_ring->index);
+
+	return NETDEV_TX_OK;
+
+drop_packet_err:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
+{
+	int l3_start, l3_hsize;
+	u16 l3_flags, l4_flags;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+	switch (skb->csum_offset) {
+	case offsetof(struct tcphdr, check):
+		l4_flags = ENETC_TXBD_L4_TCP;
+		break;
+	case offsetof(struct udphdr, check):
+		l4_flags = ENETC_TXBD_L4_UDP;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return false;
+	}
+
+	l3_start = skb_network_offset(skb);
+	l3_hsize = skb_network_header_len(skb);
+
+	l3_flags = 0;
+	if (skb->protocol == htons(ETH_P_IPV6))
+		l3_flags = ENETC_TXBD_L3_IPV6;
+
+	/* write BD fields */
+	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
+	txbd->l4_csoff = l4_flags;
+
+	return true;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+				struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->is_dma_page)
+		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+			       tx_swbd->len, DMA_TO_DEVICE);
else + dma_unmap_single(tx_ring->dev, tx_swbd->dma, + tx_swbd->len, DMA_TO_DEVICE); + tx_swbd->dma = 0; +} + +static void enetc_free_tx_skb(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->dma) + enetc_unmap_tx_buff(tx_ring, tx_swbd); + + if (tx_swbd->skb) { + dev_kfree_skb_any(tx_swbd->skb); + tx_swbd->skb = NULL; + } +} + +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, + int active_offloads) +{ + struct enetc_tx_swbd *tx_swbd; + skb_frag_t *frag; + int len = skb_headlen(skb); + union enetc_tx_bd temp_bd; + union enetc_tx_bd *txbd; + bool do_vlan, do_tstamp; + int i, count = 0; + unsigned int f; + dma_addr_t dma; + u8 flags = 0; + + i = tx_ring->next_to_use; + txbd = ENETC_TXBD(*tx_ring, i); + prefetchw(txbd); + + dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) + goto dma_err; + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + temp_bd.lstatus = 0; + + tx_swbd = &tx_ring->tx_swbd[i]; + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 0; + count++; + + do_vlan = skb_vlan_tag_present(skb); + do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP); + tx_swbd->do_tstamp = do_tstamp; + tx_swbd->check_wb = tx_swbd->do_tstamp; + + if (do_vlan || do_tstamp) + flags |= ENETC_TXBD_FLAGS_EX; + + if (enetc_tx_csum(skb, &temp_bd)) + flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS; + else if (tx_ring->tsd_enable) + flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART; + + /* first BD needs frm_len and offload flags set */ + temp_bd.frm_len = cpu_to_le16(skb->len); + temp_bd.flags = flags; + + if (flags & ENETC_TXBD_FLAGS_TSE) { + u32 temp; + + temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK) + | (flags << ENETC_TXBD_FLAGS_OFFSET); + temp_bd.txstart = cpu_to_le32(temp); + } + + if (flags & ENETC_TXBD_FLAGS_EX) { + u8 e_flags = 0; + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + /* add extension BD for VLAN and/or timestamping */ + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + if (do_vlan) { + temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + temp_bd.ext.tpid = 0; /* < C-TAG */ + e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; + } + + if (do_tstamp) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; + } + + temp_bd.ext.e_flags = e_flags; + count++; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_err; + + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 1; + count++; + } + + /* last BD needs 'F' bit set */ + flags |= ENETC_TXBD_FLAGS_F; + temp_bd.flags = flags; + *txbd = temp_bd; + + tx_ring->tx_swbd[i].skb = skb; + + enetc_bdr_idx_inc(tx_ring, &i); + tx_ring->next_to_use = i; + + skb_tx_timestamp(skb); + + /* let H/W know BD ring has been updated */ + 
enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */ + + return count; + +dma_err: + dev_err(tx_ring->dev, "DMA map error"); + + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_skb(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (count--); + + return 0; +} + +static irqreturn_t enetc_msix(int irq, void *data) +{ + struct enetc_int_vector *v = data; + int i; + + enetc_lock_mdio(); + + /* disable interrupts */ + enetc_wr_reg_hot(v->rbier, 0); + enetc_wr_reg_hot(v->ricr1, v->rx_ictt); + + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) + enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); + + enetc_unlock_mdio(); + + napi_schedule(&v->napi); + + return IRQ_HANDLED; +} + +static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget); +static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit); + +static void enetc_rx_dim_work(struct work_struct *w) +{ + struct dim *dim = container_of(w, struct dim, work); + struct dim_cq_moder moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + struct enetc_int_vector *v = + container_of(dim, struct enetc_int_vector, rx_dim); + + v->rx_ictt = enetc_usecs_to_cycles(moder.usec); + dim->state = DIM_START_MEASURE; +} + +static void enetc_rx_net_dim(struct enetc_int_vector *v) +{ + struct dim_sample dim_sample = {}; + + v->comp_cnt++; + + if (!v->rx_napi_work) + return; + + dim_update_sample(v->comp_cnt, + v->rx_ring.stats.packets, + v->rx_ring.stats.bytes, + &dim_sample); + net_dim(&v->rx_dim, dim_sample); +} + +static int enetc_poll(struct napi_struct *napi, int budget) +{ + struct enetc_int_vector + *v = container_of(napi, struct enetc_int_vector, napi); + bool complete = true; + int work_done; + int i; + + enetc_lock_mdio(); + + for (i = 0; i < v->count_tx_rings; i++) + if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) + complete = false; + + work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); + if (work_done == budget) + complete = false; + if (work_done) + v->rx_napi_work = true; + + if (!complete) { + enetc_unlock_mdio(); + return budget; + } + + napi_complete_done(napi, work_done); + + if (likely(v->rx_dim_en)) + enetc_rx_net_dim(v); + + v->rx_napi_work = false; + + /* enable interrupts */ + enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); + + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) + enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), + ENETC_TBIER_TXTIE); + + enetc_unlock_mdio(); + + return work_done; +} + +static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) +{ + int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; + + return pi >= ci ? 
pi - ci : tx_ring->bd_count - ci + pi; +} + +static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, + u64 *tstamp) +{ + u32 lo, hi, tstamp_lo; + + lo = enetc_rd_hot(hw, ENETC_SICTR0); + hi = enetc_rd_hot(hw, ENETC_SICTR1); + tstamp_lo = le32_to_cpu(txbd->wb.tstamp); + if (lo <= tstamp_lo) + hi -= 1; + *tstamp = (u64)hi << 32 | tstamp_lo; +} + +static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) +{ + struct skb_shared_hwtstamps shhwtstamps; + + if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + skb_txtime_consumed(skb); + skb_tstamp_tx(skb, &shhwtstamps); + } +} + +static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) +{ + struct net_device *ndev = tx_ring->ndev; + int tx_frm_cnt = 0, tx_byte_cnt = 0; + struct enetc_tx_swbd *tx_swbd; + int i, bds_to_clean; + bool do_tstamp; + u64 tstamp = 0; + + i = tx_ring->next_to_clean; + tx_swbd = &tx_ring->tx_swbd[i]; + + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + + do_tstamp = false; + + while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { + bool is_eof = !!tx_swbd->skb; + + if (unlikely(tx_swbd->check_wb)) { + struct enetc_ndev_priv *priv = netdev_priv(ndev); + union enetc_tx_bd *txbd; + + txbd = ENETC_TXBD(*tx_ring, i); + + if (txbd->flags & ENETC_TXBD_FLAGS_W && + tx_swbd->do_tstamp) { + enetc_get_tx_tstamp(&priv->si->hw, txbd, + &tstamp); + do_tstamp = true; + } + } + + if (likely(tx_swbd->dma)) + enetc_unmap_tx_buff(tx_ring, tx_swbd); + + if (is_eof) { + if (unlikely(do_tstamp)) { + enetc_tstamp_tx(tx_swbd->skb, tstamp); + do_tstamp = false; + } + napi_consume_skb(tx_swbd->skb, napi_budget); + tx_swbd->skb = NULL; + } + + tx_byte_cnt += tx_swbd->len; + + bds_to_clean--; + tx_swbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + } + + /* BD iteration loop end */ + if (is_eof) { + tx_frm_cnt++; + /* re-arm interrupt source */ + enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | + BIT(16 + tx_ring->index)); + } + + if (unlikely(!bds_to_clean)) + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + } + + tx_ring->next_to_clean = i; + tx_ring->stats.packets += tx_frm_cnt; + tx_ring->stats.bytes += tx_byte_cnt; + + if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && + __netif_subqueue_stopped(ndev, tx_ring->index) && + (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { + netif_wake_subqueue(ndev, tx_ring->index); + } + + return tx_frm_cnt != ENETC_DEFAULT_TX_WORK; +} + +static bool enetc_new_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + struct page *page; + dma_addr_t addr; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { + __free_page(page); + + return false; + } + + rx_swbd->dma = addr; + rx_swbd->page = page; + rx_swbd->page_offset = ENETC_RXB_PAD; + + return true; +} + +static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) +{ + struct enetc_rx_swbd *rx_swbd; + union enetc_rx_bd *rxbd; + int i, j; + + i = rx_ring->next_to_use; + rx_swbd = &rx_ring->rx_swbd[i]; + rxbd = enetc_rxbd(rx_ring, i); + + for (j = 0; j < buff_cnt; j++) { + /* try reuse page */ + if (unlikely(!rx_swbd->page)) { + if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { + rx_ring->stats.rx_alloc_errs++; + break; + } + } + + /* update RxBD */ + rxbd->w.addr = 
cpu_to_le64(rx_swbd->dma + + rx_swbd->page_offset); + /* clear 'R" as well */ + rxbd->r.lstatus = 0; + + rxbd = enetc_rxbd_next(rx_ring, rxbd, i); + rx_swbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rx_swbd = rx_ring->rx_swbd; + } + } + + if (likely(j)) { + rx_ring->next_to_alloc = i; /* keep track from page reuse */ + rx_ring->next_to_use = i; + } + + return j; +} + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK +static void enetc_get_rx_tstamp(struct net_device *ndev, + union enetc_rx_bd *rxbd, + struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 lo, hi, tstamp_lo; + u64 tstamp; + + if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) { + lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0); + hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1); + rxbd = enetc_rxbd_ext(rxbd); + tstamp_lo = le32_to_cpu(rxbd->ext.tstamp); + if (lo <= tstamp_lo) + hi -= 1; + + tstamp = (u64)hi << 32 | tstamp_lo; + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(tstamp); + } +} +#endif + +static void enetc_get_offloads(struct enetc_bdr *rx_ring, + union enetc_rx_bd *rxbd, struct sk_buff *skb) +{ + struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); + + /* TODO: hashing */ + if (rx_ring->ndev->features & NETIF_F_RXCSUM) { + u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); + + skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { + __be16 tpid = 0; + + switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { + case 0: + tpid = htons(ETH_P_8021Q); + break; + case 1: + tpid = htons(ETH_P_8021AD); + break; + case 2: + tpid = htons(enetc_port_rd(&priv->si->hw, + ENETC_PCVLANR1)); + break; + case 3: + tpid = htons(enetc_port_rd(&priv->si->hw, + ENETC_PCVLANR2)); + break; + default: + break; + } + + __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); + } + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (priv->active_offloads & ENETC_F_RX_TSTAMP) + enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); +#endif +} + +static void enetc_process_skb(struct enetc_bdr *rx_ring, + struct sk_buff *skb) +{ + skb_record_rx_queue(skb, rx_ring->index); + skb->protocol = eth_type_trans(skb, rx_ring->ndev); +} + +static bool enetc_page_reusable(struct page *page) +{ + return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); +} + +static void enetc_reuse_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *old) +{ + struct enetc_rx_swbd *new; + + new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; + + /* next buf that may reuse a page */ + enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); + + /* copy page reference */ + *new = *old; +} + +static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + size, DMA_FROM_DEVICE); + return rx_swbd; +} + +static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + if (likely(enetc_page_reusable(rx_swbd->page))) { + rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; + page_ref_inc(rx_swbd->page); + + enetc_reuse_page(rx_ring, rx_swbd); + + /* sync for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + ENETC_RXB_DMA_SIZE, + 
DMA_FROM_DEVICE); + } else { + dma_unmap_page(rx_ring->dev, rx_swbd->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + rx_swbd->page = NULL; +} + +static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + struct sk_buff *skb; + void *ba; + + ba = page_address(rx_swbd->page) + rx_swbd->page_offset; + skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE); + if (unlikely(!skb)) { + rx_ring->stats.rx_alloc_errs++; + return NULL; + } + + skb_reserve(skb, ENETC_RXB_PAD); + __skb_put(skb, size); + + enetc_put_rx_buff(rx_ring, rx_swbd); + + return skb; +} + +static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, + u16 size, struct sk_buff *skb) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, + rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); + + enetc_put_rx_buff(rx_ring, rx_swbd); +} + +#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ + +static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit) +{ + int rx_frm_cnt = 0, rx_byte_cnt = 0; + int cleaned_cnt, i; + + cleaned_cnt = enetc_bd_unused(rx_ring); + /* next descriptor to process */ + i = rx_ring->next_to_clean; + + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd; + struct sk_buff *skb; + u32 bd_status; + u16 size; + + if (cleaned_cnt >= ENETC_RXBD_BUNDLE) { + int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); + + /* update ENETC's consumer index */ + enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); + cleaned_cnt -= count; + } + + rxbd = enetc_rxbd(rx_ring, i); + bd_status = le32_to_cpu(rxbd->r.lstatus); + if (!bd_status) + break; + + enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); + dma_rmb(); /* for reading other rxbd fields */ + size = le16_to_cpu(rxbd->r.buf_len); + skb = enetc_map_rx_buff_to_skb(rx_ring, i, size); + if (!skb) + break; + + enetc_get_offloads(rx_ring, rxbd, skb); + + cleaned_cnt++; + + rxbd = enetc_rxbd_next(rx_ring, rxbd, i); + if (unlikely(++i == rx_ring->bd_count)) + i = 0; + + if (unlikely(bd_status & + ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) { + dev_kfree_skb(skb); + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + dma_rmb(); + bd_status = le32_to_cpu(rxbd->r.lstatus); + + rxbd = enetc_rxbd_next(rx_ring, rxbd, i); + if (unlikely(++i == rx_ring->bd_count)) + i = 0; + } + + rx_ring->ndev->stats.rx_dropped++; + rx_ring->ndev->stats.rx_errors++; + + break; + } + + /* not last BD in frame? 
*/ + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + bd_status = le32_to_cpu(rxbd->r.lstatus); + size = ENETC_RXB_DMA_SIZE; + + if (bd_status & ENETC_RXBD_LSTATUS_F) { + dma_rmb(); + size = le16_to_cpu(rxbd->r.buf_len); + } + + enetc_add_rx_buff_to_skb(rx_ring, i, size, skb); + + cleaned_cnt++; + + rxbd = enetc_rxbd_next(rx_ring, rxbd, i); + if (unlikely(++i == rx_ring->bd_count)) + i = 0; + } + + rx_byte_cnt += skb->len; + + enetc_process_skb(rx_ring, skb); + + napi_gro_receive(napi, skb); + + rx_frm_cnt++; + } + + rx_ring->next_to_clean = i; + + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + + return rx_frm_cnt; +} + +/* Probing and Init */ +#define ENETC_MAX_RFS_SIZE 64 +void enetc_get_si_caps(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + /* find out how many of various resources we have to work with */ + val = enetc_rd(hw, ENETC_SICAPR0); + si->num_rx_rings = (val >> 16) & 0xff; + si->num_tx_rings = val & 0xff; + + val = enetc_rd(hw, ENETC_SIRFSCAPR); + si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); + si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); + + si->num_rss = 0; + val = enetc_rd(hw, ENETC_SIPCAPR0); + if (val & ENETC_SIPCAPR0_RSS) { + u32 rss; + + rss = enetc_rd(hw, ENETC_SIRSSCAPR); + si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); + } + + if (val & ENETC_SIPCAPR0_QBV) + si->hw_features |= ENETC_SI_F_QBV; + + if (val & ENETC_SIPCAPR0_PSFP) + si->hw_features |= ENETC_SI_F_PSFP; +} + +static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size) +{ + r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size, + &r->bd_dma_base, GFP_KERNEL); + if (!r->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(r->bd_dma_base, 128)) { + dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base, + r->bd_dma_base); + return -EINVAL; + } + + return 0; +} + +static int enetc_alloc_txbdr(struct enetc_bdr *txr) +{ + int err; + + txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd)); + if (!txr->tx_swbd) + return -ENOMEM; + + err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); + if (err) { + vfree(txr->tx_swbd); + return err; + } + + txr->next_to_clean = 0; + txr->next_to_use = 0; + + return 0; +} + +static void enetc_free_txbdr(struct enetc_bdr *txr) +{ + int size, i; + + for (i = 0; i < txr->bd_count; i++) + enetc_free_tx_skb(txr, &txr->tx_swbd[i]); + + size = txr->bd_count * sizeof(union enetc_tx_bd); + + dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); + txr->bd_base = NULL; + + vfree(txr->tx_swbd); + txr->tx_swbd = NULL; +} + +static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) +{ + int i, err; + + for (i = 0; i < priv->num_tx_rings; i++) { + err = enetc_alloc_txbdr(priv->tx_ring[i]); + + if (err) + goto fail; + } + + return 0; + +fail: + while (i-- > 0) + enetc_free_txbdr(priv->tx_ring[i]); + + return err; +} + +static void enetc_free_tx_resources(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_free_txbdr(priv->tx_ring[i]); +} + +static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended) +{ + size_t size = sizeof(union enetc_rx_bd); + int err; + + rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); + if (!rxr->rx_swbd) + return -ENOMEM; + + if (extended) + size *= 2; + + err = enetc_dma_alloc_bdr(rxr, size); + if (err) { + vfree(rxr->rx_swbd); + return err; + } + + rxr->next_to_clean = 0; + rxr->next_to_use = 0; + rxr->next_to_alloc = 0; + 
rxr->ext_en = extended; + + return 0; +} + +static void enetc_free_rxbdr(struct enetc_bdr *rxr) +{ + int size; + + size = rxr->bd_count * sizeof(union enetc_rx_bd); + + dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base); + rxr->bd_base = NULL; + + vfree(rxr->rx_swbd); + rxr->rx_swbd = NULL; +} + +static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv) +{ + bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); + int i, err; + + for (i = 0; i < priv->num_rx_rings; i++) { + err = enetc_alloc_rxbdr(priv->rx_ring[i], extended); + + if (err) + goto fail; + } + + return 0; + +fail: + while (i-- > 0) + enetc_free_rxbdr(priv->rx_ring[i]); + + return err; +} + +static void enetc_free_rx_resources(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_free_rxbdr(priv->rx_ring[i]); +} + +static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) +{ + int i; + + if (!tx_ring->tx_swbd) + return; + + for (i = 0; i < tx_ring->bd_count; i++) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_skb(tx_ring, tx_swbd); + } + + tx_ring->next_to_clean = 0; + tx_ring->next_to_use = 0; +} + +static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) +{ + int i; + + if (!rx_ring->rx_swbd) + return; + + for (i = 0; i < rx_ring->bd_count; i++) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + if (!rx_swbd->page) + continue; + + dma_unmap_page(rx_ring->dev, rx_swbd->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rx_swbd->page); + rx_swbd->page = NULL; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->next_to_alloc = 0; +} + +static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_free_rx_ring(priv->rx_ring[i]); + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_free_tx_ring(priv->tx_ring[i]); +} + +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, + GFP_KERNEL); + if (!cbdr->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { + dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); + return -EINVAL; + } + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + + return 0; +} + +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); + cbdr->bd_base = NULL; +} + +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) +{ + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); + + enetc_wr(hw, ENETC_SICBDRPIR, 0); + enetc_wr(hw, ENETC_SICBDRCIR, 0); + + /* enable ring */ + enetc_wr(hw, ENETC_SICBDRMR, BIT(31)); + + cbdr->pir = hw->reg + ENETC_SICBDRPIR; + cbdr->cir = hw->reg + ENETC_SICBDRCIR; +} + +void enetc_clear_cbdr(struct enetc_hw *hw) +{ + enetc_wr(hw, ENETC_SICBDRMR, 0); +} + +static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) +{ + int *rss_table; + int i; + + rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return 
-ENOMEM; + + /* Set up RSS table defaults */ + for (i = 0; i < si->num_rss; i++) + rss_table[i] = i % num_groups; + + enetc_set_rss_table(si, rss_table, si->num_rss); + + kfree(rss_table); + + return 0; +} + +int enetc_configure_si(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + struct enetc_hw *hw = &si->hw; + int err; + + /* set SI cache attributes */ + enetc_wr(hw, ENETC_SICAR0, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); + /* enable SI */ + enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); + + if (si->num_rss) { + err = enetc_setup_default_rss_table(si, priv->num_rx_rings); + if (err) + return err; + } + + return 0; +} + +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + int cpus = num_online_cpus(); + + priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; + priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; + + /* Enable all available TX rings in order to configure as many + * priorities as possible, when needed. + * TODO: Make # of TX rings run-time configurable + */ + priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); + priv->num_tx_rings = si->num_tx_rings; + priv->bdr_int_num = cpus; + priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; + priv->tx_ictt = ENETC_TXIC_TIMETHR; + + /* SI specific */ + si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; +} + +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + int err; + + err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring); + if (err) + return err; + + enetc_setup_cbdr(&si->hw, &si->cbd_ring); + + priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), + GFP_KERNEL); + if (!priv->cls_rules) { + err = -ENOMEM; + goto err_alloc_cls; + } + + return 0; + +err_alloc_cls: + enetc_clear_cbdr(&si->hw); + enetc_free_cbdr(priv->dev, &si->cbd_ring); + + return err; +} + +void enetc_free_si_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + + enetc_clear_cbdr(&si->hw); + enetc_free_cbdr(priv->dev, &si->cbd_ring); + + kfree(priv->cls_rules); +} + +static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int idx = tx_ring->index; + u32 tbmr; + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, + lower_32_bits(tx_ring->bd_dma_base)); + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, + upper_32_bits(tx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_txbdr_wr(hw, idx, ENETC_TBLENR, + ENETC_RTBLENR_LEN(tx_ring->bd_count)); + + /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ + tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); + tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); + + /* enable Tx ints by setting pkt thr to 1 */ + enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); + + tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio); + if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) + tbmr |= ENETC_TBMR_VIH; + + /* enable ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); + + tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); + tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); + tx_ring->idr = hw->reg + ENETC_SITXIDR; +} + +static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + u32 rbmr; + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, + lower_32_bits(rx_ring->bd_dma_base)); + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, + 
upper_32_bits(rx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, + ENETC_RTBLENR_LEN(rx_ring->bd_count)); + + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + + /* Also prepare the consumer index in case page allocation never + * succeeds. In that case, hardware will never advance producer index + * to match consumer index, and will drop all frames. + */ + enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); + enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); + + /* enable Rx ints by setting pkt thr to 1 */ + enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); + + rbmr = ENETC_RBMR_EN; + + if (rx_ring->ext_en) + rbmr |= ENETC_RBMR_BDS; + + if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + rbmr |= ENETC_RBMR_VTE; + + rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); + rx_ring->idr = hw->reg + ENETC_SIRXIDR; + + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); + /* update ENETC's consumer index */ + enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use); + + /* enable ring */ + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); +} + +static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_setup_txbdr(hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_setup_rxbdr(hw, priv->rx_ring[i]); +} + +static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + + /* disable EN bit on ring */ + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); +} + +static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int delay = 8, timeout = 100; + int idx = tx_ring->index; + + /* disable EN bit on ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); + + /* wait for busy to clear */ + while (delay < timeout && + enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { + msleep(delay); + delay *= 2; + } + + if (delay >= timeout) + netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", + idx); +} + +static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_clear_txbdr(hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_clear_rxbdr(hw, priv->rx_ring[i]); + + udelay(1); +} + +static int enetc_setup_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + struct enetc_hw *hw = &priv->si->hw; + int i, j, err; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + struct enetc_int_vector *v = priv->int_vector[i]; + int entry = ENETC_BDR_INT_BASE_IDX + i; + + snprintf(v->name, sizeof(v->name), "%s-rxtx%d", + priv->ndev->name, i); + err = request_irq(irq, enetc_msix, 0, v->name, v); + if (err) { + dev_err(priv->dev, "request_irq() failed!\n"); + goto irq_err; + } + disable_irq(irq); + + v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); + v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); + v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); + + enetc_wr(hw, ENETC_SIMSIRRV(i), entry); + + for (j = 0; j < v->count_tx_rings; j++) { + int idx = v->tx_ring[j].index; + + enetc_wr(hw, ENETC_SIMSITRV(idx), entry); + } + irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus())); + } + + return 0; + +irq_err: + while (i--) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + 
irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } + + return err; +} + +static void enetc_free_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } +} + +static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 icpt, ictt; + int i; + + /* enable Tx & Rx event indication */ + if (priv->ic_mode & + (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) { + icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR); + /* init to non-0 minimum, will be adjusted later */ + ictt = 0x1; + } else { + icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */ + ictt = 0; + } + + for (i = 0; i < priv->num_rx_rings; i++) { + enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt); + enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt); + enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE); + } + + if (priv->ic_mode & ENETC_IC_TX_MANUAL) + icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR); + else + icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */ + + for (i = 0; i < priv->num_tx_rings; i++) { + enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); + enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt); + enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE); + } +} + +static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_txbdr_wr(hw, i, ENETC_TBIER, 0); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); +} + +static int enetc_phylink_connect(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct ethtool_eee edata; + int err; + + if (!priv->phylink) + return 0; /* phy-less mode */ + + err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); + if (err) { + dev_err(&ndev->dev, "could not attach to PHY\n"); + return err; + } + + /* disable EEE autoneg, until ENETC driver supports it */ + memset(&edata, 0, sizeof(struct ethtool_eee)); + phylink_ethtool_set_eee(priv->phylink, &edata); + + return 0; +} + +void enetc_start(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + enetc_setup_interrupts(priv); + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(priv->si->pdev, + ENETC_BDR_INT_BASE_IDX + i); + + napi_enable(&priv->int_vector[i]->napi); + enable_irq(irq); + } + + if (priv->phylink) + phylink_start(priv->phylink); + else + netif_carrier_on(ndev); + + netif_tx_start_all_queues(ndev); +} + +int enetc_open(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + err = enetc_setup_irqs(priv); + if (err) + return err; + + err = enetc_phylink_connect(ndev); + if (err) + goto err_phy_connect; + + err = enetc_alloc_tx_resources(priv); + if (err) + goto err_alloc_tx; + + err = enetc_alloc_rx_resources(priv); + if (err) + goto err_alloc_rx; + + err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings); + if (err) + goto err_set_queues; + + enetc_setup_bdrs(priv); + enetc_start(ndev); + + return 0; + +err_set_queues: + enetc_free_rx_resources(priv); +err_alloc_rx: + enetc_free_tx_resources(priv); +err_alloc_tx: + if (priv->phylink) + 
phylink_disconnect_phy(priv->phylink); +err_phy_connect: + enetc_free_irqs(priv); + + return err; +} + +void enetc_stop(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + netif_tx_stop_all_queues(ndev); + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(priv->si->pdev, + ENETC_BDR_INT_BASE_IDX + i); + + disable_irq(irq); + napi_synchronize(&priv->int_vector[i]->napi); + napi_disable(&priv->int_vector[i]->napi); + } + + if (priv->phylink) + phylink_stop(priv->phylink); + else + netif_carrier_off(ndev); + + enetc_clear_interrupts(priv); +} + +int enetc_close(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + enetc_stop(ndev); + enetc_clear_bdrs(priv); + + if (priv->phylink) + phylink_disconnect_phy(priv->phylink); + enetc_free_rxtx_rings(priv); + enetc_free_rx_resources(priv); + enetc_free_tx_resources(priv); + enetc_free_irqs(priv); + + return 0; +} + +static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct tc_mqprio_qopt *mqprio = type_data; + struct enetc_hw *hw = &priv->si->hw; + struct enetc_bdr *tx_ring; + u8 num_tc; + int i; + + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = mqprio->num_tc; + + if (!num_tc) { + netdev_reset_tc(ndev); + netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + + /* Reset all ring priorities to 0 */ + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } + + return 0; + } + + /* Check if we have enough BD rings available to accommodate all TCs */ + if (num_tc > priv->num_tx_rings) { + netdev_err(ndev, "Max %d traffic classes supported\n", + priv->num_tx_rings); + return -EINVAL; + } + + /* For the moment, we use only one BD ring per TC. + * + * Configure num_tc BD rings with increasing priorities. 
+ */ + for (i = 0; i < num_tc; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = i; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } + + /* Reset the number of netdev queues based on the TC count */ + netif_set_real_num_tx_queues(ndev, num_tc); + + netdev_set_num_tc(ndev, num_tc); + + /* Each TC is associated with one netdev queue */ + for (i = 0; i < num_tc; i++) + netdev_set_tc_queue(ndev, i, 1, i); + + return 0; +} + +int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return enetc_setup_tc_mqprio(ndev, type_data); + case TC_SETUP_QDISC_TAPRIO: + return enetc_setup_tc_taprio(ndev, type_data); + case TC_SETUP_QDISC_CBS: + return enetc_setup_tc_cbs(ndev, type_data); + case TC_SETUP_QDISC_ETF: + return enetc_setup_tc_txtime(ndev, type_data); + case TC_SETUP_BLOCK: + return enetc_setup_tc_psfp(ndev, type_data); + default: + return -EOPNOTSUPP; + } +} + +struct net_device_stats *enetc_get_stats(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned long packets = 0, bytes = 0; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) { + packets += priv->rx_ring[i]->stats.packets; + bytes += priv->rx_ring[i]->stats.bytes; + } + + stats->rx_packets = packets; + stats->rx_bytes = bytes; + bytes = 0; + packets = 0; + + for (i = 0; i < priv->num_tx_rings; i++) { + packets += priv->tx_ring[i]->stats.packets; + bytes += priv->tx_ring[i]->stats.bytes; + } + + stats->tx_packets = packets; + stats->tx_bytes = bytes; + + return stats; +} + +static int enetc_set_rss(struct net_device *ndev, int en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); + + reg = enetc_rd(hw, ENETC_SIMR); + reg &= ~ENETC_SIMR_RSSE; + reg |= (en) ? 
ENETC_SIMR_RSSE : 0; + enetc_wr(hw, ENETC_SIMR, reg); + + return 0; +} + +static void enetc_enable_rxvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_bdr_enable_rxvlan(hw, i, en); +} + +static void enetc_enable_txvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_bdr_enable_txvlan(hw, i, en); +} + +void enetc_set_features(struct net_device *ndev, netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_RXHASH) + enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + enetc_enable_rxvlan(ndev, + !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) + enetc_enable_txvlan(ndev, + !!(features & NETIF_F_HW_VLAN_CTAG_TX)); +} + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK +static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct hwtstamp_config config; + int ao; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->active_offloads &= ~ENETC_F_TX_TSTAMP; + break; + case HWTSTAMP_TX_ON: + priv->active_offloads |= ENETC_F_TX_TSTAMP; + break; + default: + return -ERANGE; + } + + ao = priv->active_offloads; + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->active_offloads &= ~ENETC_F_RX_TSTAMP; + break; + default: + priv->active_offloads |= ENETC_F_RX_TSTAMP; + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + if (netif_running(ndev) && ao != priv->active_offloads) { + enetc_close(ndev); + enetc_open(ndev); + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + + if (priv->active_offloads & ENETC_F_TX_TSTAMP) + config.tx_type = HWTSTAMP_TX_ON; + else + config.tx_type = HWTSTAMP_TX_OFF; + + config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} +#endif + +int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (cmd == SIOCSHWTSTAMP) + return enetc_hwtstamp_set(ndev, rq); + if (cmd == SIOCGHWTSTAMP) + return enetc_hwtstamp_get(ndev, rq); +#endif + + if (!priv->phylink) + return -EOPNOTSUPP; + + return phylink_mii_ioctl(priv->phylink, rq, cmd); +} + +int enetc_alloc_msix(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int v_tx_rings; + int i, n, err, nvec; + + nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; + /* allocate MSIX for both messaging and Rx/Tx interrupts */ + n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + + if (n < 0) + return n; + + if (n != nvec) + return -EPERM; + + /* # of tx rings per int vector */ + v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v; + struct enetc_bdr *bdr; + int j; + + v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); + if (!v) { + err = -ENOMEM; + goto fail; + } + + priv->int_vector[i] = v; + + /* init defaults for adaptive IC */ + if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { + v->rx_ictt = 0x1; + v->rx_dim_en = true; + } + INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); + netif_napi_add(priv->ndev, &v->napi, enetc_poll, + NAPI_POLL_WEIGHT); + v->count_tx_rings = v_tx_rings; + + for (j = 0; j < v_tx_rings; j++) { + int idx; + + /* default tx ring mapping policy */ + if (priv->bdr_int_num == ENETC_MAX_BDR_INT) + idx = 2 * j + i; /* 2 CPUs */ + else + idx = j + i * v_tx_rings; /* default */ + + __set_bit(idx, &v->tx_rings_map); + bdr = &v->tx_ring[j]; + bdr->index = idx; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->tx_bd_count; + priv->tx_ring[idx] = bdr; + } + + bdr = &v->rx_ring; + bdr->index = i; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->rx_bd_count; + priv->rx_ring[i] = bdr; + } + + return 0; + +fail: + while (i--) { + netif_napi_del(&priv->int_vector[i]->napi); + cancel_work_sync(&priv->int_vector[i]->rx_dim.work); + kfree(priv->int_vector[i]); + } + + pci_free_irq_vectors(pdev); + + return err; +} + +void enetc_free_msix(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v = priv->int_vector[i]; + + netif_napi_del(&v->napi); + cancel_work_sync(&v->rx_dim.work); + } + + for (i = 0; i < priv->num_rx_rings; i++) + priv->rx_ring[i] = NULL; + + for (i = 0; i < priv->num_tx_rings; i++) + priv->tx_ring[i] = NULL; + + for (i = 0; i < priv->bdr_int_num; i++) { + kfree(priv->int_vector[i]); + priv->int_vector[i] = NULL; + } + + /* disable all MSIX for this device */ + pci_free_irq_vectors(priv->si->pdev); +} + +static void enetc_kfree_si(struct enetc_si *si) +{ + char *p = (char *)si - si->pad; + + kfree(p); +} + +static void enetc_detect_errata(struct enetc_si *si) +{ + if (si->pdev->revision == ENETC_REV1) + si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL | + ENETC_ERR_UCMCSWP; +} + +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) +{ + struct enetc_si *si, *p; + struct enetc_hw *hw; + size_t alloc_size; + int err, len; + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + return err; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = 
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + alloc_size = sizeof(struct enetc_si); + if (sizeof_priv) { + /* align priv to 32B */ + alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); + alloc_size += sizeof_priv; + } + /* force 32B alignment for enetc_si */ + alloc_size += ENETC_SI_ALIGN - 1; + + p = kzalloc(alloc_size, GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto err_alloc_si; + } + + si = PTR_ALIGN(p, ENETC_SI_ALIGN); + si->pad = (char *)si - (char *)p; + + pci_set_drvdata(pdev, si); + si->pdev = pdev; + hw = &si->hw; + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!hw->reg) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + if (len > ENETC_PORT_BASE) + hw->port = hw->reg + ENETC_PORT_BASE; + if (len > ENETC_GLOBAL_BASE) + hw->global = hw->reg + ENETC_GLOBAL_BASE; + + enetc_detect_errata(si); + + return 0; + +err_ioremap: + enetc_kfree_si(si); +err_alloc_si: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +void enetc_pci_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_hw *hw = &si->hw; + + iounmap(hw->reg); + enetc_kfree_si(si); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h new file mode 100644 index 000000000..725c3d1cb --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -0,0 +1,420 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include <linux/timer.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/dma-mapping.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/phylink.h> +#include <linux/dim.h> + +#include "enetc_hw.h" + +#define ENETC_MAC_MAXFRM_SIZE 9600 +#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \ + (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN)) + +struct enetc_tx_swbd { + struct sk_buff *skb; + dma_addr_t dma; + u16 len; + u8 is_dma_page:1; + u8 check_wb:1; + u8 do_tstamp:1; +}; + +#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE +#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ +#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ +#define ENETC_RXB_DMA_SIZE \ + (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD) + +struct enetc_rx_swbd { + dma_addr_t dma; + struct page *page; + u16 page_offset; +}; + +struct enetc_ring_stats { + unsigned int packets; + unsigned int bytes; + unsigned int rx_alloc_errs; +}; + +#define ENETC_RX_RING_DEFAULT_SIZE 512 +#define ENETC_TX_RING_DEFAULT_SIZE 256 +#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2) + +struct enetc_bdr { + struct device *dev; /* for DMA mapping */ + struct net_device *ndev; + void *bd_base; /* points to Rx or Tx BD ring */ + union { + void __iomem *tpir; + void __iomem *rcir; + }; + u16 index; + u16 prio; + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + union { + struct enetc_tx_swbd *tx_swbd; + struct enetc_rx_swbd *rx_swbd; + }; + union { + void __iomem *tcir; /* Tx 
*/ + int next_to_alloc; /* Rx */ + }; + void __iomem *idr; /* Interrupt Detect Register pointer */ + + struct enetc_ring_stats stats; + + dma_addr_t bd_dma_base; + u8 tsd_enable; /* Time specific departure */ + bool ext_en; /* enable h/w descriptor extensions */ +} ____cacheline_aligned_in_smp; + +static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i) +{ + if (unlikely(++*i == bdr->bd_count)) + *i = 0; +} + +static inline int enetc_bd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_use) + return bdr->next_to_clean - bdr->next_to_use - 1; + + return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1; +} + +/* Control BD ring */ +#define ENETC_CBDR_DEFAULT_SIZE 64 +struct enetc_cbdr { + void *bd_base; /* points to Rx or Tx BD ring */ + void __iomem *pir; + void __iomem *cir; + + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + + dma_addr_t bd_dma_base; +}; + +#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i])) + +static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i) +{ + int hw_idx = i; + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (rx_ring->ext_en) + hw_idx = 2 * i; +#endif + return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]); +} + +static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring, + union enetc_rx_bd *rxbd, + int i) +{ + rxbd++; +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (rx_ring->ext_en) + rxbd++; +#endif + if (unlikely(++i == rx_ring->bd_count)) + rxbd = rx_ring->bd_base; + + return rxbd; +} + +static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd) +{ + return ++rxbd; +} + +struct enetc_msg_swbd { + void *vaddr; + dma_addr_t dma; + int size; +}; + +#define ENETC_REV1 0x1 +enum enetc_errata { + ENETC_ERR_TXCSUM = BIT(0), + ENETC_ERR_VLAN_ISOL = BIT(1), + ENETC_ERR_UCMCSWP = BIT(2), +}; + +#define ENETC_SI_F_QBV BIT(0) +#define ENETC_SI_F_PSFP BIT(1) + +/* PCI IEP device data */ +struct enetc_si { + struct pci_dev *pdev; + struct enetc_hw hw; + enum enetc_errata errata; + + struct net_device *ndev; /* back ref. 
*/ + + struct enetc_cbdr cbd_ring; + + int num_rx_rings; /* how many rings are available in the SI */ + int num_tx_rings; + int num_fs_entries; + int num_rss; /* number of RSS buckets */ + unsigned short pad; + int hw_features; +}; + +#define ENETC_SI_ALIGN 32 + +static inline void *enetc_si_priv(const struct enetc_si *si) +{ + return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN); +} + +static inline bool enetc_si_is_pf(struct enetc_si *si) +{ + return !!(si->hw.port); +} + +#define ENETC_MAX_NUM_TXQS 8 +#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) + +struct enetc_int_vector { + void __iomem *rbier; + void __iomem *tbier_base; + void __iomem *ricr1; + unsigned long tx_rings_map; + int count_tx_rings; + u32 rx_ictt; + u16 comp_cnt; + bool rx_dim_en, rx_napi_work; + struct napi_struct napi ____cacheline_aligned_in_smp; + struct dim rx_dim ____cacheline_aligned_in_smp; + char name[ENETC_INT_NAME_MAX]; + + struct enetc_bdr rx_ring; + struct enetc_bdr tx_ring[]; +} ____cacheline_aligned_in_smp; + +struct enetc_cls_rule { + struct ethtool_rx_flow_spec fs; + int used; +}; + +#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */ +struct psfp_cap { + u32 max_streamid; + u32 max_psfp_filter; + u32 max_psfp_gate; + u32 max_psfp_gatelist; + u32 max_psfp_meter; +}; + +/* TODO: more hardware offloads */ +enum enetc_active_offloads { + ENETC_F_RX_TSTAMP = BIT(0), + ENETC_F_TX_TSTAMP = BIT(1), + ENETC_F_QBV = BIT(2), + ENETC_F_QCI = BIT(3), +}; + +/* interrupt coalescing modes */ +enum enetc_ic_mode { + /* one interrupt per frame */ + ENETC_IC_NONE = 0, + /* activated when int coalescing time is set to a non-0 value */ + ENETC_IC_RX_MANUAL = BIT(0), + ENETC_IC_TX_MANUAL = BIT(1), + /* use dynamic interrupt moderation */ + ENETC_IC_RX_ADAPTIVE = BIT(2), +}; + +#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2) +#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2) +#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600) + +struct enetc_ndev_priv { + struct net_device *ndev; + struct device *dev; /* dma-mapping device */ + struct enetc_si *si; + + int bdr_int_num; /* number of Rx/Tx ring interrupts */ + struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT]; + u16 num_rx_rings, num_tx_rings; + u16 rx_bd_count, tx_bd_count; + + u16 msg_enable; + int active_offloads; + + u32 speed; /* store speed for compare update pspeed */ + + struct enetc_bdr *tx_ring[16]; + struct enetc_bdr *rx_ring[16]; + + struct enetc_cls_rule *cls_rules; + + struct psfp_cap psfp_cap; + + struct phylink *phylink; + int ic_mode; + u32 tx_ictt; +}; + +/* Messaging */ + +/* VF-PF set primary MAC address message format */ +struct enetc_msg_cmd_set_primary_mac { + struct enetc_msg_cmd_header header; + struct sockaddr mac; +}; + +#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i])) + +#define ENETC_CBDR_TIMEOUT 1000 /* usecs */ + +/* PTP driver exports */ +extern int enetc_phc_index; + +/* SI common */ +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv); +void enetc_pci_remove(struct pci_dev *pdev); +int enetc_alloc_msix(struct enetc_ndev_priv *priv); +void enetc_free_msix(struct enetc_ndev_priv *priv); +void enetc_get_si_caps(struct enetc_si *si); +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); +void enetc_free_si_resources(struct enetc_ndev_priv *priv); +int enetc_configure_si(struct enetc_ndev_priv *priv); + +int enetc_open(struct net_device *ndev); +int 
enetc_close(struct net_device *ndev); +void enetc_start(struct net_device *ndev); +void enetc_stop(struct net_device *ndev); +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); +struct net_device_stats *enetc_get_stats(struct net_device *ndev); +void enetc_set_features(struct net_device *ndev, netdev_features_t features); +int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); +int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data); + +/* ethtool */ +void enetc_set_ethtool_ops(struct net_device *ndev); + +/* control buffer descriptor ring (CBDR) */ +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr); +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr); +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr); +void enetc_clear_cbdr(struct enetc_hw *hw); +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map); +int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index); +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes); +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); +int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd); + +#ifdef CONFIG_FSL_ENETC_QOS +int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data); +void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed); +int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data); +int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data); +int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv); +int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data); +int enetc_psfp_init(struct enetc_ndev_priv *priv); +int enetc_psfp_clean(struct enetc_ndev_priv *priv); +int enetc_set_psfp(struct net_device *ndev, bool en); + +static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + reg = enetc_port_rd(hw, ENETC_PSIDCAPR); + priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK; + /* Port stream filter capability */ + reg = enetc_port_rd(hw, ENETC_PSFCAPR); + priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK; + /* Port stream gate capability */ + reg = enetc_port_rd(hw, ENETC_PSGCAPR); + priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK); + priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16; + /* Port flow meter capability */ + reg = enetc_port_rd(hw, ENETC_PFMCAPR); + priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK; +} + +static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int err; + + enetc_get_max_cap(priv); + + err = enetc_psfp_init(priv); + if (err) + return err; + + enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) | + ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS | + ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC); + + return 0; +} + +static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int err; + + err = enetc_psfp_clean(priv); + if (err) + return err; + + enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) & + ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS & + ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC); + + memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap)); + + return 0; +} + +#else +#define 
enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP +#define enetc_sched_speed_set(priv, speed) (void)0 +#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP +#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP +#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP +#define enetc_setup_tc_block_cb NULL + +#define enetc_get_max_cap(p) \ + memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap)) + +static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv) +{ + return 0; +} + +static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) +{ + return 0; +} + +static inline int enetc_set_psfp(struct net_device *ndev, bool en) +{ + return 0; +} +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c new file mode 100644 index 000000000..201cbc362 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" + +static void enetc_clean_cbdr(struct enetc_si *si) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + struct enetc_cbd *dest_cbd; + int i, status; + + i = ring->next_to_clean; + + while (enetc_rd_reg(ring->cir) != i) { + dest_cbd = ENETC_CBD(*ring, i); + status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK; + if (status) + dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n", + status, dest_cbd->cmd); + + memset(dest_cbd, 0, sizeof(*dest_cbd)); + + i = (i + 1) % ring->bd_count; + } + + ring->next_to_clean = i; +} + +static int enetc_cbd_unused(struct enetc_cbdr *r) +{ + return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) % + r->bd_count; +} + +int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + int timeout = ENETC_CBDR_TIMEOUT; + struct enetc_cbd *dest_cbd; + int i; + + if (unlikely(!ring->bd_base)) + return -EIO; + + if (unlikely(!enetc_cbd_unused(ring))) + enetc_clean_cbdr(si); + + i = ring->next_to_use; + dest_cbd = ENETC_CBD(*ring, i); + + /* copy command to the ring */ + *dest_cbd = *cbd; + i = (i + 1) % ring->bd_count; + + ring->next_to_use = i; + /* let H/W know BD ring has been updated */ + enetc_wr_reg(ring->pir, i); + + do { + if (enetc_rd_reg(ring->cir) == i) + break; + udelay(10); /* cannot sleep, rtnl_lock() */ + timeout -= 10; + } while (timeout); + + if (!timeout) + return -EBUSY; + + /* CBD may writeback data, feedback up level */ + *cbd = *dest_cbd; + + enetc_clean_cbdr(si); + + return 0; +} + +int enetc_clear_mac_flt_entry(struct enetc_si *si, int index) +{ + struct enetc_cbd cbd; + + memset(&cbd, 0, sizeof(cbd)); + + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + + return enetc_send_cmd(si, &cbd); +} + +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map) +{ + struct enetc_cbd cbd; + u32 upper; + u16 lower; + + memset(&cbd, 0, sizeof(cbd)); + + /* fill up the "set" descriptor */ + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + cbd.opt[3] = cpu_to_le32(si_map); + /* enable entry */ + cbd.opt[0] = cpu_to_le32(BIT(31)); + + upper = *(const u32 *)mac_addr; + lower = *(const u16 *)(mac_addr + 4); + cbd.addr[0] = cpu_to_le32(upper); + cbd.addr[1] = cpu_to_le32(lower); + + return enetc_send_cmd(si, &cbd); +} + +#define RFSE_ALIGN 64 +/* Set entry in RFS table */ +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index) +{ + struct enetc_cbd 
cbd = {.cmd = 0}; + dma_addr_t dma, dma_align; + void *tmp, *tmp_align; + int err; + + /* fill up the "set" descriptor */ + cbd.cmd = 0; + cbd.cls = 4; + cbd.index = cpu_to_le16(index); + cbd.length = cpu_to_le16(sizeof(*rfse)); + cbd.opt[3] = cpu_to_le32(0); /* SI */ + + tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n"); + return -ENOMEM; + } + + dma_align = ALIGN(dma, RFSE_ALIGN); + tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN); + memcpy(tmp_align, rfse, sizeof(*rfse)); + + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err); + + dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + tmp, dma); + + return err; +} + +#define RSSE_ALIGN 64 +static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, + bool read) +{ + struct enetc_cbd cbd = {.cmd = 0}; + dma_addr_t dma, dma_align; + u8 *tmp, *tmp_align; + int err, i; + + if (count < RSSE_ALIGN) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n"); + return -ENOMEM; + } + dma_align = ALIGN(dma, RSSE_ALIGN); + tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN); + + if (!read) + for (i = 0; i < count; i++) + tmp_align[i] = (u8)(table[i]); + + /* fill up the descriptor */ + cbd.cmd = read ? 2 : 1; + cbd.cls = 3; + cbd.length = cpu_to_le16(count); + + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err); + + if (read) + for (i = 0; i < count; i++) + table[i] = tmp_align[i]; + + dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma); + + return err; +} + +/* Get RSS table */ +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count) +{ + return enetc_cmd_rss_table(si, table, count, true); +} + +/* Set RSS table */ +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count) +{ + return enetc_cmd_rss_table(si, (u32 *)table, count, false); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c new file mode 100644 index 000000000..cf98a0029 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <linux/net_tstamp.h> +#include <linux/module.h> +#include "enetc.h" + +static const u32 enetc_si_regs[] = { + ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR, + ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR, + ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1, + ENETC_SIUEFDCR +}; + +static const u32 enetc_txbdr_regs[] = { + ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1, + ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER, ENETC_TBICR0, + ENETC_TBICR1 +}; + +static const u32 enetc_rxbdr_regs[] = { + ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0, + ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBIER, ENETC_RBICR0, + ENETC_RBICR1 +}; + +static const u32 enetc_port_regs[] = { + ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0), + ENETC_PSIPMAR1(0), 
ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1, + ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0), + ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE +}; + +static int enetc_get_reglen(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int len; + + len = ARRAY_SIZE(enetc_si_regs); + len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings; + len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings; + + if (hw->port) + len += ARRAY_SIZE(enetc_port_regs); + + len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */ + + return len; +} + +static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *regbuf) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 *buf = (u32 *)regbuf; + int i, j; + u32 addr; + + for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) { + *buf++ = enetc_si_regs[i]; + *buf++ = enetc_rd(hw, enetc_si_regs[i]); + } + + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) { + addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) { + addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + if (!hw->port) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) { + addr = ENETC_PORT_BASE + enetc_port_regs[i]; + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } +} + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_si_counters[] = { + { ENETC_SIROCT, "SI rx octets" }, + { ENETC_SIRFRM, "SI rx frames" }, + { ENETC_SIRUCA, "SI rx u-cast frames" }, + { ENETC_SIRMCA, "SI rx m-cast frames" }, + { ENETC_SITOCT, "SI tx octets" }, + { ENETC_SITFRM, "SI tx frames" }, + { ENETC_SITUCA, "SI tx u-cast frames" }, + { ENETC_SITMCA, "SI tx m-cast frames" }, + { ENETC_RBDCR(0), "Rx ring 0 discarded frames" }, + { ENETC_RBDCR(1), "Rx ring 1 discarded frames" }, + { ENETC_RBDCR(2), "Rx ring 2 discarded frames" }, + { ENETC_RBDCR(3), "Rx ring 3 discarded frames" }, + { ENETC_RBDCR(4), "Rx ring 4 discarded frames" }, + { ENETC_RBDCR(5), "Rx ring 5 discarded frames" }, + { ENETC_RBDCR(6), "Rx ring 6 discarded frames" }, + { ENETC_RBDCR(7), "Rx ring 7 discarded frames" }, + { ENETC_RBDCR(8), "Rx ring 8 discarded frames" }, + { ENETC_RBDCR(9), "Rx ring 9 discarded frames" }, + { ENETC_RBDCR(10), "Rx ring 10 discarded frames" }, + { ENETC_RBDCR(11), "Rx ring 11 discarded frames" }, + { ENETC_RBDCR(12), "Rx ring 12 discarded frames" }, + { ENETC_RBDCR(13), "Rx ring 13 discarded frames" }, + { ENETC_RBDCR(14), "Rx ring 14 discarded frames" }, + { ENETC_RBDCR(15), "Rx ring 15 discarded frames" }, +}; + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_port_counters[] = { + { ENETC_PM0_REOCT, "MAC rx ethernet octets" }, + { ENETC_PM0_RALN, "MAC rx alignment errors" }, + { ENETC_PM0_RXPF, "MAC rx valid pause frames" }, + { ENETC_PM0_RFRM, "MAC rx valid frames" }, + { ENETC_PM0_RFCS, "MAC rx fcs errors" }, + { ENETC_PM0_RVLAN, "MAC rx VLAN frames" }, + { ENETC_PM0_RERR, "MAC rx frame errors" }, + { ENETC_PM0_RUCA, "MAC rx unicast frames" }, + { ENETC_PM0_RMCA, "MAC rx multicast frames" }, + { ENETC_PM0_RBCA, "MAC rx broadcast frames" }, + { ENETC_PM0_RDRP, "MAC rx dropped packets" }, + { ENETC_PM0_RPKT, "MAC rx packets" }, + { ENETC_PM0_RUND, "MAC rx undersized 
packets" }, + { ENETC_PM0_R64, "MAC rx 64 byte packets" }, + { ENETC_PM0_R127, "MAC rx 65-127 byte packets" }, + { ENETC_PM0_R255, "MAC rx 128-255 byte packets" }, + { ENETC_PM0_R511, "MAC rx 256-511 byte packets" }, + { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" }, + { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" }, + { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" }, + { ENETC_PM0_ROVR, "MAC rx oversized packets" }, + { ENETC_PM0_RJBR, "MAC rx jabber packets" }, + { ENETC_PM0_RFRG, "MAC rx fragment packets" }, + { ENETC_PM0_RCNP, "MAC rx control packets" }, + { ENETC_PM0_RDRNTP, "MAC rx fifo drop" }, + { ENETC_PM0_TEOCT, "MAC tx ethernet octets" }, + { ENETC_PM0_TOCT, "MAC tx octets" }, + { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" }, + { ENETC_PM0_TXPF, "MAC tx valid pause frames" }, + { ENETC_PM0_TFRM, "MAC tx frames" }, + { ENETC_PM0_TFCS, "MAC tx fcs errors" }, + { ENETC_PM0_TVLAN, "MAC tx VLAN frames" }, + { ENETC_PM0_TERR, "MAC tx frame errors" }, + { ENETC_PM0_TUCA, "MAC tx unicast frames" }, + { ENETC_PM0_TMCA, "MAC tx multicast frames" }, + { ENETC_PM0_TBCA, "MAC tx broadcast frames" }, + { ENETC_PM0_TPKT, "MAC tx packets" }, + { ENETC_PM0_TUND, "MAC tx undersized packets" }, + { ENETC_PM0_T64, "MAC tx 64 byte packets" }, + { ENETC_PM0_T127, "MAC tx 65-127 byte packets" }, + { ENETC_PM0_T255, "MAC tx 128-255 byte packets" }, + { ENETC_PM0_T511, "MAC tx 256-511 byte packets" }, + { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" }, + { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" }, + { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" }, + { ENETC_PM0_TCNP, "MAC tx control packets" }, + { ENETC_PM0_TDFR, "MAC tx deferred packets" }, + { ENETC_PM0_TMCOL, "MAC tx multiple collisions" }, + { ENETC_PM0_TSCOL, "MAC tx single collisions" }, + { ENETC_PM0_TLCOL, "MAC tx late collisions" }, + { ENETC_PM0_TECOL, "MAC tx excessive collisions" }, + { ENETC_UFDMF, "SI MAC nomatch u-cast discards" }, + { ENETC_MFDMF, "SI MAC nomatch m-cast discards" }, + { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" }, + { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" }, + { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" }, + { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" }, + { ENETC_PFDMSAPR, "SI pruning discarded frames" }, + { ENETC_PICDR(0), "ICM DR0 discarded frames" }, + { ENETC_PICDR(1), "ICM DR1 discarded frames" }, + { ENETC_PICDR(2), "ICM DR2 discarded frames" }, + { ENETC_PICDR(3), "ICM DR3 discarded frames" }, +}; + +static const char rx_ring_stats[][ETH_GSTRING_LEN] = { + "Rx ring %2d frames", + "Rx ring %2d alloc errors", +}; + +static const char tx_ring_stats[][ETH_GSTRING_LEN] = { + "Tx ring %2d frames", +}; + +static int enetc_get_sset_count(struct net_device *ndev, int sset) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int len; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + len = ARRAY_SIZE(enetc_si_counters) + + ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings + + ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings; + + if (!enetc_si_is_pf(priv->si)) + return len; + + len += ARRAY_SIZE(enetc_port_counters); + + return len; +} + +static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u8 *p = data; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) { + strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < 
ARRAY_SIZE(tx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + + if (!enetc_si_is_pf(priv->si)) + break; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) { + strlcpy(p, enetc_port_counters[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void enetc_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i, o = 0; + + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) + data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg); + + for (i = 0; i < priv->num_tx_rings; i++) + data[o++] = priv->tx_ring[i]->stats.packets; + + for (i = 0; i < priv->num_rx_rings; i++) { + data[o++] = priv->rx_ring[i]->stats.packets; + data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs; + } + + if (!enetc_si_is_pf(priv->si)) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) + data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg); +} + +#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \ + RXH_IP_DST) +#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc) +{ + static const u32 rsshash[] = { + [TCP_V4_FLOW] = ENETC_RSSHASH_L4, + [UDP_V4_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V4_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3, + [IPV4_FLOW] = ENETC_RSSHASH_L3, + [TCP_V6_FLOW] = ENETC_RSSHASH_L4, + [UDP_V6_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V6_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3, + [IPV6_FLOW] = ENETC_RSSHASH_L3, + [ETHER_FLOW] = 0, + }; + + if (rxnfc->flow_type >= ARRAY_SIZE(rsshash)) + return -EINVAL; + + rxnfc->data = rsshash[rxnfc->flow_type]; + + return 0; +} + +/* current HW spec does byte reversal on everything including MAC addresses */ +static void ether_addr_copy_swap(u8 *dst, const u8 *src) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[i] = src[ETH_ALEN - i - 1]; +} + +static int enetc_set_cls_entry(struct enetc_si *si, + struct ethtool_rx_flow_spec *fs, bool en) +{ + struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; + struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m; + struct ethhdr *eth_h, *eth_m; + struct enetc_cmd_rfse rfse = { {0} }; + + if (!en) + goto done; + + switch (fs->flow_type & 0xff) { + case TCP_V4_FLOW: + l4ip4_h = &fs->h_u.tcp_ip4_spec; + l4ip4_m = &fs->m_u.tcp_ip4_spec; + goto l4ip4; + case UDP_V4_FLOW: + l4ip4_h = &fs->h_u.udp_ip4_spec; + l4ip4_m = &fs->m_u.udp_ip4_spec; + goto l4ip4; + case SCTP_V4_FLOW: + l4ip4_h = &fs->h_u.sctp_ip4_spec; + l4ip4_m = &fs->m_u.sctp_ip4_spec; +l4ip4: + rfse.sip_h[0] = l4ip4_h->ip4src; + rfse.sip_m[0] = l4ip4_m->ip4src; + rfse.dip_h[0] = l4ip4_h->ip4dst; + rfse.dip_m[0] = l4ip4_m->ip4dst; + rfse.sport_h = ntohs(l4ip4_h->psrc); + rfse.sport_m = ntohs(l4ip4_m->psrc); + rfse.dport_h = ntohs(l4ip4_h->pdst); + rfse.dport_m = ntohs(l4ip4_m->pdst); + if (l4ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case IP_USER_FLOW: + l3ip4_h = &fs->h_u.usr_ip4_spec; + l3ip4_m = &fs->m_u.usr_ip4_spec; + + rfse.sip_h[0] = l3ip4_h->ip4src; + rfse.sip_m[0] = l3ip4_m->ip4src; + rfse.dip_h[0] = 
l3ip4_h->ip4dst; + rfse.dip_m[0] = l3ip4_m->ip4dst; + if (l3ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case ETHER_FLOW: + eth_h = &fs->h_u.ether_spec; + eth_m = &fs->m_u.ether_spec; + + ether_addr_copy_swap(rfse.smac_h, eth_h->h_source); + ether_addr_copy_swap(rfse.smac_m, eth_m->h_source); + ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest); + ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest); + rfse.ethtype_h = ntohs(eth_h->h_proto); + rfse.ethtype_m = ntohs(eth_m->h_proto); + break; + default: + return -EOPNOTSUPP; + } + + rfse.mode |= ENETC_RFSE_EN; + if (fs->ring_cookie != RX_CLS_FLOW_DISC) { + rfse.mode |= ENETC_RFSE_MODE_BD; + rfse.result = fs->ring_cookie; + } +done: + return enetc_set_fs_entry(si, &rfse, fs->location); +} + +static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i, j; + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->num_rx_rings; + break; + case ETHTOOL_GRXFH: + /* get RSS hash config */ + return enetc_get_rsshash(rxnfc); + case ETHTOOL_GRXCLSRLCNT: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* number of entries in use */ + rxnfc->rule_cnt = 0; + for (i = 0; i < priv->si->num_fs_entries; i++) + if (priv->cls_rules[i].used) + rxnfc->rule_cnt++; + break; + case ETHTOOL_GRXCLSRULE: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + /* get entry x */ + rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; + break; + case ETHTOOL_GRXCLSRLALL: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* array of indexes of used entries */ + j = 0; + for (i = 0; i < priv->si->num_fs_entries; i++) { + if (!priv->cls_rules[i].used) + continue; + if (j == rxnfc->rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = i; + } + /* number of entries in use */ + rxnfc->rule_cnt = j; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + switch (rxnfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + if (rxnfc->fs.ring_cookie >= priv->num_rx_rings && + rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs; + priv->cls_rules[rxnfc->fs.location].used = 1; + break; + case ETHTOOL_SRXCLSRLDEL: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].used = 0; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 enetc_get_rxfh_key_size(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash key. PF only */ + return (priv->si->hw.port) ? 
ENETC_RSSHASH_KEY_SIZE : 0; +} + +static u32 enetc_get_rxfh_indir_size(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash indirection table */ + return priv->si->num_rss; +} + +static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0, i; + + /* return hash function */ + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* return hash key */ + if (key && hw->port) + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i)); + + /* return RSS table */ + if (indir) + err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes) +{ + int i; + + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]); +} + +static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0; + + /* set hash key, if PF */ + if (key && hw->port) + enetc_set_rss_key(hw, key); + + /* set RSS table */ + if (indir) + err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +static void enetc_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + ring->rx_pending = priv->rx_bd_count; + ring->tx_pending = priv->tx_bd_count; + + /* do some h/w sanity checks for BDR length */ + if (netif_running(ndev)) { + struct enetc_hw *hw = &priv->si->hw; + u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR); + + if (val != priv->rx_bd_count) + netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val); + + val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR); + + if (val != priv->tx_bd_count) + netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val); + } +} + +static int enetc_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ic) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_int_vector *v = priv->int_vector[0]; + + ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt); + ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt); + + ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR; + ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR; + + ic->use_adaptive_rx_coalesce = priv->ic_mode & ENETC_IC_RX_ADAPTIVE; + + return 0; +} + +static int enetc_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ic) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u32 rx_ictt, tx_ictt; + int i, ic_mode; + bool changed; + + tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs); + rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs); + + if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR) + return -EOPNOTSUPP; + + if (ic->tx_max_coalesced_frames != ENETC_TXIC_PKTTHR) + return -EOPNOTSUPP; + + ic_mode = ENETC_IC_NONE; + if (ic->use_adaptive_rx_coalesce) { + ic_mode |= ENETC_IC_RX_ADAPTIVE; + rx_ictt = 0x1; + } else { + ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0; + } + + ic_mode |= tx_ictt ? 
ENETC_IC_TX_MANUAL : 0; + + /* commit the settings */ + changed = (ic_mode != priv->ic_mode) || (priv->tx_ictt != tx_ictt); + + priv->ic_mode = ic_mode; + priv->tx_ictt = tx_ictt; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v = priv->int_vector[i]; + + v->rx_ictt = rx_ictt; + v->rx_dim_en = !!(ic_mode & ENETC_IC_RX_ADAPTIVE); + } + + if (netif_running(ndev) && changed) { + /* reconfigure the operation mode of h/w interrupts, + * traffic needs to be paused in the process + */ + enetc_stop(ndev); + enetc_start(ndev); + } + + return 0; +} + +static int enetc_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + int *phc_idx; + + phc_idx = symbol_get(enetc_phc_index); + if (phc_idx) { + info->phc_index = *phc_idx; + symbol_put(enetc_phc_index); + } else { + info->phc_index = -1; + } + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); +#else + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; +#endif + return 0; +} + +static void enetc_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); +} + +static int enetc_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + int ret; + + if (!dev->phydev) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_wol(dev->phydev, wol); + if (!ret) + device_set_wakeup_enable(&dev->dev, wol->wolopts); + + return ret; +} + +static int enetc_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + if (!priv->phylink) + return -EOPNOTSUPP; + + return phylink_ethtool_ksettings_get(priv->phylink, cmd); +} + +static int enetc_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + if (!priv->phylink) + return -EOPNOTSUPP; + + return phylink_ethtool_ksettings_set(priv->phylink, cmd); +} + +static const struct ethtool_ops enetc_pf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_regs_len = enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, + .get_link_ksettings = enetc_get_link_ksettings, + .set_link_ksettings = enetc_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_ts_info = enetc_get_ts_info, + .get_wol = enetc_get_wol, + .set_wol = enetc_set_wol, +}; + +static const struct ethtool_ops enetc_vf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_regs_len = 
enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, + .get_link = ethtool_op_get_link, + .get_ts_info = enetc_get_ts_info, +}; + +void enetc_set_ethtool_ops(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (enetc_si_is_pf(priv->si)) + ndev->ethtool_ops = &enetc_pf_ethtool_ops; + else + ndev->ethtool_ops = &enetc_vf_ethtool_ops; +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h new file mode 100644 index 000000000..2b90a3455 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -0,0 +1,947 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include <linux/bitops.h> + +/* ENETC device IDs */ +#define ENETC_DEV_ID_PF 0xe100 +#define ENETC_DEV_ID_VF 0xef00 +#define ENETC_DEV_ID_PTP 0xee02 + +/* ENETC register block BAR */ +#define ENETC_BAR_REGS 0 + +/** SI regs, offset: 0h */ +#define ENETC_SIMR 0 +#define ENETC_SIMR_EN BIT(31) +#define ENETC_SIMR_RSSE BIT(0) +#define ENETC_SICTR0 0x18 +#define ENETC_SICTR1 0x1c +#define ENETC_SIPCAPR0 0x20 +#define ENETC_SIPCAPR0_QBV BIT(4) +#define ENETC_SIPCAPR0_PSFP BIT(9) +#define ENETC_SIPCAPR0_RSS BIT(8) +#define ENETC_SIPCAPR1 0x24 +#define ENETC_SITGTGR 0x30 +#define ENETC_SIRBGCR 0x38 +/* cache attribute registers for transactions initiated by ENETC */ +#define ENETC_SICAR0 0x40 +#define ENETC_SICAR1 0x44 +#define ENETC_SICAR2 0x48 +/* rd snoop, no alloc + * wr snoop, no alloc, partial cache line update for BDs and full cache line + * update for data + */ +#define ENETC_SICAR_RD_COHERENT 0x2b2b0000 +#define ENETC_SICAR_WR_COHERENT 0x00006727 +#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */ + +#define ENETC_SIPMAR0 0x80 +#define ENETC_SIPMAR1 0x84 + +/* VF-PF Message passing */ +#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */ +/* msg size encoding: default and max msg value of 1024B encoded as 0 */ +static inline u32 enetc_vsi_set_msize(u32 size) +{ + return size < ENETC_DEFAULT_MSG_SIZE ? 
size >> 5 : 0; +} + +#define ENETC_PSIMSGRR 0x204 +#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1) +#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8) + +#define ENETC_VSIMSGSR 0x204 /* RO */ +#define ENETC_VSIMSGSR_MB BIT(0) +#define ENETC_VSIMSGSR_MS BIT(1) +#define ENETC_VSIMSGSNDAR0 0x210 +#define ENETC_VSIMSGSNDAR1 0x214 + +#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16) +#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16) + +/* SI statistics */ +#define ENETC_SIROCT 0x300 +#define ENETC_SIRFRM 0x308 +#define ENETC_SIRUCA 0x310 +#define ENETC_SIRMCA 0x318 +#define ENETC_SITOCT 0x320 +#define ENETC_SITFRM 0x328 +#define ENETC_SITUCA 0x330 +#define ENETC_SITMCA 0x338 +#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200) + +/* Control BDR regs */ +#define ENETC_SICBDRMR 0x800 +#define ENETC_SICBDRSR 0x804 /* RO */ +#define ENETC_SICBDRBAR0 0x810 +#define ENETC_SICBDRBAR1 0x814 +#define ENETC_SICBDRPIR 0x818 +#define ENETC_SICBDRCIR 0x81c +#define ENETC_SICBDRLENR 0x820 + +#define ENETC_SICAPR0 0x900 +#define ENETC_SICAPR1 0x904 + +#define ENETC_PSIIER 0xa00 +#define ENETC_PSIIER_MR_MASK GENMASK(2, 1) +#define ENETC_PSIIDR 0xa08 +#define ENETC_SITXIDR 0xa18 +#define ENETC_SIRXIDR 0xa28 +#define ENETC_SIMSIVR 0xa30 + +#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4) +#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4) + +#define ENETC_SIUEFDCR 0xe28 + +#define ENETC_SIRFSCAPR 0x1200 +#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f) +#define ENETC_SIRSSCAPR 0x1600 +#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) + +/** SI BDR sub-blocks, n = 0..7 */ +enum enetc_bdr_type {TX, RX}; +#define ENETC_BDR_OFF(i) ((i) * 0x200) +#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r)) +/* RX BDR reg offsets */ +#define ENETC_RBMR 0 +#define ENETC_RBMR_BDS BIT(2) +#define ENETC_RBMR_VTE BIT(5) +#define ENETC_RBMR_EN BIT(31) +#define ENETC_RBSR 0x4 +#define ENETC_RBBSR 0x8 +#define ENETC_RBCIR 0xc +#define ENETC_RBBAR0 0x10 +#define ENETC_RBBAR1 0x14 +#define ENETC_RBPIR 0x18 +#define ENETC_RBLENR 0x20 +#define ENETC_RBIER 0xa0 +#define ENETC_RBIER_RXTIE BIT(0) +#define ENETC_RBIDR 0xa4 +#define ENETC_RBICR0 0xa8 +#define ENETC_RBICR0_ICEN BIT(31) +#define ENETC_RBICR0_ICPT_MASK 0x1ff +#define ENETC_RBICR0_SET_ICPT(n) ((n) & ENETC_RBICR0_ICPT_MASK) +#define ENETC_RBICR1 0xac + +/* TX BDR reg offsets */ +#define ENETC_TBMR 0 +#define ENETC_TBSR_BUSY BIT(0) +#define ENETC_TBMR_VIH BIT(9) +#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0) +#define ENETC_TBMR_SET_PRIO(val) ((val) & ENETC_TBMR_PRIO_MASK) +#define ENETC_TBMR_EN BIT(31) +#define ENETC_TBSR 0x4 +#define ENETC_TBBAR0 0x10 +#define ENETC_TBBAR1 0x14 +#define ENETC_TBPIR 0x18 +#define ENETC_TBCIR 0x1c +#define ENETC_TBCIR_IDX_MASK 0xffff +#define ENETC_TBLENR 0x20 +#define ENETC_TBIER 0xa0 +#define ENETC_TBIER_TXTIE BIT(0) +#define ENETC_TBIDR 0xa4 +#define ENETC_TBICR0 0xa8 +#define ENETC_TBICR0_ICEN BIT(31) +#define ENETC_TBICR0_ICPT_MASK 0xf +#define ENETC_TBICR0_SET_ICPT(n) ((ilog2(n) + 1) & ENETC_TBICR0_ICPT_MASK) +#define ENETC_TBICR1 0xac + +#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) + +/* Port regs, offset: 1_0000h */ +#define ENETC_PORT_BASE 0x10000 +#define ENETC_PMR 0x0000 +#define ENETC_PMR_EN GENMASK(18, 16) +#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8) +#define ENETC_PMR_PSPEED_10M 0 +#define ENETC_PMR_PSPEED_100M BIT(8) +#define ENETC_PMR_PSPEED_1000M BIT(9) +#define 
ENETC_PMR_PSPEED_2500M BIT(10) +#define ENETC_PSR 0x0004 /* RO */ +#define ENETC_PSIPMR 0x0018 +#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */ +#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16) +#define ENETC_PSIPVMR 0x001c +#define ENETC_VLAN_PROMISC_MAP_ALL 0x7 +#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7) +#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16) +#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */ +#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8) +#define ENETC_PVCLCTR 0x0208 +#define ENETC_PCVLANR1 0x0210 +#define ENETC_PCVLANR2 0x0214 +#define ENETC_VLAN_TYPE_C BIT(0) +#define ENETC_VLAN_TYPE_S BIT(1) +#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */ +#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */ +#define ENETC_PSIVLAN_EN BIT(31) +#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12) +#define ENETC_PTXMBAR 0x0608 +#define ENETC_PCAPR0 0x0900 +#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) +#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff) +#define ENETC_PCAPR1 0x0904 +#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */ +#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff) +#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16) +#define ENETC_PSICFGR0_VTE BIT(12) +#define ENETC_PSICFGR0_SIVIE BIT(14) +#define ENETC_PSICFGR0_ASE BIT(15) +#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */ + +#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_CBSE BIT(31) +#define ENETC_CBS_BW_MASK GENMASK(6, 0) +#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_RSSHASH_KEY_SIZE 40 +#define ENETC_PRSSCAPR 0x1404 +#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) +#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */ +#define ENETC_PSIVLANFMR 0x1700 +#define ENETC_PSIVLANFMR_VS BIT(0) +#define ENETC_PRFSMR 0x1800 +#define ENETC_PRFSMR_RFSE BIT(31) +#define ENETC_PRFSCAPR 0x1804 +#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16) +#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */ +#define ENETC_PFPMR 0x1900 +#define ENETC_PFPMR_PMACE BIT(1) +#define ENETC_PFPMR_MWLM BIT(0) +#define ENETC_EMDIO_BASE 0x1c00 +#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10) +#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10) +#define ENETC_PSIMMHFR0(n, err) (((err) ? 
0x1d00 : 0x1d08) + (n) * 0x10) +#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10) +#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */ +#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */ +#define ENETC_MMCSR 0x1f00 +#define ENETC_MMCSR_ME BIT(16) +#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */ + +#define ENETC_PM0_CMD_CFG 0x8008 +#define ENETC_PM1_CMD_CFG 0x9008 +#define ENETC_PM0_TX_EN BIT(0) +#define ENETC_PM0_RX_EN BIT(1) +#define ENETC_PM0_PROMISC BIT(4) +#define ENETC_PM0_CMD_XGLP BIT(10) +#define ENETC_PM0_CMD_TXP BIT(11) +#define ENETC_PM0_CMD_PHY_TX_EN BIT(15) +#define ENETC_PM0_CMD_SFD BIT(21) +#define ENETC_PM0_MAXFRM 0x8014 +#define ENETC_SET_TX_MTU(val) ((val) << 16) +#define ENETC_SET_MAXFRM(val) ((val) & 0xffff) +#define ENETC_PM0_RX_FIFO 0x801c +#define ENETC_PM0_RX_FIFO_VAL 1 + +#define ENETC_PM_IMDIO_BASE 0x8030 + +#define ENETC_PM0_IF_MODE 0x8300 +#define ENETC_PM0_IFM_RG BIT(2) +#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) +#define ENETC_PM0_IFM_EN_AUTO BIT(15) +#define ENETC_PM0_IFM_SSP_MASK GENMASK(14, 13) +#define ENETC_PM0_IFM_SSP_1000 (2 << 13) +#define ENETC_PM0_IFM_SSP_100 (0 << 13) +#define ENETC_PM0_IFM_SSP_10 (1 << 13) +#define ENETC_PM0_IFM_FULL_DPX BIT(12) +#define ENETC_PM0_IFM_IFMODE_MASK GENMASK(1, 0) +#define ENETC_PM0_IFM_IFMODE_XGMII 0 +#define ENETC_PM0_IFM_IFMODE_GMII 2 +#define ENETC_PSIDCAPR 0x1b08 +#define ENETC_PSIDCAPR_MSK GENMASK(15, 0) +#define ENETC_PSFCAPR 0x1b18 +#define ENETC_PSFCAPR_MSK GENMASK(15, 0) +#define ENETC_PSGCAPR 0x1b28 +#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16) +#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0) +#define ENETC_PFMCAPR 0x1b38 +#define ENETC_PFMCAPR_MSK GENMASK(15, 0) + +/* MAC counters */ +#define ENETC_PM0_REOCT 0x8100 +#define ENETC_PM0_RALN 0x8110 +#define ENETC_PM0_RXPF 0x8118 +#define ENETC_PM0_RFRM 0x8120 +#define ENETC_PM0_RFCS 0x8128 +#define ENETC_PM0_RVLAN 0x8130 +#define ENETC_PM0_RERR 0x8138 +#define ENETC_PM0_RUCA 0x8140 +#define ENETC_PM0_RMCA 0x8148 +#define ENETC_PM0_RBCA 0x8150 +#define ENETC_PM0_RDRP 0x8158 +#define ENETC_PM0_RPKT 0x8160 +#define ENETC_PM0_RUND 0x8168 +#define ENETC_PM0_R64 0x8170 +#define ENETC_PM0_R127 0x8178 +#define ENETC_PM0_R255 0x8180 +#define ENETC_PM0_R511 0x8188 +#define ENETC_PM0_R1023 0x8190 +#define ENETC_PM0_R1522 0x8198 +#define ENETC_PM0_R1523X 0x81A0 +#define ENETC_PM0_ROVR 0x81A8 +#define ENETC_PM0_RJBR 0x81B0 +#define ENETC_PM0_RFRG 0x81B8 +#define ENETC_PM0_RCNP 0x81C0 +#define ENETC_PM0_RDRNTP 0x81C8 +#define ENETC_PM0_TEOCT 0x8200 +#define ENETC_PM0_TOCT 0x8208 +#define ENETC_PM0_TCRSE 0x8210 +#define ENETC_PM0_TXPF 0x8218 +#define ENETC_PM0_TFRM 0x8220 +#define ENETC_PM0_TFCS 0x8228 +#define ENETC_PM0_TVLAN 0x8230 +#define ENETC_PM0_TERR 0x8238 +#define ENETC_PM0_TUCA 0x8240 +#define ENETC_PM0_TMCA 0x8248 +#define ENETC_PM0_TBCA 0x8250 +#define ENETC_PM0_TPKT 0x8260 +#define ENETC_PM0_TUND 0x8268 +#define ENETC_PM0_T64 0x8270 +#define ENETC_PM0_T127 0x8278 +#define ENETC_PM0_T255 0x8280 +#define ENETC_PM0_T511 0x8288 +#define ENETC_PM0_T1023 0x8290 +#define ENETC_PM0_T1522 0x8298 +#define ENETC_PM0_T1523X 0x82A0 +#define ENETC_PM0_TCNP 0x82C0 +#define ENETC_PM0_TDFR 0x82D0 +#define ENETC_PM0_TMCOL 0x82D8 +#define ENETC_PM0_TSCOL 0x82E0 +#define ENETC_PM0_TLCOL 0x82E8 +#define ENETC_PM0_TECOL 0x82F0 + +/* Port counters */ +#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */ +#define ENETC_PBFDSIR 0x0810 +#define ENETC_PFDMSAPR 0x0814 +#define ENETC_UFDMF 0x1680 +#define ENETC_MFDMF 0x1684 +#define 
ENETC_PUFDVFR 0x1780 +#define ENETC_PMFDVFR 0x1784 +#define ENETC_PBFDVFR 0x1788 + +/** Global regs, offset: 2_0000h */ +#define ENETC_GLOBAL_BASE 0x20000 +#define ENETC_G_EIPBRR0 0x0bf8 +#define ENETC_G_EIPBRR1 0x0bfc +#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) +#define ENETC_G_EPFBLPR1_XGMII 0x80000000 + +/* PCI device info */ +struct enetc_hw { + /* SI registers, used by all PCI functions */ + void __iomem *reg; + /* Port registers, PF only */ + void __iomem *port; + /* IP global registers, PF only */ + void __iomem *global; +}; + +/* ENETC register accessors */ + +/* MDIO issue workaround (on LS1028A) - + * Due to a hardware issue, an access to MDIO registers + * that is concurrent with other ENETC register accesses + * may lead to the MDIO access being dropped or corrupted. + * To protect the MDIO accesses a readers-writers locking + * scheme is used, where the MDIO register accesses are + * protected by write locks to insure exclusivity, while + * the remaining ENETC registers are accessed under read + * locks since they only compete with MDIO accesses. + */ +extern rwlock_t enetc_mdio_lock; + +/* use this locking primitive only on the fast datapath to + * group together multiple non-MDIO register accesses to + * minimize the overhead of the lock + */ +static inline void enetc_lock_mdio(void) +{ + read_lock(&enetc_mdio_lock); +} + +static inline void enetc_unlock_mdio(void) +{ + read_unlock(&enetc_mdio_lock); +} + +/* use these accessors only on the fast datapath under + * the enetc_lock_mdio() locking primitive to minimize + * the overhead of the lock + */ +static inline u32 enetc_rd_reg_hot(void __iomem *reg) +{ + lockdep_assert_held(&enetc_mdio_lock); + + return ioread32(reg); +} + +static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val) +{ + lockdep_assert_held(&enetc_mdio_lock); + + iowrite32(val, reg); +} + +/* internal helpers for the MDIO w/a */ +static inline u32 _enetc_rd_reg_wa(void __iomem *reg) +{ + u32 val; + + enetc_lock_mdio(); + val = ioread32(reg); + enetc_unlock_mdio(); + + return val; +} + +static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val) +{ + enetc_lock_mdio(); + iowrite32(val, reg); + enetc_unlock_mdio(); +} + +static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg) +{ + unsigned long flags; + u32 val; + + write_lock_irqsave(&enetc_mdio_lock, flags); + val = ioread32(reg); + write_unlock_irqrestore(&enetc_mdio_lock, flags); + + return val; +} + +static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val) +{ + unsigned long flags; + + write_lock_irqsave(&enetc_mdio_lock, flags); + iowrite32(val, reg); + write_unlock_irqrestore(&enetc_mdio_lock, flags); +} + +#ifdef ioread64 +static inline u64 _enetc_rd_reg64(void __iomem *reg) +{ + return ioread64(reg); +} +#else +/* using this to read out stats on 32b systems */ +static inline u64 _enetc_rd_reg64(void __iomem *reg) +{ + u32 low, high, tmp; + + do { + high = ioread32(reg + 4); + low = ioread32(reg); + tmp = ioread32(reg + 4); + } while (high != tmp); + + return le64_to_cpu((__le64)high << 32 | low); +} +#endif + +static inline u64 _enetc_rd_reg64_wa(void __iomem *reg) +{ + u64 val; + + enetc_lock_mdio(); + val = _enetc_rd_reg64(reg); + enetc_unlock_mdio(); + + return val; +} + +/* general register accessors */ +#define enetc_rd_reg(reg) _enetc_rd_reg_wa((reg)) +#define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val)) +#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off)) +#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val) +#define enetc_rd_hot(hw, 
off) enetc_rd_reg_hot((hw)->reg + (off)) +#define enetc_wr_hot(hw, off, val) enetc_wr_reg_hot((hw)->reg + (off), val) +#define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off)) +/* port register accessors - PF only */ +#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off)) +#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val) +#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off)) +#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\ + (hw)->port + (off), val) +/* global register accessors - PF only */ +#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off)) +#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val) +/* BDR register accessors, see ENETC_BDR() */ +#define enetc_bdr_rd(hw, t, n, off) \ + enetc_rd(hw, ENETC_BDR(t, n, off)) +#define enetc_bdr_wr(hw, t, n, off, val) \ + enetc_wr(hw, ENETC_BDR(t, n, off), val) +#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off) +#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off) +#define enetc_txbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, TX, n, off, val) +#define enetc_rxbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, RX, n, off, val) + +/* Buffer Descriptors (BD) */ +union enetc_tx_bd { + struct { + __le64 addr; + __le16 buf_len; + __le16 frm_len; + union { + struct { + __le16 l3_csoff; + u8 l4_csoff; + u8 flags; + }; /* default layout */ + __le32 txstart; + __le32 lstatus; + }; + }; + struct { + __le32 tstamp; + __le16 tpid; + __le16 vid; + u8 reserved[6]; + u8 e_flags; + u8 flags; + } ext; /* Tx BD extension */ + struct { + __le32 tstamp; + u8 reserved[10]; + u8 status; + u8 flags; + } wb; /* writeback descriptor */ +}; + +#define ENETC_TXBD_FLAGS_L4CS BIT(0) +#define ENETC_TXBD_FLAGS_TSE BIT(1) +#define ENETC_TXBD_FLAGS_W BIT(2) +#define ENETC_TXBD_FLAGS_CSUM BIT(3) +#define ENETC_TXBD_FLAGS_TXSTART BIT(4) +#define ENETC_TXBD_FLAGS_EX BIT(6) +#define ENETC_TXBD_FLAGS_F BIT(7) +#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0) +#define ENETC_TXBD_FLAGS_OFFSET 24 +static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd) +{ + memset(txbd, 0, sizeof(*txbd)); +} + +/* L3 csum flags */ +#define ENETC_TXBD_L3_IPCS BIT(7) +#define ENETC_TXBD_L3_IPV6 BIT(15) + +#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0) +#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8) + +/* Extension flags */ +#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0) +#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2) + +static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags) +{ + return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) | + (start & ENETC_TXBD_L3_START_MASK)); +} + +/* L4 csum flags */ +#define ENETC_TXBD_L4_UDP BIT(5) +#define ENETC_TXBD_L4_TCP BIT(6) + +union enetc_rx_bd { + struct { + __le64 addr; + u8 reserved[8]; + } w; + struct { + __le16 inet_csum; + __le16 parse_summary; + __le32 rss_hash; + __le16 buf_len; + __le16 vlan_opt; + union { + struct { + __le16 flags; + __le16 error; + }; + __le32 lstatus; + }; + } r; + struct { + __le32 tstamp; + u8 reserved[12]; + } ext; +}; + +#define ENETC_RXBD_LSTATUS_R BIT(30) +#define ENETC_RXBD_LSTATUS_F BIT(31) +#define ENETC_RXBD_ERR_MASK 0xff +#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16) +#define ENETC_RXBD_FLAG_VLAN BIT(9) +#define ENETC_RXBD_FLAG_TSTMP BIT(10) +#define ENETC_RXBD_FLAG_TPID GENMASK(1, 0) + +#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */ +#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */ 
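A minimal user-space sketch of the torn-read pattern that _enetc_rd_reg64() above uses to read 64-bit statistics counters on 32-bit systems, where the two ioread32() halves cannot be fetched atomically. The names counter_lo/counter_hi and rd64_torn_safe() are hypothetical stand-ins for the memory-mapped halves of a live hardware counter, not driver code:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the low/high 32-bit halves of a 64-bit MMIO counter */
static volatile uint32_t counter_lo, counter_hi;

static uint64_t rd64_torn_safe(void)
{
        uint32_t low, high, tmp;

        /* re-read the high word until it is stable, so a carry from
         * low to high between the two reads cannot yield a torn value
         */
        do {
                high = counter_hi;
                low  = counter_lo;
                tmp  = counter_hi;
        } while (high != tmp);

        return ((uint64_t)high << 32) | low;
}

int main(void)
{
        counter_lo = 0xffffffffu;
        counter_hi = 0x1u;
        printf("counter = 0x%llx\n",
               (unsigned long long)rd64_torn_safe());
        return 0;
}

Reading high/low/high and retrying on a mismatch guarantees the returned value pairs a consistent snapshot of both halves, at the cost of an occasional extra read when the counter carries over mid-access.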
+#define ENETC_MAX_NUM_VFS 2 + +#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */ +#define ENETC_CBD_STATUS_MASK 0xf + +struct enetc_cmd_rfse { + u8 smac_h[6]; + u8 smac_m[6]; + u8 dmac_h[6]; + u8 dmac_m[6]; + u32 sip_h[4]; + u32 sip_m[4]; + u32 dip_h[4]; + u32 dip_m[4]; + u16 ethtype_h; + u16 ethtype_m; + u16 ethtype4_h; + u16 ethtype4_m; + u16 sport_h; + u16 sport_m; + u16 dport_h; + u16 dport_m; + u16 vlan_h; + u16 vlan_m; + u8 proto_h; + u8 proto_m; + u16 flags; + u16 result; + u16 mode; +}; + +#define ENETC_RFSE_EN BIT(15) +#define ENETC_RFSE_MODE_BD 2 + +static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr) +{ + *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); + *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); +} + +#define ENETC_SI_INT_IDX 0 +/* base index for Rx/Tx interrupts */ +#define ENETC_BDR_INT_BASE_IDX 1 + +/* Messaging */ + +/* Command completion status */ +enum enetc_msg_cmd_status { + ENETC_MSG_CMD_STATUS_OK, + ENETC_MSG_CMD_STATUS_FAIL +}; + +/* VSI-PSI command message types */ +enum enetc_msg_cmd_type { + ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */ + ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */ + ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */ +}; + +/* VSI-PSI command action types */ +enum enetc_msg_cmd_action_type { + ENETC_MSG_CMD_MNG_ADD = 1, + ENETC_MSG_CMD_MNG_REMOVE +}; + +/* PSI-VSI command header format */ +struct enetc_msg_cmd_header { + u16 type; /* command class type */ + u16 id; /* denotes the specific required action */ +}; + +/* Common H/W utility functions */ + +static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx, + bool en) +{ + u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); + + val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0); + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val); +} + +static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx, + bool en) +{ + u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR); + + val = (val & ~ENETC_TBMR_VIH) | (en ? 
ENETC_TBMR_VIH : 0); + enetc_txbdr_wr(hw, idx, ENETC_TBMR, val); +} + +static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx, + int prio) +{ + u32 val = enetc_txbdr_rd(hw, bdr_idx, ENETC_TBMR); + + val &= ~ENETC_TBMR_PRIO_MASK; + val |= ENETC_TBMR_SET_PRIO(prio); + enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val); +} + +enum bdcr_cmd_class { + BDCR_CMD_UNSPEC = 0, + BDCR_CMD_MAC_FILTER, + BDCR_CMD_VLAN_FILTER, + BDCR_CMD_RSS, + BDCR_CMD_RFS, + BDCR_CMD_PORT_GCL, + BDCR_CMD_RECV_CLASSIFIER, + BDCR_CMD_STREAM_IDENTIFY, + BDCR_CMD_STREAM_FILTER, + BDCR_CMD_STREAM_GCL, + BDCR_CMD_FLOW_METER, + __BDCR_CMD_MAX_LEN, + BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1, +}; + +/* class 5, command 0 */ +struct tgs_gcl_conf { + u8 atc; /* init gate value */ + u8 res[7]; + struct { + u8 res1[4]; + __le16 acl_len; + u8 res2[2]; + }; +}; + +/* gate control list entry */ +struct gce { + __le32 period; + u8 gate; + u8 res[3]; +}; + +/* tgs_gcl_conf address point to this data space */ +struct tgs_gcl_data { + __le32 btl; + __le32 bth; + __le32 ct; + __le32 cte; + struct gce entry[]; +}; + +/* class 7, command 0, Stream Identity Entry Configuration */ +struct streamid_conf { + __le32 stream_handle; /* init gate value */ + __le32 iports; + u8 id_type; + u8 oui[3]; + u8 res[3]; + u8 en; +}; + +#define ENETC_CBDR_SID_VID_MASK 0xfff +#define ENETC_CBDR_SID_VIDM BIT(12) +#define ENETC_CBDR_SID_TG_MASK 0xc000 +/* streamid_conf address point to this data space */ +struct streamid_data { + union { + u8 dmac[6]; + u8 smac[6]; + }; + u16 vid_vidm_tg; +}; + +#define ENETC_CBDR_SFI_PRI_MASK 0x7 +#define ENETC_CBDR_SFI_PRIM BIT(3) +#define ENETC_CBDR_SFI_BLOV BIT(4) +#define ENETC_CBDR_SFI_BLEN BIT(5) +#define ENETC_CBDR_SFI_MSDUEN BIT(6) +#define ENETC_CBDR_SFI_FMITEN BIT(7) +#define ENETC_CBDR_SFI_ENABLE BIT(7) +/* class 8, command 0, Stream Filter Instance, Short Format */ +struct sfi_conf { + __le32 stream_handle; + u8 multi; + u8 res[2]; + u8 sthm; + /* Max Service Data Unit or Flow Meter Instance Table index. + * Depending on the value of FLT this represents either Max + * Service Data Unit (max frame size) allowed by the filter + * entry or is an index into the Flow Meter Instance table + * index identifying the policer which will be used to police + * it. 
+ */ + __le16 fm_inst_table_index; + __le16 msdu; + __le16 sg_inst_table_index; + u8 res1[2]; + __le32 input_ports; + u8 res2[3]; + u8 en; +}; + +/* class 8, command 2 stream Filter Instance status query short format + * command no need structure define + * Stream Filter Instance Query Statistics Response data + */ +struct sfi_counter_data { + u32 matchl; + u32 matchh; + u32 msdu_dropl; + u32 msdu_droph; + u32 stream_gate_dropl; + u32 stream_gate_droph; + u32 flow_meter_dropl; + u32 flow_meter_droph; +}; + +#define ENETC_CBDR_SGI_OIPV_MASK 0x7 +#define ENETC_CBDR_SGI_OIPV_EN BIT(3) +#define ENETC_CBDR_SGI_CGTST BIT(6) +#define ENETC_CBDR_SGI_OGTST BIT(7) +#define ENETC_CBDR_SGI_CFG_CHG BIT(1) +#define ENETC_CBDR_SGI_CFG_PND BIT(2) +#define ENETC_CBDR_SGI_OEX BIT(4) +#define ENETC_CBDR_SGI_OEXEN BIT(5) +#define ENETC_CBDR_SGI_IRX BIT(6) +#define ENETC_CBDR_SGI_IRXEN BIT(7) +#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3 +#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc +#define ENETC_CBDR_SGI_EN BIT(7) +/* class 9, command 0, Stream Gate Instance Table, Short Format + * class 9, command 2, Stream Gate Instance Table entry query write back + * Short Format + */ +struct sgi_table { + u8 res[8]; + u8 oipv; + u8 res0[2]; + u8 ocgtst; + u8 res1[7]; + u8 gset; + u8 oacl_len; + u8 res2[2]; + u8 en; +}; + +#define ENETC_CBDR_SGI_AIPV_MASK 0x7 +#define ENETC_CBDR_SGI_AIPV_EN BIT(3) +#define ENETC_CBDR_SGI_AGTST BIT(7) + +/* class 9, command 1, Stream Gate Control List, Long Format */ +struct sgcl_conf { + u8 aipv; + u8 res[2]; + u8 agtst; + u8 res1[4]; + union { + struct { + u8 res2[4]; + u8 acl_len; + u8 res3[3]; + }; + u8 cct[8]; /* Config change time */ + }; +}; + +#define ENETC_CBDR_SGL_IOMEN BIT(0) +#define ENETC_CBDR_SGL_IPVEN BIT(3) +#define ENETC_CBDR_SGL_GTST BIT(4) +#define ENETC_CBDR_SGL_IPV_MASK 0xe +/* Stream Gate Control List Entry */ +struct sgce { + u32 interval; + u8 msdu[3]; + u8 multi; +}; + +/* stream control list class 9 , cmd 1 data buffer */ +struct sgcl_data { + u32 btl; + u32 bth; + u32 ct; + u32 cte; + struct sgce sgcl[0]; +}; + +#define ENETC_CBDR_FMI_MR BIT(0) +#define ENETC_CBDR_FMI_MREN BIT(1) +#define ENETC_CBDR_FMI_DOY BIT(2) +#define ENETC_CBDR_FMI_CM BIT(3) +#define ENETC_CBDR_FMI_CF BIT(4) +#define ENETC_CBDR_FMI_NDOR BIT(5) +#define ENETC_CBDR_FMI_OALEN BIT(6) +#define ENETC_CBDR_FMI_IRFPP_MASK GENMASK(4, 0) + +/* class 10: command 0/1, Flow Meter Instance Set, short Format */ +struct fmi_conf { + __le32 cir; + __le32 cbs; + __le32 eir; + __le32 ebs; + u8 conf; + u8 res1; + u8 ir_fpp; + u8 res2[4]; + u8 en; +}; + +struct enetc_cbd { + union{ + struct sfi_conf sfi_conf; + struct sgi_table sgi_table; + struct fmi_conf fmi_conf; + struct { + __le32 addr[2]; + union { + __le32 opt[4]; + struct tgs_gcl_conf gcl_conf; + struct streamid_conf sid_set; + struct sgcl_conf sgcl_conf; + }; + }; /* Long format */ + __le32 data[6]; + }; + __le16 index; + __le16 length; + u8 cmd; + u8 cls; + u8 _res; + u8 status_flags; +}; + +#define ENETC_CLK 400000000ULL +static inline u32 enetc_cycles_to_usecs(u32 cycles) +{ + return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK); +} + +static inline u32 enetc_usecs_to_cycles(u32 usecs) +{ + return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL); +} + +/* port time gating control register */ +#define ENETC_QBV_PTGCR_OFFSET 0x11a00 +#define ENETC_QBV_TGE BIT(31) +#define ENETC_QBV_TGPE BIT(30) + +/* Port time gating capability register */ +#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08 +#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0) + +/* Port time specific departure */ 
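For reference, enetc_cycles_to_usecs() and enetc_usecs_to_cycles() just above convert between interrupt-coalescing time thresholds in microseconds and ENETC timer ticks at the fixed 400 MHz ENETC_CLK. A small stand-alone sketch of the same arithmetic (cycles_to_usecs()/usecs_to_cycles() and the main() demo are illustrative stand-ins, not driver code):

#include <stdint.h>
#include <stdio.h>

#define ENETC_CLK 400000000ULL /* 400 MHz, as defined in enetc_hw.h */

static uint32_t usecs_to_cycles(uint32_t usecs)
{
        return (uint32_t)((usecs * ENETC_CLK) / 1000000ULL);
}

static uint32_t cycles_to_usecs(uint32_t cycles)
{
        return (uint32_t)((cycles * 1000000ULL) / ENETC_CLK);
}

int main(void)
{
        /* ENETC_TXIC_TIMETHR is enetc_usecs_to_cycles(600) elsewhere
         * in this series: 600 us * 400 cycles/us = 240000 cycles
         */
        printf("600 us -> %u cycles\n", usecs_to_cycles(600));
        printf("240000 cycles -> %u us\n", cycles_to_usecs(240000));
        return 0;
}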
+#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n)) +#define ENETC_TSDE BIT(31) + +/* PSFP setting */ +#define ENETC_PPSFPMR 0x11b00 +#define ENETC_PPSFPMR_PSFPEN BIT(0) +#define ENETC_PPSFPMR_VS BIT(1) +#define ENETC_PPSFPMR_PVC BIT(2) +#define ENETC_PPSFPMR_PVZC BIT(3) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c new file mode 100644 index 000000000..ee0116ed4 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/fsl/enetc_mdio.h> +#include <linux/mdio.h> +#include <linux/of_mdio.h> +#include <linux/iopoll.h> +#include <linux/of.h> + +#include "enetc_pf.h" + +#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */ +#define ENETC_MDIO_CTL 0x4 /* MDIO control */ +#define ENETC_MDIO_DATA 0x8 /* MDIO data */ +#define ENETC_MDIO_ADDR 0xc /* MDIO address */ + +static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off) +{ + return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off); +} + +static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off, + u32 val) +{ + enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val); +} + +#define enetc_mdio_rd(mdio_priv, off) \ + _enetc_mdio_rd(mdio_priv, ENETC_##off) +#define enetc_mdio_wr(mdio_priv, off, val) \ + _enetc_mdio_wr(mdio_priv, ENETC_##off, val) +#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off) + +#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8) +#define MDIO_CFG_BSY BIT(0) +#define MDIO_CFG_RD_ER BIT(1) +#define MDIO_CFG_HOLD(x) (((x) << 2) & GENMASK(4, 2)) +#define MDIO_CFG_ENC45 BIT(6) + /* external MDIO only - driven on neg MDC edge */ +#define MDIO_CFG_NEG BIT(23) + +#define ENETC_EMDIO_CFG \ + (MDIO_CFG_HOLD(2) | \ + MDIO_CFG_CLKDIV(258) | \ + MDIO_CFG_NEG) + +#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f) +#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5) +#define MDIO_CTL_READ BIT(15) +#define MDIO_DATA(x) ((x) & 0xffff) + +#define TIMEOUT 1000 +static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv) +{ + u32 val; + + return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val, + !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT); +} + +int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + u16 dev_addr; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_cfg |= MDIO_CFG_ENC45; + } else { + /* clause 22 (ie 1G) */ + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + } + + enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl); + + /* set the register address */ + if (regnum & MII_ADDR_C45) { + enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + } + + /* write the value */ + enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value)); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_mdio_write); + +int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + u16 
dev_addr, value; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_cfg |= MDIO_CFG_ENC45; + } else { + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + } + + enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set port and device addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl); + + /* set the register address */ + if (regnum & MII_ADDR_C45) { + enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + } + + /* initiate the read */ + enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* return all Fs if nothing was there */ + if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) { + dev_dbg(&bus->dev, + "Error while reading PHY%d reg at %d.%hhu\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + + value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff; + + return value; +} +EXPORT_SYMBOL_GPL(enetc_mdio_read); + +struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs) +{ + struct enetc_hw *hw; + + hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); + if (!hw) + return ERR_PTR(-ENOMEM); + + hw->port = port_regs; + + return hw; +} +EXPORT_SYMBOL_GPL(enetc_hw_alloc); + +/* Lock for MDIO access errata on LS1028A */ +DEFINE_RWLOCK(enetc_mdio_lock); +EXPORT_SYMBOL_GPL(enetc_mdio_lock); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c new file mode 100644 index 000000000..40d22ebe9 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc_pf.h" + +static void enetc_msg_disable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + /* disable MR int source(s) */ + enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK); +} + +static void enetc_msg_enable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + + enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK); +} + +static irqreturn_t enetc_msg_psi_msix(int irq, void *data) +{ + struct enetc_si *si = (struct enetc_si *)data; + struct enetc_pf *pf = enetc_si_priv(si); + + enetc_msg_disable_mr_int(&si->hw); + schedule_work(&pf->msg_task); + + return IRQ_HANDLED; +} + +static void enetc_msg_task(struct work_struct *work) +{ + struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task); + struct enetc_hw *hw = &pf->si->hw; + unsigned long mr_mask; + int i; + + for (;;) { + mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK; + if (!mr_mask) { + /* re-arm MR interrupts, w1c the IDR reg */ + enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK); + enetc_msg_enable_mr_int(hw); + return; + } + + for (i = 0; i < pf->num_vfs; i++) { + u32 psimsgrr; + u16 msg_code; + + if (!(ENETC_PSIMSGRR_MR(i) & mr_mask)) + continue; + + enetc_msg_handle_rxmsg(pf, i, &msg_code); + + psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code); + psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */ + enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr); + } + } +} + +/* Init */ +static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; 
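+ /* The PF keeps one DMA-coherent receive buffer per VF mailbox; its
+ * bus address (a multiple of 32 bytes, per the comment below) is
+ * split across the PSIVMSGRCVAR0/1 register pair so the hardware
+ * knows where to deliver that VF's messages. Schematically, for an
+ * address of 0x1_8000_1000 (the value is illustrative only):
+ *
+ *   PSIVMSGRCVAR0(idx) = 0x80001000   (lower 32 bits)
+ *   PSIVMSGRCVAR1(idx) = 0x1          (upper 32 bits)
+ */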
+ struct enetc_msg_swbd *msg; + u32 val; + + msg = &pf->rxmsg[idx]; + /* allocate and set receive buffer */ + msg->size = ENETC_DEFAULT_MSG_SIZE; + + msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma, + GFP_KERNEL); + if (!msg->vaddr) { + dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n", + msg->size); + return -ENOMEM; + } + + /* set multiple of 32 bytes */ + val = lower_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val); + val = upper_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val); + + return 0; +} + +static void enetc_msg_free_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + struct enetc_msg_swbd *msg; + + msg = &pf->rxmsg[idx]; + dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma); + memset(msg, 0, sizeof(*msg)); + + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0); +} + +int enetc_msg_psi_init(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int vector, i, err; + + /* register message passing interrupt handler */ + snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg", + si->ndev->name); + vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX); + err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si); + if (err) { + dev_err(&si->pdev->dev, + "PSI messaging: request_irq() failed!\n"); + return err; + } + + /* set one IRQ entry for PSI message receive notification (SI int) */ + enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX); + + /* initialize PSI mailbox */ + INIT_WORK(&pf->msg_task, enetc_msg_task); + + for (i = 0; i < pf->num_vfs; i++) { + err = enetc_msg_alloc_mbx(si, i); + if (err) + goto err_init_mbx; + } + + /* enable MR interrupts */ + enetc_msg_enable_mr_int(&si->hw); + + return 0; + +err_init_mbx: + for (i--; i >= 0; i--) + enetc_msg_free_mbx(si, i); + + free_irq(vector, si); + + return err; +} + +void enetc_msg_psi_free(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int i; + + cancel_work_sync(&pf->msg_task); + + /* disable MR interrupts */ + enetc_msg_disable_mr_int(&si->hw); + + for (i = 0; i < pf->num_vfs; i++) + enetc_msg_free_mbx(si, i); + + /* de-register message passing interrupt handler */ + free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c new file mode 100644 index 000000000..15f37c5b8 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ +#include <linux/fsl/enetc_mdio.h> +#include <linux/of_mdio.h> +#include "enetc_pf.h" + +#define ENETC_MDIO_DEV_ID 0xee01 +#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO" +#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus" +#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver" + +static int enetc_pci_mdio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_mdio_priv *mdio_priv; + struct device *dev = &pdev->dev; + void __iomem *port_regs; + struct enetc_hw *hw; + struct mii_bus *bus; + int err; + + port_regs = pci_iomap(pdev, 0, 0); + if (!port_regs) { + dev_err(dev, "iomap failed\n"); + err = -ENXIO; + goto err_ioremap; + } + + hw = enetc_hw_alloc(dev, port_regs); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + goto err_hw_alloc; + } + + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + if (!bus) { + err = -ENOMEM; + goto 
err_mdiobus_alloc; + } + + bus->name = ENETC_MDIO_BUS_NAME; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = hw; + mdio_priv->mdio_base = ENETC_EMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "device enable failed\n"); + goto err_pci_enable; + } + + err = pci_request_region(pdev, 0, KBUILD_MODNAME); + if (err) { + dev_err(dev, "pci_request_region failed\n"); + goto err_pci_mem_reg; + } + + err = of_mdiobus_register(bus, dev->of_node); + if (err) + goto err_mdiobus_reg; + + pci_set_drvdata(pdev, bus); + + return 0; + +err_mdiobus_reg: + pci_release_mem_regions(pdev); +err_pci_mem_reg: + pci_disable_device(pdev); +err_pci_enable: +err_mdiobus_alloc: +err_hw_alloc: + iounmap(port_regs); +err_ioremap: + return err; +} + +static void enetc_pci_mdio_remove(struct pci_dev *pdev) +{ + struct mii_bus *bus = pci_get_drvdata(pdev); + struct enetc_mdio_priv *mdio_priv; + + mdiobus_unregister(bus); + mdio_priv = bus->priv; + iounmap(mdio_priv->hw->port); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_pci_mdio_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table); + +static struct pci_driver enetc_pci_mdio_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pci_mdio_id_table, + .probe = enetc_pci_mdio_probe, + .remove = enetc_pci_mdio_remove, +}; +module_pci_driver(enetc_pci_mdio_driver); + +MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c new file mode 100644 index 000000000..515db7e6e --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -0,0 +1,1278 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/fsl/enetc_mdio.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include "enetc_pf.h" + +#define ENETC_DRV_NAME_STR "ENETC PF driver" + +static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr) +{ + u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si)); + u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si)); + + *(u32 *)addr = upper; + *(u16 *)(addr + 4) = lower; +} + +static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si, + const u8 *addr) +{ + u32 upper = *(const u32 *)addr; + u16 lower = *(const u16 *)(addr + 4); + + __raw_writel(upper, hw->port + ENETC_PSIPMAR0(si)); + __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si)); +} + +static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len); + enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data); + + return 0; +} + +static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map) +{ + u32 val = enetc_port_rd(hw, ENETC_PSIPVMR); + + val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL); + enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val); +} + +static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + 
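+ /* vlan_promisc_simap has one bit per station interface, with bit 0
+ * for the PF itself and bit N for VF N-1; the updated map is then
+ * pushed to the PSIPVMR port register in one shot. For illustration,
+ * a PF with two VFs all in VLAN promiscuous mode would carry a map
+ * of BIT(0) | BIT(1) | BIT(2).
+ */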
pf->vlan_promisc_simap |= BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + pf->vlan_promisc_simap &= ~BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos) +{ + u32 val = 0; + + if (vlan) + val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan; + + enetc_port_wr(hw, ENETC_PSIVLANR(si), val); +} + +static int enetc_mac_addr_hash_idx(const u8 *addr) +{ + u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; + u64 mask = 0; + int res = 0; + int i; + + for (i = 0; i < 8; i++) + mask |= BIT_ULL(i * 6); + + for (i = 0; i < 6; i++) + res |= (hweight64(fold & (mask << i)) & 0x1) << i; + + return res; +} + +static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) +{ + filter->mac_addr_cnt = 0; + + bitmap_zero(filter->mac_hash_table, + ENETC_MADDR_HASH_TBL_SZ); +} + +static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + /* add exact match addr */ + ether_addr_copy(filter->mac_addr, addr); + filter->mac_addr_cnt++; +} + +static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + int idx = enetc_mac_addr_hash_idx(addr); + + /* add hash table entry */ + __set_bit(idx, filter->mac_hash_table); + filter->mac_addr_cnt++; +} + +static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0); + } +} + +static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type, + u32 *hash) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1)); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1)); + } +} + +static void enetc_sync_mac_filters(struct enetc_pf *pf) +{ + struct enetc_mac_filter *f = pf->mac_filter; + struct enetc_si *si = pf->si; + int i, pos; + + pos = EMETC_MAC_ADDR_FILT_RES; + + for (i = 0; i < MADDR_TYPE; i++, f++) { + bool em = (f->mac_addr_cnt == 1) && (i == UC); + bool clear = !f->mac_addr_cnt; + + if (clear) { + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_clear_mac_ht_flt(si, 0, i); + continue; + } + + /* exact match filter */ + if (em) { + int err; + + enetc_clear_mac_ht_flt(si, 0, UC); + + err = enetc_set_mac_flt_entry(si, pos, f->mac_addr, + BIT(0)); + if (!err) + continue; + + /* fallback to HT filtering */ + dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n", + err); + } + + /* hash table filter, clear EM filter for UC entries */ + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table); + } +} + +static void enetc_pf_set_rx_mode(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct enetc_hw *hw = &priv->si->hw; + bool uprom = false, mprom = false; + struct enetc_mac_filter *filter; + struct netdev_hw_addr *ha; + u32 psipmr = 0; + bool 
em; + + if (ndev->flags & IFF_PROMISC) { + /* enable promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); + uprom = true; + mprom = true; + } else if (ndev->flags & IFF_ALLMULTI) { + /* enable multi cast promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_MP(0); + mprom = true; + } + + /* first 2 filter entries belong to PF */ + if (!uprom) { + /* Update unicast filters */ + filter = &pf->mac_filter[UC]; + enetc_reset_mac_addr_filter(filter); + + em = (netdev_uc_count(ndev) == 1); + netdev_for_each_uc_addr(ha, ndev) { + if (em) { + enetc_add_mac_addr_em_filter(filter, ha->addr); + break; + } + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!mprom) { + /* Update multicast filters */ + filter = &pf->mac_filter[MC]; + enetc_reset_mac_addr_filter(filter); + + netdev_for_each_mc_addr(ha, ndev) { + if (!is_multicast_ether_addr(ha->addr)) + continue; + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!uprom || !mprom) + /* update PF entries */ + enetc_sync_mac_filters(pf); + + psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) & + ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0)); + enetc_port_wr(hw, ENETC_PSIPMR, psipmr); +} + +static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, + u32 *hash) +{ + enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash); + enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1)); +} + +static int enetc_vid_hash_idx(unsigned int vid) +{ + int res = 0; + int i; + + for (i = 0; i < 6; i++) + res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; + + return res; +} + +static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) +{ + int i; + + if (rehash) { + bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); + + for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) { + int hidx = enetc_vid_hash_idx(i); + + __set_bit(hidx, pf->vlan_ht_filter); + } + } + + enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter); +} + +static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + int idx; + + __set_bit(vid, pf->active_vlans); + + idx = enetc_vid_hash_idx(vid); + if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) + enetc_sync_vlan_ht_filter(pf, false); + + return 0; +} + +static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + __clear_bit(vid, pf->active_vlans); + enetc_sync_vlan_ht_filter(pf, true); + + return 0; +} + +static void enetc_set_loopback(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE); + if (reg & ENETC_PM0_IFM_RG) { + /* RGMII mode */ + reg = (reg & ~ENETC_PM0_IFM_RLP) | + (en ? ENETC_PM0_IFM_RLP : 0); + enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg); + } else { + /* assume SGMII mode */ + reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG); + reg = (reg & ~ENETC_PM0_CMD_XGLP) | + (en ? ENETC_PM0_CMD_XGLP : 0); + reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) | + (en ? 
ENETC_PM0_CMD_PHY_TX_EN : 0);
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ }
+}
+
+static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct enetc_vf_state *vf_state;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ vf_state = &pf->vf_state[vf];
+ vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC;
+ enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
+ return 0;
+}
+
+static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
+ u8 qos, __be16 proto)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
+ return -EOPNOTSUPP;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ /* only C-tags supported for now */
+ return -EPROTONOSUPPORT;
+
+ enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
+ return 0;
+}
+
+static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ u32 cfgr;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
+ cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
+ enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);
+
+ return 0;
+}
+
+static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+{
+ unsigned char mac_addr[MAX_ADDR_LEN];
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ /* check the MAC addresses of the PF and all VFs; if any is 0, set it
+ * to a random one
+ */
+ for (i = 0; i < pf->total_vfs + 1; i++) {
+ enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
+ if (!is_zero_ether_addr(mac_addr))
+ continue;
+ eth_random_addr(mac_addr);
+ dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
+ i, mac_addr);
+ enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+ }
+}
+
+static void enetc_port_assign_rfs_entries(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_entries, vf_entries, i;
+ u32 val;
+
+ /* split RFS entries between functions */
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+ vf_entries = num_entries / (pf->total_vfs + 1);
+
+ for (i = 0; i < pf->total_vfs; i++)
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries);
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(0),
+ num_entries - vf_entries * pf->total_vfs);
+
+ /* enable RFS on port */
+ enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
+}
+
+static void enetc_port_si_configure(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_rings, i;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+ num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
+
+ val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
+ val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);
+
+ if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
+ val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+
+ dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
+ num_rings, ENETC_PF_NUM_RINGS);
+
+ num_rings = 0;
+ }
+
+ /* Add default one-time settings for SI0 (PF) */
+ val |=
ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + + enetc_port_wr(hw, ENETC_PSICFGR0(0), val); + + if (num_rings) + num_rings -= ENETC_PF_NUM_RINGS; + + /* Configure the SIs for each available VF */ + val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE; + + if (num_rings) { + num_rings /= pf->total_vfs; + val |= ENETC_PSICFGR0_SET_TXBDR(num_rings); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rings); + } + + for (i = 0; i < pf->total_vfs; i++) + enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val); + + /* Port level VLAN settings */ + val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + enetc_port_wr(hw, ENETC_PVCLCTR, val); + /* use outer tag for VLAN filtering */ + enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS); +} + +static void enetc_configure_port_mac(struct enetc_hw *hw) +{ + enetc_port_wr(hw, ENETC_PM0_MAXFRM, + ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); + + enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE); + enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE); + + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC); + + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC); + + /* On LS1028A, the MAC RX FIFO defaults to 2, which is too high + * and may lead to RX lock-up under traffic. Set it to 1 instead, + * as recommended by the hardware team. + */ + enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL); +} + +static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(phy_mode)) { + val = enetc_port_rd(hw, ENETC_PM0_IF_MODE); + val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK); + val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG; + enetc_port_wr(hw, ENETC_PM0_IF_MODE, val); + } + + if (phy_mode == PHY_INTERFACE_MODE_USXGMII) { + val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII; + enetc_port_wr(hw, ENETC_PM0_IF_MODE, val); + } +} + +static void enetc_mac_enable(struct enetc_hw *hw, bool en) +{ + u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG); + + val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); + val |= en ? 
(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0; + + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val); + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val); +} + +static void enetc_configure_port_pmac(struct enetc_hw *hw) +{ + u32 temp; + + /* Set pMAC step lock */ + temp = enetc_port_rd(hw, ENETC_PFPMR); + enetc_port_wr(hw, ENETC_PFPMR, + temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM); + + temp = enetc_port_rd(hw, ENETC_MMCSR); + enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME); +} + +static void enetc_configure_port(struct enetc_pf *pf) +{ + u8 hash_key[ENETC_RSSHASH_KEY_SIZE]; + struct enetc_hw *hw = &pf->si->hw; + + enetc_configure_port_pmac(hw); + + enetc_configure_port_mac(hw); + + enetc_port_si_configure(pf->si); + + /* set up hash key */ + get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); + enetc_set_rss_key(hw, hash_key); + + /* split up RFS entries */ + enetc_port_assign_rfs_entries(pf->si); + + /* fix-up primary MAC addresses, if not set already */ + enetc_port_setup_primary_mac_address(pf->si); + + /* enforce VLAN promisc mode for all SIs */ + pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL; + enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap); + + enetc_port_wr(hw, ENETC_PSIPMR, 0); + + /* enable port */ + enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN); +} + +/* Messaging */ +static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf, + int vf_id) +{ + struct enetc_vf_state *vf_state = &pf->vf_state[vf_id]; + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct enetc_msg_cmd_set_primary_mac *cmd; + struct device *dev = &pf->si->pdev->dev; + u16 cmd_id; + char *addr; + + cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr; + cmd_id = cmd->header.id; + if (cmd_id != ENETC_MSG_CMD_MNG_ADD) + return ENETC_MSG_CMD_STATUS_FAIL; + + addr = cmd->mac.sa_data; + if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC) + dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n", + vf_id); + else + enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr); + + return ENETC_MSG_CMD_STATUS_OK; +} + +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status) +{ + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct device *dev = &pf->si->pdev->dev; + struct enetc_msg_cmd_header *cmd_hdr; + u16 cmd_type; + + *status = ENETC_MSG_CMD_STATUS_OK; + cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr; + cmd_type = cmd_hdr->type; + + switch (cmd_type) { + case ENETC_MSG_CMD_MNG_MAC: + *status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id); + break; + default: + dev_err(dev, "command not supported (cmd_type: 0x%x)\n", + cmd_type); + } +} + +#ifdef CONFIG_PCI_IOV +static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + int err; + + if (!num_vfs) { + enetc_msg_psi_free(pf); + kfree(pf->vf_state); + pf->num_vfs = 0; + pci_disable_sriov(pdev); + } else { + pf->num_vfs = num_vfs; + + pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state), + GFP_KERNEL); + if (!pf->vf_state) { + pf->num_vfs = 0; + return -ENOMEM; + } + + err = enetc_msg_psi_init(pf); + if (err) { + dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err); + goto err_msg_psi; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err); + goto err_en_sriov; + } + } + + return num_vfs; + +err_en_sriov: + enetc_msg_psi_free(pf); +err_msg_psi: + kfree(pf->vf_state); + pf->num_vfs = 0; + + return err; +} +#else +#define enetc_sriov_configure(pdev, num_vfs) 
(void)0 +#endif + +static int enetc_pf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (changed & NETIF_F_HW_TC) { + err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); + if (err) + return err; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + struct enetc_pf *pf = enetc_si_priv(priv->si); + + if (!!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) + enetc_disable_si_vlan_promisc(pf, 0); + else + enetc_enable_si_vlan_promisc(pf, 0); + } + + if (changed & NETIF_F_LOOPBACK) + enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); + + enetc_set_features(ndev, features); + + return 0; +} + +static const struct net_device_ops enetc_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_pf_set_mac_addr, + .ndo_set_rx_mode = enetc_pf_set_rx_mode, + .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid, + .ndo_set_vf_mac = enetc_pf_set_vf_mac, + .ndo_set_vf_vlan = enetc_pf_set_vf_vlan, + .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, + .ndo_set_features = enetc_pf_set_features, + .ndo_do_ioctl = enetc_ioctl, + .ndo_setup_tc = enetc_setup_tc, +}; + +static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_WOL << 1) - 1; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + if (si->num_rss) + ndev->hw_features |= NETIF_F_RXHASH; + + if (si->errata & ENETC_ERR_TXCSUM) { + ndev->hw_features &= ~NETIF_F_HW_CSUM; + ndev->features &= ~NETIF_F_HW_CSUM; + } + + ndev->priv_flags |= IFF_UNICAST_FLT; + + if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) { + priv->active_offloads |= ENETC_F_QCI; + ndev->features |= NETIF_F_HW_TC; + ndev->hw_features |= NETIF_F_HW_TC; + } + + /* pick up primary MAC address from SI */ + enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); +} + +static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_priv *mdio_priv; + struct mii_bus *bus; + int err; + + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC MDIO Bus"; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = &pf->si->hw; + mdio_priv->mdio_base = ENETC_EMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + err = of_mdiobus_register(bus, np); + if (err) { + dev_err(dev, "cannot register MDIO bus\n"); + return err; + } + + pf->mdio = bus; + + return 0; +} + +static void enetc_mdio_remove(struct enetc_pf *pf) +{ + if (pf->mdio) + mdiobus_unregister(pf->mdio); +} + +static int 
enetc_imdio_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct lynx_pcs *pcs_lynx;
+ struct mdio_device *pcs;
+ struct mii_bus *bus;
+ int err;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC internal MDIO Bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
+ goto free_mdio_bus;
+ }
+
+ pcs = mdio_device_create(bus, 0);
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err(dev, "cannot create pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pcs_lynx = lynx_pcs_create(pcs);
+ if (!pcs_lynx) {
+ mdio_device_free(pcs);
+ err = -ENOMEM;
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pf->imdio = bus;
+ pf->pcs = pcs_lynx;
+
+ return 0;
+
+unregister_mdiobus:
+ mdiobus_unregister(bus);
+free_mdio_bus:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void enetc_imdio_remove(struct enetc_pf *pf)
+{
+ if (pf->pcs) {
+ mdio_device_free(pf->pcs->mdio);
+ lynx_pcs_destroy(pf->pcs);
+ }
+ if (pf->imdio) {
+ mdiobus_unregister(pf->imdio);
+ mdiobus_free(pf->imdio);
+ }
+}
+
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
+{
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void enetc_mdiobus_destroy(struct enetc_pf *pf)
+{
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
+
+static void enetc_pl_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_2500BASEX &&
+ state->interface != PHY_INTERFACE_MODE_USXGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseT_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising,
state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void enetc_pl_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + struct enetc_ndev_priv *priv; + + enetc_mac_config(&pf->si->hw, state->interface); + + priv = netdev_priv(pf->si->ndev); + if (pf->pcs) + phylink_set_pcs(priv->phylink, &pf->pcs->pcs); +} + +static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex) +{ + u32 old_val, val; + + old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE); + + if (speed == SPEED_1000) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_1000; + } else if (speed == SPEED_100) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_100; + } else if (speed == SPEED_10) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_10; + } + + if (duplex == DUPLEX_FULL) + val |= ENETC_PM0_IFM_FULL_DPX; + else + val &= ~ENETC_PM0_IFM_FULL_DPX; + + if (val == old_val) + return; + + enetc_port_wr(hw, ENETC_PM0_IF_MODE, val); +} + +static void enetc_pl_mac_link_up(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, + int duplex, bool tx_pause, bool rx_pause) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + struct enetc_ndev_priv *priv; + + priv = netdev_priv(pf->si->ndev); + + if (pf->si->hw_features & ENETC_SI_F_QBV) + enetc_sched_speed_set(priv, speed); + + if (!phylink_autoneg_inband(mode) && + phy_interface_mode_is_rgmii(interface)) + enetc_force_rgmii_mac(&pf->si->hw, speed, duplex); + + enetc_mac_enable(&pf->si->hw, true); +} + +static void enetc_pl_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + + enetc_mac_enable(&pf->si->hw, false); +} + +static const struct phylink_mac_ops enetc_mac_phylink_ops = { + .validate = enetc_pl_mac_validate, + .mac_config = enetc_pl_mac_config, + .mac_link_up = enetc_pl_mac_link_up, + .mac_link_down = enetc_pl_mac_link_down, +}; + +static int enetc_phylink_create(struct enetc_ndev_priv *priv, + struct device_node *node) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct phylink *phylink; + int err; + + pf->phylink_config.dev = &priv->ndev->dev; + pf->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node), + pf->if_mode, &enetc_mac_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + return err; + } + + priv->phylink = phylink; + + return 0; +} + +static void enetc_phylink_destroy(struct enetc_ndev_priv *priv) +{ + if (priv->phylink) + phylink_destroy(priv->phylink); +} + +/* Initialize the entire shared memory for the flow steering entries + * of this port (PF + VFs) + */ +static int enetc_init_port_rfs_memory(struct enetc_si *si) +{ + struct enetc_cmd_rfse rfse = {0}; + struct enetc_hw *hw = &si->hw; + int num_rfs, i, err = 0; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRFSCAPR); + num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val); + + for (i = 0; i < num_rfs; i++) { + err = enetc_set_fs_entry(si, &rfse, i); + if (err) + break; + } + + return err; +} + +static int enetc_init_port_rss_memory(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + int num_rss, err; + int *rss_table; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRSSCAPR); + num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val); + if (!num_rss) + return 0; + + rss_table = kcalloc(num_rss, 
sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return -ENOMEM; + + err = enetc_set_rss_table(si, rss_table, num_rss); + + kfree(rss_table); + + return err; +} + +static void enetc_init_unused_port(struct enetc_si *si) +{ + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + int err; + + si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; + err = enetc_alloc_cbdr(dev, &si->cbd_ring); + if (err) + return; + + enetc_setup_cbdr(hw, &si->cbd_ring); + + enetc_init_port_rfs_memory(si); + enetc_init_port_rss_memory(si); + + enetc_clear_cbdr(hw); + enetc_free_cbdr(dev, &si->cbd_ring); +} + +static int enetc_pf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device_node *node = pdev->dev.of_node; + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + struct enetc_pf *pf; + int err; + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); + if (err) { + dev_err(&pdev->dev, "PCI probing failed\n"); + return err; + } + + si = pci_get_drvdata(pdev); + if (!si->hw.port || !si->hw.global) { + err = -ENODEV; + dev_err(&pdev->dev, "could not map PF space, probing a VF?\n"); + goto err_map_pf_space; + } + + if (node && !of_device_is_available(node)) { + enetc_init_unused_port(si); + dev_info(&pdev->dev, "device is disabled, skipping\n"); + err = -ENODEV; + goto err_device_disabled; + } + + pf = enetc_si_priv(si); + pf->si = si; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + + enetc_configure_port(pf); + + enetc_get_si_caps(si); + + ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS); + if (!ndev) { + err = -ENOMEM; + dev_err(&pdev->dev, "netdev creation failed\n"); + goto err_alloc_netdev; + } + + enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops); + + priv = netdev_priv(ndev); + + enetc_init_si_rings_params(priv); + + err = enetc_alloc_si_resources(priv); + if (err) { + dev_err(&pdev->dev, "SI resource alloc failed\n"); + goto err_alloc_si_res; + } + + err = enetc_init_port_rfs_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); + goto err_init_port_rfs; + } + + err = enetc_init_port_rss_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); + goto err_init_port_rss; + } + + err = enetc_configure_si(priv); + if (err) { + dev_err(&pdev->dev, "Failed to configure SI\n"); + goto err_config_si; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); + goto err_alloc_msix; + } + + if (!of_get_phy_mode(node, &pf->if_mode)) { + err = enetc_mdiobus_create(pf, node); + if (err) + goto err_mdiobus_create; + + err = enetc_phylink_create(priv, node); + if (err) + goto err_phylink_create; + } + + err = register_netdev(ndev); + if (err) + goto err_reg_netdev; + + return 0; + +err_reg_netdev: + enetc_phylink_destroy(priv); +err_phylink_create: + enetc_mdiobus_destroy(pf); +err_mdiobus_create: + enetc_free_msix(priv); +err_config_si: +err_init_port_rss: +err_init_port_rfs: +err_alloc_msix: + enetc_free_si_resources(priv); +err_alloc_si_res: + si->ndev = NULL; + free_netdev(ndev); +err_alloc_netdev: +err_device_disabled: +err_map_pf_space: + enetc_pci_remove(pdev); + + return err; +} + +static void enetc_pf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_ndev_priv *priv; + + priv = netdev_priv(si->ndev); + + if (pf->num_vfs) + enetc_sriov_configure(pdev, 0); + + unregister_netdev(si->ndev); + + enetc_phylink_destroy(priv); + 
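+ /* Teardown mirrors probe in reverse: the netdev and phylink are
+ * released first, then the MDIO buses, the MSI-X vectors, the SI
+ * resources, and finally the netdev memory and the PCI state below.
+ */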
enetc_mdiobus_destroy(pf); + + enetc_free_msix(priv); + + enetc_free_si_resources(priv); + + free_netdev(si->ndev); + + enetc_pci_remove(pdev); +} + +static const struct pci_device_id enetc_pf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_pf_id_table); + +static struct pci_driver enetc_pf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pf_id_table, + .probe = enetc_pf_probe, + .remove = enetc_pf_remove, +#ifdef CONFIG_PCI_IOV + .sriov_configure = enetc_sriov_configure, +#endif +}; +module_pci_driver(enetc_pf_driver); + +MODULE_DESCRIPTION(ENETC_DRV_NAME_STR); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h new file mode 100644 index 000000000..263946c51 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" +#include <linux/pcs-lynx.h> + +#define ENETC_PF_NUM_RINGS 8 + +enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; +#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE) + +#define ENETC_MADDR_HASH_TBL_SZ 64 +struct enetc_mac_filter { + union { + char mac_addr[ETH_ALEN]; + DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); + }; + int mac_addr_cnt; +}; + +#define ENETC_VLAN_HT_SIZE 64 + +enum enetc_vf_flags { + ENETC_VF_FLAG_PF_SET_MAC = BIT(0), +}; + +struct enetc_vf_state { + enum enetc_vf_flags flags; +}; + +struct enetc_pf { + struct enetc_si *si; + int num_vfs; /* number of active VFs, after sriov_init */ + int total_vfs; /* max number of VFs, set for PF at probe */ + struct enetc_vf_state *vf_state; + + struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT]; + + struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS]; + struct work_struct msg_task; + char msg_int_name[ENETC_INT_NAME_MAX]; + + char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */ + DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE); + DECLARE_BITMAP(active_vlans, VLAN_N_VID); + + struct mii_bus *mdio; /* saved for cleanup */ + struct mii_bus *imdio; + struct lynx_pcs *pcs; + + phy_interface_t if_mode; + struct phylink_config phylink_config; +}; + +#define phylink_to_enetc_pf(config) \ + container_of((config), struct enetc_pf, phylink_config) + +int enetc_msg_psi_init(struct enetc_pf *pf); +void enetc_msg_psi_free(struct enetc_pf *pf); +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c new file mode 100644 index 000000000..8c3661525 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/fsl/ptp_qoriq.h> + +#include "enetc.h" + +int enetc_phc_index = -1; +EXPORT_SYMBOL_GPL(enetc_phc_index); + +static struct ptp_clock_info enetc_ptp_caps = { + .owner = THIS_MODULE, + .name = "ENETC PTP clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = 2, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfine = ptp_qoriq_adjfine, + .adjtime = ptp_qoriq_adjtime, + .gettime64 = ptp_qoriq_gettime, + .settime64 = ptp_qoriq_settime, + .enable = ptp_qoriq_enable, +}; + +static int enetc_ptp_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + 
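+ /* Probe outline: enable the PCI function, pick a 64-bit DMA mask
+ * (with a 32-bit fallback), map the register BAR, allocate a single
+ * MSI-X vector for the timer interrupt, and only then register the
+ * clock with the common ptp_qoriq layer.
+ */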
struct ptp_qoriq *ptp_qoriq; + void __iomem *base; + int err, len, n; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + return err; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL); + if (!ptp_qoriq) { + err = -ENOMEM; + goto err_alloc_ptp; + } + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + + base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!base) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + + /* Allocate 1 interrupt */ + n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (n != 1) { + err = -EPERM; + goto err_irq_vectors; + } + + ptp_qoriq->irq = pci_irq_vector(pdev, 0); + + err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq); + if (err) { + dev_err(&pdev->dev, "request_irq() failed!\n"); + goto err_irq; + } + + ptp_qoriq->dev = &pdev->dev; + + err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps); + if (err) + goto err_no_clock; + + enetc_phc_index = ptp_qoriq->phc_index; + pci_set_drvdata(pdev, ptp_qoriq); + + return 0; + +err_no_clock: + free_irq(ptp_qoriq->irq, ptp_qoriq); +err_irq: + pci_free_irq_vectors(pdev); +err_irq_vectors: + iounmap(base); +err_ioremap: + kfree(ptp_qoriq); +err_alloc_ptp: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +static void enetc_ptp_remove(struct pci_dev *pdev) +{ + struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev); + + enetc_phc_index = -1; + ptp_qoriq_free(ptp_qoriq); + pci_free_irq_vectors(pdev); + kfree(ptp_qoriq); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_ptp_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table); + +static struct pci_driver enetc_ptp_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_ptp_id_table, + .probe = enetc_ptp_probe, + .remove = enetc_ptp_remove, +}; +module_pci_driver(enetc_ptp_driver); + +MODULE_DESCRIPTION("ENETC PTP clock driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c new file mode 100644 index 000000000..d7215bd77 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -0,0 +1,1608 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include "enetc.h" + +#include <net/pkt_sched.h> +#include <linux/math64.h> +#include <linux/refcount.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gate.h> + +static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) +{ + return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET) + & ENETC_QBV_MAX_GCL_LEN_MASK; +} + +void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 old_speed = priv->speed; + u32 pspeed, tmp; + + if (speed == old_speed) + return; + + switch (speed) { + case SPEED_1000: + pspeed = ENETC_PMR_PSPEED_1000M; + break; + case SPEED_2500: + pspeed = ENETC_PMR_PSPEED_2500M; + break; + case SPEED_100: + pspeed = ENETC_PMR_PSPEED_100M; + break; + case SPEED_10: + default: + pspeed = ENETC_PMR_PSPEED_10M; + } + + priv->speed = speed; + tmp = enetc_port_rd(hw, ENETC_PMR); + enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed); +} + +static int enetc_setup_taprio(struct net_device *ndev, + struct tc_taprio_qopt_offload *admin_conf) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_cbd cbd = {.cmd = 0}; + struct tgs_gcl_conf *gcl_config; + struct tgs_gcl_data *gcl_data; + struct gce *gce; + dma_addr_t dma; + u16 data_size; + u16 gcl_len; + u32 tge; + int err; + int i; + + if (admin_conf->num_entries > enetc_get_max_gcl_len(hw)) + return -EINVAL; + gcl_len = admin_conf->num_entries; + + tge = enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET); + if (!admin_conf->enable) { + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE); + + priv->active_offloads &= ~ENETC_F_QBV; + + return 0; + } + + if (admin_conf->cycle_time > U32_MAX || + admin_conf->cycle_time_extension > U32_MAX) + return -EINVAL; + + /* Configure the (administrative) gate control list using the + * control BD descriptor. 
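+ * The buffer built below has a fixed tgs_gcl_data header followed by
+ * gcl_len gce entries; schematically, for a two-entry schedule (the
+ * values are purely illustrative):
+ *
+ *   btl/bth = lower/upper half of base_time
+ *   ct      = cycle_time,  cte = cycle_time_extension
+ *   gce[0]  = { gate 0xff, period 500000 ns }
+ *   gce[1]  = { gate 0x01, period 500000 ns }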
+ */
+ gcl_config = &cbd.gcl_conf;
+
+ data_size = struct_size(gcl_data, entry, gcl_len);
+ gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!gcl_data)
+ return -ENOMEM;
+
+ gce = (struct gce *)(gcl_data + 1);
+
+ /* Set all gates open by default */
+ gcl_config->atc = 0xff;
+ gcl_config->acl_len = cpu_to_le16(gcl_len);
+
+ gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
+ gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
+ gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
+ gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
+
+ for (i = 0; i < gcl_len; i++) {
+ struct tc_taprio_sched_entry *temp_entry;
+ struct gce *temp_gce = gce + i;
+
+ temp_entry = &admin_conf->entries[i];
+
+ temp_gce->gate = (u8)temp_entry->gate_mask;
+ temp_gce->period = cpu_to_le32(temp_entry->interval);
+ }
+
+ cbd.length = cpu_to_le16(data_size);
+ cbd.status_flags = 0;
+
+ dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
+ data_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(gcl_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+ cbd.cls = BDCR_CMD_PORT_GCL;
+ cbd.status_flags = 0;
+
+ enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge | ENETC_QBV_TGE);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE);
+
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
+ kfree(gcl_data);
+
+ if (!err)
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return err;
+}
+
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_bdr *tx_ring;
+ int err;
+ int i;
+
+ /* TSD and Qbv are mutually exclusive in hardware */
+ for (i = 0; i < priv->num_tx_rings; i++)
+ if (priv->tx_ring[i]->tsd_enable)
+ return -EBUSY;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? i : 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ err = enetc_setup_taprio(ndev, taprio);
+ if (err) {
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? 0 : i;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+ }
+
+ return err;
+}
+
+static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
+}
+
+static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
+{
+ return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
+}
+
+int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_cbs_qopt_offload *cbs = type_data;
+ u32 port_transmit_rate = priv->speed;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 hi_credit_bit, hi_credit_reg;
+ u32 max_interference_size;
+ u32 port_frame_max_size;
+ u8 tc = cbs->queue;
+ u8 prio_top, prio_next;
+ int bw_sum = 0;
+ u8 bw;
+
+ prio_top = tc_nums - 1;
+ prio_next = tc_nums - 2;
+
+ /* Only the two highest priority TCs are supported in CBS mode */
+ if (tc != prio_top && tc != prio_next)
+ return -EOPNOTSUPP;
+
+ if (!cbs->enable) {
+ /* Make sure the TCs that are numerically
+ * lower than this one have been disabled first.
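+ * With eight traffic classes, for instance, TC7 is prio_top and
+ * TC6 is prio_next, so CBS must be torn down on TC6 before TC7.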
+ */
+ if (tc == prio_top &&
+ enetc_get_cbs_enable(hw, prio_next)) {
+ dev_err(&ndev->dev,
+ "Disable TC%d before disabling TC%d\n",
+ prio_next, tc);
+ return -EINVAL;
+ }
+
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
+
+ return 0;
+ }
+
+ if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
+ cbs->idleslope < 0 || cbs->sendslope > 0)
+ return -EOPNOTSUPP;
+
+ port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ bw = cbs->idleslope / (port_transmit_rate * 10UL);
+
+ /* Make sure the TCs that are numerically
+ * higher than this one have been enabled first.
+ */
+ if (tc == prio_next) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
+ dev_err(&ndev->dev,
+ "Enable TC%d first, before enabling TC%d\n",
+ prio_top, prio_next);
+ return -EINVAL;
+ }
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
+ }
+
+ if (bw_sum + bw >= 100) {
+ dev_err(&ndev->dev,
+ "The sum of all CBS bandwidth can't exceed 100\n");
+ return -EINVAL;
+ }
+
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
+
+ /* For the top priority TC, max_interference_size is maxSizedFrame.
+ *
+ * For the next priority TC, max_interference_size is calculated as:
+ *
+ * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
+ *
+ * - Ra: idleSlope for AVB Class A
+ * - R0: port transmit rate
+ * - M0: maximum sized frame for the port
+ * - Ma: maximum sized frame for AVB Class A
+ */
+
+ if (tc == prio_top) {
+ max_interference_size = port_frame_max_size * 8;
+ } else {
+ u32 m0, ma, r0, ra;
+
+ m0 = port_frame_max_size * 8;
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
+ port_transmit_rate * 10000ULL;
+ r0 = port_transmit_rate * 1000000ULL;
+ max_interference_size = m0 + ma +
+ (u32)div_u64((u64)ra * m0, r0 - ra);
+ }
+
+ /* hiCredit bits are calculated as:
+ *
+ * maxSizedFrame * (idleSlope / portTxRate)
+ */
+ hi_credit_bit = max_interference_size * bw / 100;
+
+ /* hiCredit bits are converted to the hiCredit register value by
+ * scaling with:
+ *
+ * (enetClockFrequency / portTransmitRate) * 100
+ */
+ hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ port_transmit_rate * 1000000ULL);
+
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+
+ /* Set bw register and enable this traffic class */
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+
+ return 0;
+}
+
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_etf_qopt_offload *qopt = type_data;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int tc;
+
+ if (!tc_nums)
+ return -EOPNOTSUPP;
+
+ tc = qopt->queue;
+
+ if (tc < 0 || tc >= priv->num_tx_rings)
+ return -EINVAL;
+
+ /* TXSTART and TX checksum offload are not supported simultaneously */
+ if (ndev->features & NETIF_F_CSUM_MASK)
+ return -EBUSY;
+
+ /* TSD and Qbv are mutually exclusive in hardware */
+ if (enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ return -EBUSY;
+
+ priv->tx_ring[tc]->tsd_enable = qopt->enable;
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ?
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct tc_etf_qopt_offload *qopt = type_data;
+	u8 tc_nums = netdev_get_num_tc(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	int tc;
+
+	if (!tc_nums)
+		return -EOPNOTSUPP;
+
+	tc = qopt->queue;
+
+	if (tc < 0 || tc >= priv->num_tx_rings)
+		return -EINVAL;
+
+	/* TXSTART and TX csum offload are not supported simultaneously */
+	if (ndev->features & NETIF_F_CSUM_MASK)
+		return -EBUSY;
+
+	/* TSD and Qbv are mutually exclusive in hardware */
+	if (enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+		return -EBUSY;
+
+	priv->tx_ring[tc]->tsd_enable = qopt->enable;
+	enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
+
+	return 0;
+}
+
+enum streamid_type {
+	STREAMID_TYPE_RESERVED = 0,
+	STREAMID_TYPE_NULL,
+	STREAMID_TYPE_SMAC,
+};
+
+enum streamid_vlan_tagged {
+	STREAMID_VLAN_RESERVED = 0,
+	STREAMID_VLAN_TAGGED,
+	STREAMID_VLAN_UNTAGGED,
+	STREAMID_VLAN_ALL,
+};
+
+#define ENETC_PSFP_WILDCARD -1
+#define HANDLE_OFFSET 100
+
+enum forward_type {
+	FILTER_ACTION_TYPE_PSFP = BIT(0),
+	FILTER_ACTION_TYPE_ACL = BIT(1),
+	FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
+};
+
+/* Limits the allowed output type for a given set of input actions */
+struct actions_fwd {
+	u64 actions;
+	u64 keys;	/* includes the mandatory keys */
+	enum forward_type output;
+};
+
+struct psfp_streamfilter_counters {
+	u64 matching_frames_count;
+	u64 passing_frames_count;
+	u64 not_passing_frames_count;
+	u64 passing_sdu_count;
+	u64 not_passing_sdu_count;
+	u64 red_frames_count;
+};
+
+struct enetc_streamid {
+	u32 index;
+	union {
+		u8 src_mac[6];
+		u8 dst_mac[6];
+	};
+	u8 filtertype;
+	u16 vid;
+	u8 tagged;
+	s32 handle;
+};
+
+struct enetc_psfp_filter {
+	u32 index;
+	s32 handle;
+	s8 prio;
+	u32 maxsdu;
+	u32 gate_id;
+	s32 meter_id;
+	refcount_t refcount;
+	struct hlist_node node;
+};
+
+struct enetc_psfp_gate {
+	u32 index;
+	s8 init_ipv;
+	u64 basetime;
+	u64 cycletime;
+	u64 cycletimext;
+	u32 num_entries;
+	refcount_t refcount;
+	struct hlist_node node;
+	struct action_gate_entry entries[];
+};
+
+/* Only green color frames are enabled for now. EIR/EBS, color blind mode,
+ * the coupling flag, etc. will be added when the policing action gains
+ * more offload parameters.
+ */
+struct enetc_psfp_meter {
+	u32 index;
+	u32 cir;
+	u32 cbs;
+	refcount_t refcount;
+	struct hlist_node node;
+};
+
+#define ENETC_PSFP_FLAGS_FMI BIT(0)
+
+struct enetc_stream_filter {
+	struct enetc_streamid sid;
+	u32 sfi_index;
+	u32 sgi_index;
+	u32 flags;
+	u32 fmi_index;
+	struct flow_stats stats;
+	struct hlist_node node;
+};
+
+struct enetc_psfp {
+	unsigned long dev_bitmap;
+	unsigned long *psfp_sfi_bitmap;
+	struct hlist_head stream_list;
+	struct hlist_head psfp_filter_list;
+	struct hlist_head psfp_gate_list;
+	struct hlist_head psfp_meter_list;
+	spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
+};
+
+static struct actions_fwd enetc_act_fwd[] = {
+	{
+		BIT(FLOW_ACTION_GATE),
+		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+		FILTER_ACTION_TYPE_PSFP
+	},
+	{
+		BIT(FLOW_ACTION_POLICE) |
+		BIT(FLOW_ACTION_GATE),
+		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+		FILTER_ACTION_TYPE_PSFP
+	},
+	/* example for ACL actions */
+	{
+		BIT(FLOW_ACTION_DROP),
+		0,
+		FILTER_ACTION_TYPE_ACL
+	}
+};
+
+static struct enetc_psfp epsfp = {
+	.psfp_sfi_bitmap = NULL,
+};
+
+static LIST_HEAD(enetc_block_cb_list);
+
+static inline int enetc_get_port(struct enetc_ndev_priv *priv)
+{
+	return priv->si->pdev->devfn & 0x7;
+}
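+/* Illustrative usage (exact iproute2 syntax may vary by version; names,
+ * addresses and times are examples): the PSFP tables above are exercised
+ * by a flower rule with a gate action, optionally combined with police:
+ *
+ *	tc filter add dev eth0 ingress chain 1 protocol 802.1Q \
+ *		flower skip_sw dst_mac CA:FE:CA:FE:CA:FE vlan_id 100 \
+ *		action gate index 2 clockid CLOCK_TAI \
+ *		base-time 1000000000 \
+ *		sched-entry open 200000000 7 -1 \
+ *		sched-entry close 100000000 -1 -1
+ *
+ * The chain index selects the stream identification entry, the gate
+ * entries fill a struct enetc_psfp_gate, and a police action would add a
+ * struct enetc_psfp_meter.
+ */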
+/* Stream Identity Entry Set Descriptor */
+static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
+				 struct enetc_streamid *sid,
+				 u8 enable)
+{
+	struct enetc_cbd cbd = {.cmd = 0};
+	struct streamid_data *si_data;
+	struct streamid_conf *si_conf;
+	u16 data_size;
+	dma_addr_t dma;
+	int err;
+
+	if (sid->index >= priv->psfp_cap.max_streamid)
+		return -EINVAL;
+
+	if (sid->filtertype != STREAMID_TYPE_NULL &&
+	    sid->filtertype != STREAMID_TYPE_SMAC)
+		return -EOPNOTSUPP;
+
+	/* Disable the entry before (re)enabling it */
+	cbd.index = cpu_to_le16((u16)sid->index);
+	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+	cbd.status_flags = 0;
+
+	data_size = sizeof(struct streamid_data);
+	si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+	if (!si_data)
+		return -ENOMEM;
+
+	cbd.length = cpu_to_le16(data_size);
+
+	dma = dma_map_single(&priv->si->pdev->dev, si_data,
+			     data_size, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	cbd.addr[0] = lower_32_bits(dma);
+	cbd.addr[1] = upper_32_bits(dma);
+	eth_broadcast_addr(si_data->dmac);
+	si_data->vid_vidm_tg =
+		cpu_to_le16(ENETC_CBDR_SID_VID_MASK
+			    + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
+
+	si_conf = &cbd.sid_set;
+	/* Only one port is supported per entry; set this port's own bit */
+	si_conf->iports = 1 << enetc_get_port(priv);
+	si_conf->id_type = 1;
+	si_conf->oui[2] = 0x0;
+	si_conf->oui[1] = 0x80;
+	si_conf->oui[0] = 0xC2;
+
+	err = enetc_send_cmd(priv->si, &cbd);
+	if (err)
+		goto out;
+
+	if (!enable)
+		goto out;
+
+	/* Enable the entry by overwriting it, in case the hardware flushed
+	 * the entry space.
+	 */
+	memset(&cbd, 0, sizeof(cbd));
+
+	cbd.index = cpu_to_le16((u16)sid->index);
+	cbd.cmd = 0;
+	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+	cbd.status_flags = 0;
+
+	si_conf->en = 0x80;
+	si_conf->stream_handle = cpu_to_le32(sid->handle);
+	si_conf->iports = 1 << enetc_get_port(priv);
+	si_conf->id_type = sid->filtertype;
+	si_conf->oui[2] = 0x0;
+	si_conf->oui[1] = 0x80;
+	si_conf->oui[0] = 0xC2;
+
+	memset(si_data, 0, data_size);
+
+	cbd.length = cpu_to_le16(data_size);
+
+	cbd.addr[0] = lower_32_bits(dma);
+	cbd.addr[1] = upper_32_bits(dma);
+
+	/* VIDM defaults to 1.
+	 * VID Match. If set (b1) then the VID must match, otherwise
+	 * any VID is considered a match. The VIDM setting is only used
+	 * when TG is set to b01.
+	 */
+	if (si_conf->id_type == STREAMID_TYPE_NULL) {
+		ether_addr_copy(si_data->dmac, sid->dst_mac);
+		si_data->vid_vidm_tg =
+			cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK)
+				    + ((((u16)(sid->tagged) & 0x3) << 14)
+				       | ENETC_CBDR_SID_VIDM));
+	} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
+		ether_addr_copy(si_data->smac, sid->src_mac);
+		si_data->vid_vidm_tg =
+			cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK)
+				    + ((((u16)(sid->tagged) & 0x3) << 14)
+				       | ENETC_CBDR_SID_VIDM));
+	}
+
+	err = enetc_send_cmd(priv->si, &cbd);
+out:
+	if (!dma_mapping_error(&priv->si->pdev->dev, dma))
+		dma_unmap_single(&priv->si->pdev->dev, dma, data_size,
+				 DMA_FROM_DEVICE);
+
+	kfree(si_data);
+
+	return err;
+}
+
+/* Stream Filter Instance Set Descriptor */
+static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
+				     struct enetc_psfp_filter *sfi,
+				     u8 enable)
+{
+	struct enetc_cbd cbd = {.cmd = 0};
+	struct sfi_conf *sfi_config;
+
+	cbd.index = cpu_to_le16(sfi->index);
+	cbd.cls = BDCR_CMD_STREAM_FILTER;
+	cbd.status_flags = 0x80;
+	cbd.length = cpu_to_le16(1);
+
+	sfi_config = &cbd.sfi_conf;
+	if (!enable)
+		goto exit;
+
+	sfi_config->en = 0x80;
+
+	if (sfi->handle >= 0) {
+		sfi_config->stream_handle =
+			cpu_to_le32(sfi->handle);
+		sfi_config->sthm |= 0x80;
+	}
+
+	sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
+	sfi_config->input_ports = 1 << enetc_get_port(priv);
+
+	/* The priority value which may be matched against the
+	 * frame's priority value to determine a match for this entry.
+	 */
+	if (sfi->prio >= 0)
+		sfi_config->multi |= (sfi->prio & 0x7) | 0x8;
+
+	/* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
+	 * field as being either an MSDU value or an index into the Flow
+	 * Meter Instance table.
+	 */
+	if (sfi->maxsdu) {
+		sfi_config->msdu =
+			cpu_to_le16(sfi->maxsdu);
+		sfi_config->multi |= 0x40;
+	}
+
+	if (sfi->meter_id >= 0) {
+		sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
+		sfi_config->multi |= 0x80;
+	}
+
+exit:
+	return enetc_send_cmd(priv->si, &cbd);
+}
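+/* Bit layout of sfi_conf->multi as used above (inferred from this code,
+ * not quoted from the reference manual): bits 2:0 carry the priority
+ * value, bit 3 enables priority matching, bit 6 enables the MSDU check
+ * and bit 7 enables flow meter lookup via fm_inst_table_index.
+ */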
+static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
+				      u32 index,
+				      struct psfp_streamfilter_counters *cnt)
+{
+	struct enetc_cbd cbd = { .cmd = 2 };
+	struct sfi_counter_data *data_buf;
+	dma_addr_t dma;
+	u16 data_size;
+	int err;
+
+	cbd.index = cpu_to_le16((u16)index);
+	cbd.cmd = 2;
+	cbd.cls = BDCR_CMD_STREAM_FILTER;
+	cbd.status_flags = 0;
+
+	data_size = sizeof(struct sfi_counter_data);
+	data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	dma = dma_map_single(&priv->si->pdev->dev, data_buf,
+			     data_size, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+	cbd.addr[0] = lower_32_bits(dma);
+	cbd.addr[1] = upper_32_bits(dma);
+
+	cbd.length = cpu_to_le16(data_size);
+
+	err = enetc_send_cmd(priv->si, &cbd);
+	if (err)
+		goto exit;
+
+	cnt->matching_frames_count =
+		((u64)le32_to_cpu(data_buf->matchh) << 32) +
+		le32_to_cpu(data_buf->matchl);
+
+	cnt->not_passing_sdu_count =
+		((u64)le32_to_cpu(data_buf->msdu_droph) << 32) +
+		le32_to_cpu(data_buf->msdu_dropl);
+
+	cnt->passing_sdu_count = cnt->matching_frames_count
+				- cnt->not_passing_sdu_count;
+
+	cnt->not_passing_frames_count =
+		((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32) +
+		le32_to_cpu(data_buf->stream_gate_dropl);
+
+	cnt->passing_frames_count = cnt->matching_frames_count -
+				    cnt->not_passing_sdu_count -
+				    cnt->not_passing_frames_count;
+
+	cnt->red_frames_count =
+		((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32) +
+		le32_to_cpu(data_buf->flow_meter_dropl);
+
+exit:
+	kfree(data_buf);
+	return err;
+}
+
+static u64 get_ptp_now(struct enetc_hw *hw)
+{
+	u64 now_lo, now_hi, now;
+
+	now_lo = enetc_rd(hw, ENETC_SICTR0);
+	now_hi = enetc_rd(hw, ENETC_SICTR1);
+	now = now_lo | now_hi << 32;
+
+	return now;
+}
+
+static int get_start_ns(u64 now, u64 cycle, u64 *start)
+{
+	u64 n;
+
+	if (!cycle)
+		return -EFAULT;
+
+	n = div64_u64(now, cycle);
+
+	*start = (n + 1) * cycle;
+
+	return 0;
+}
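+/* Example for get_start_ns(): with now = 1000500 ns and cycle = 200000 ns,
+ * n = 5 and the returned start is (5 + 1) * 200000 = 1200000 ns, the first
+ * cycle boundary after 'now'. A zero cycle is rejected up front because
+ * the division would be undefined.
+ */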
+/* Stream Gate Instance Set Descriptor */
+static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
+				   struct enetc_psfp_gate *sgi,
+				   u8 enable)
+{
+	struct enetc_cbd cbd = { .cmd = 0 };
+	struct sgi_table *sgi_config;
+	struct sgcl_conf *sgcl_config;
+	struct sgcl_data *sgcl_data;
+	struct sgce *sgce;
+	dma_addr_t dma;
+	u16 data_size;
+	int err, i;
+	u64 now;
+
+	cbd.index = cpu_to_le16(sgi->index);
+	cbd.cmd = 0;
+	cbd.cls = BDCR_CMD_STREAM_GCL;
+	cbd.status_flags = 0x80;
+
+	/* disable */
+	if (!enable)
+		return enetc_send_cmd(priv->si, &cbd);
+
+	if (!sgi->num_entries)
+		return 0;
+
+	if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
+	    !sgi->cycletime)
+		return -EINVAL;
+
+	/* enable */
+	sgi_config = &cbd.sgi_table;
+
+	/* Keep the gates open before the gate list starts */
+	sgi_config->ocgtst = 0x80;
+
+	sgi_config->oipv = (sgi->init_ipv < 0) ?
+			   0x0 : ((sgi->init_ipv & 0x7) | 0x8);
+
+	sgi_config->en = 0x80;
+
+	/* Basic config */
+	err = enetc_send_cmd(priv->si, &cbd);
+	if (err)
+		return -EINVAL;
+
+	memset(&cbd, 0, sizeof(cbd));
+
+	cbd.index = cpu_to_le16(sgi->index);
+	cbd.cmd = 1;
+	cbd.cls = BDCR_CMD_STREAM_GCL;
+	cbd.status_flags = 0;
+
+	sgcl_config = &cbd.sgcl_conf;
+
+	sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
+
+	data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
+
+	sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+	if (!sgcl_data)
+		return -ENOMEM;
+
+	cbd.length = cpu_to_le16(data_size);
+
+	dma = dma_map_single(&priv->si->pdev->dev,
+			     sgcl_data, data_size,
+			     DMA_FROM_DEVICE);
+	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+		kfree(sgcl_data);
+		return -ENOMEM;
+	}
+
+	cbd.addr[0] = lower_32_bits(dma);
+	cbd.addr[1] = upper_32_bits(dma);
+
+	sgce = &sgcl_data->sgcl[0];
+
+	sgcl_config->agtst = 0x80;
+
+	sgcl_data->ct = cpu_to_le32(sgi->cycletime);
+	sgcl_data->cte = cpu_to_le32(sgi->cycletimext);
+
+	if (sgi->init_ipv >= 0)
+		sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
+
+	for (i = 0; i < sgi->num_entries; i++) {
+		struct action_gate_entry *from = &sgi->entries[i];
+		struct sgce *to = &sgce[i];
+
+		if (from->gate_state)
+			to->multi |= 0x10;
+
+		if (from->ipv >= 0)
+			to->multi |= ((from->ipv & 0x7) << 5) | 0x08;
+
+		if (from->maxoctets >= 0) {
+			to->multi |= 0x01;
+			to->msdu[0] = from->maxoctets & 0xFF;
+			to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
+			to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
+		}
+
+		to->interval = cpu_to_le32(from->interval);
+	}
+
+	/* If basetime is in the past, calculate the start of the next cycle */
+	now = get_ptp_now(&priv->si->hw);
+
+	if (sgi->basetime < now) {
+		u64 start;
+
+		err = get_start_ns(now, sgi->cycletime, &start);
+		if (err)
+			goto exit;
+		sgcl_data->btl = cpu_to_le32(lower_32_bits(start));
+		sgcl_data->bth = cpu_to_le32(upper_32_bits(start));
+	} else {
+		u32 hi, lo;
+
+		hi = upper_32_bits(sgi->basetime);
+		lo = lower_32_bits(sgi->basetime);
+		sgcl_data->bth = cpu_to_le32(hi);
+		sgcl_data->btl = cpu_to_le32(lo);
+	}
+
+	err = enetc_send_cmd(priv->si, &cbd);
+
+exit:
+	kfree(sgcl_data);
+
+	return err;
+}
+
+static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
+				  struct enetc_psfp_meter *fmi,
+				  u8 enable)
+{
+	struct enetc_cbd cbd = { .cmd = 0 };
+	struct fmi_conf *fmi_config;
+	u64 temp = 0;
+
+	cbd.index = cpu_to_le16((u16)fmi->index);
+	cbd.cls = BDCR_CMD_FLOW_METER;
+	cbd.status_flags = 0x80;
+
+	if (!enable)
+		return enetc_send_cmd(priv->si, &cbd);
+
+	fmi_config = &cbd.fmi_conf;
+	fmi_config->en = 0x80;
+
+	if (fmi->cir) {
+		temp = (u64)8000 * fmi->cir;
+		temp = div_u64(temp, 3725);
+	}
+
+	fmi_config->cir = cpu_to_le32((u32)temp);
+	fmi_config->cbs = cpu_to_le32(fmi->cbs);
+
+	/* EIR and EBS default to disabled */
+	fmi_config->eir = 0;
+	fmi_config->ebs = 0;
+
+	/* Defaults:
+	 * mark red disabled
+	 * drop on yellow disabled
+	 * color mode disabled
+	 * couple flag disabled
+	 */
+	fmi_config->conf = 0;
+
+	return enetc_send_cmd(priv->si, &cbd);
+}
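+/* The CIR conversion above implies a register granularity of 3.725 bits
+ * per second per unit (8000 / 3725 == 8 / 3.725); this is an inference
+ * from the arithmetic, not a documented constant. For example,
+ * rate_bytes_ps = 12500000 (100 Mbit/s) programs
+ * cir = 8000 * 12500000 / 3725 = 26845637.
+ */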
+static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
+{
+	struct enetc_stream_filter *f;
+
+	hlist_for_each_entry(f, &epsfp.stream_list, node)
+		if (f->sid.index == index)
+			return f;
+
+	return NULL;
+}
+
+static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
+{
+	struct enetc_psfp_gate *g;
+
+	hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
+		if (g->index == index)
+			return g;
+
+	return NULL;
+}
+
+static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
+{
+	struct enetc_psfp_filter *s;
+
+	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+		if (s->index == index)
+			return s;
+
+	return NULL;
+}
+
+static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
+{
+	struct enetc_psfp_meter *m;
+
+	hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
+		if (m->index == index)
+			return m;
+
+	return NULL;
+}
+
+static struct enetc_psfp_filter
+	*enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
+{
+	struct enetc_psfp_filter *s;
+
+	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+		if (s->gate_id == sfi->gate_id &&
+		    s->prio == sfi->prio &&
+		    s->maxsdu == sfi->maxsdu &&
+		    s->meter_id == sfi->meter_id)
+			return s;
+
+	return NULL;
+}
+
+static int enetc_get_free_index(struct enetc_ndev_priv *priv)
+{
+	u32 max_size = priv->psfp_cap.max_psfp_filter;
+	unsigned long index;
+
+	index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
+	if (index == max_size)
+		return -1;
+
+	return index;
+}
+
+static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+	struct enetc_psfp_filter *sfi;
+	u8 z;
+
+	sfi = enetc_get_filter_by_index(index);
+	if (WARN_ON(!sfi))
+		return;
+	z = refcount_dec_and_test(&sfi->refcount);
+
+	if (z) {
+		enetc_streamfilter_hw_set(priv, sfi, false);
+		hlist_del(&sfi->node);
+		kfree(sfi);
+		clear_bit(index, epsfp.psfp_sfi_bitmap);
+	}
+}
+
+static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+	struct enetc_psfp_gate *sgi;
+	u8 z;
+
+	sgi = enetc_get_gate_by_index(index);
+	if (WARN_ON(!sgi))
+		return;
+	z = refcount_dec_and_test(&sgi->refcount);
+	if (z) {
+		enetc_streamgate_hw_set(priv, sgi, false);
+		hlist_del(&sgi->node);
+		kfree(sgi);
+	}
+}
+
+static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+	struct enetc_psfp_meter *fmi;
+	u8 z;
+
+	fmi = enetc_get_meter_by_index(index);
+	if (WARN_ON(!fmi))
+		return;
+	z = refcount_dec_and_test(&fmi->refcount);
+	if (z) {
+		enetc_flowmeter_hw_set(priv, fmi, false);
+		hlist_del(&fmi->node);
+		kfree(fmi);
+	}
+}
+
+static void remove_one_chain(struct enetc_ndev_priv *priv,
+			     struct enetc_stream_filter *filter)
+{
+	if (filter->flags & ENETC_PSFP_FLAGS_FMI)
+		flow_meter_unref(priv, filter->fmi_index);
+
+	stream_gate_unref(priv, filter->sgi_index);
+	stream_filter_unref(priv, filter->sfi_index);
+
+	hlist_del(&filter->node);
+	kfree(filter);
+}
+
+static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
+			     struct enetc_streamid *sid,
+			     struct enetc_psfp_filter *sfi,
+			     struct enetc_psfp_gate *sgi,
+			     struct enetc_psfp_meter *fmi)
+{
+	int err;
+
+	err = enetc_streamid_hw_set(priv, sid, true);
+	if (err)
+		return err;
+
+	if (sfi) {
+		err = enetc_streamfilter_hw_set(priv, sfi, true);
+		if (err)
+			goto revert_sid;
+	}
+
+	err = enetc_streamgate_hw_set(priv, sgi, true);
+	if (err)
+		goto revert_sfi;
+
+	if (fmi) {
+		err = enetc_flowmeter_hw_set(priv, fmi, true);
+		if (err)
+			goto revert_sgi;
+	}
+
+	return 0;
+
+revert_sgi:
+	enetc_streamgate_hw_set(priv, sgi, false);
+revert_sfi:
+	if (sfi)
+		enetc_streamfilter_hw_set(priv, sfi, false);
+revert_sid:
+	enetc_streamid_hw_set(priv, sid, false);
+	return err;
+}
+
+static struct actions_fwd *enetc_check_flow_actions(u64 acts,
+						    unsigned int inputkeys)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
+		if (acts == enetc_act_fwd[i].actions &&
+		    inputkeys & enetc_act_fwd[i].keys)
+			return &enetc_act_fwd[i];
+
+	return NULL;
+}
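+/* Table entries are shared between streams: enetc_psfp_check_sfi() above
+ * deduplicates identical stream filters, and the *_unref() helpers drop a
+ * reference, tearing the hardware entry down only when the last user goes
+ * away.
+ */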
+static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+				      struct flow_cls_offload *f)
+{
+	struct flow_action_entry *entryg = NULL, *entryp = NULL;
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct netlink_ext_ack *extack = f->common.extack;
+	struct enetc_stream_filter *filter, *old_filter;
+	struct enetc_psfp_meter *fmi = NULL, *old_fmi;
+	struct enetc_psfp_filter *sfi, *old_sfi;
+	struct enetc_psfp_gate *sgi, *old_sgi;
+	struct flow_action_entry *entry;
+	struct action_gate_entry *e;
+	u8 sfi_overwrite = 0;
+	int entries_size;
+	int i, err;
+
+	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+		NL_SET_ERR_MSG_MOD(extack, "No stream identification resource!");
+		return -ENOSPC;
+	}
+
+	flow_action_for_each(i, entry, &rule->action)
+		if (entry->id == FLOW_ACTION_GATE)
+			entryg = entry;
+		else if (entry->id == FLOW_ACTION_POLICE)
+			entryp = entry;
+
+	/* Rules without a gate action are not supported */
+	if (!entryg)
+		return -EINVAL;
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return -ENOMEM;
+
+	filter->sid.index = f->common.chain_index;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_match_eth_addrs match;
+
+		flow_rule_match_eth_addrs(rule, &match);
+
+		if (!is_zero_ether_addr(match.mask->dst) &&
+		    !is_zero_ether_addr(match.mask->src)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Cannot match on both source and destination MAC");
+			err = -EINVAL;
+			goto free_filter;
+		}
+
+		if (!is_zero_ether_addr(match.mask->dst)) {
+			if (!is_broadcast_ether_addr(match.mask->dst)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "Masked matching on destination MAC not supported");
+				err = -EINVAL;
+				goto free_filter;
+			}
+			ether_addr_copy(filter->sid.dst_mac, match.key->dst);
+			filter->sid.filtertype = STREAMID_TYPE_NULL;
+		}
+
+		if (!is_zero_ether_addr(match.mask->src)) {
+			if (!is_broadcast_ether_addr(match.mask->src)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "Masked matching on source MAC not supported");
+				err = -EINVAL;
+				goto free_filter;
+			}
+			ether_addr_copy(filter->sid.src_mac, match.key->src);
+			filter->sid.filtertype = STREAMID_TYPE_SMAC;
+		}
+	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
+		err = -EINVAL;
+		goto free_filter;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
+
+		flow_rule_match_vlan(rule, &match);
+		if (match.mask->vlan_priority) {
+			if (match.mask->vlan_priority !=
+			    (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
+				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
+				err = -EINVAL;
+				goto free_filter;
+			}
+		}
+
+		if (match.mask->vlan_id) {
+			if (match.mask->vlan_id != VLAN_VID_MASK) {
+				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
+				err = -EINVAL;
+				goto free_filter;
+			}
+
+			filter->sid.vid = match.key->vlan_id;
+			if (!filter->sid.vid)
+				filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
+			else
+				filter->sid.tagged = STREAMID_VLAN_TAGGED;
+		}
+	} else {
+		filter->sid.tagged = STREAMID_VLAN_ALL;
+	}
+
+	/* Parse the gate action */
+	if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
+		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+		err = -ENOSPC;
+		goto free_filter;
+	}
+
+	if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
+		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+		err = -ENOSPC;
+		goto free_filter;
+	}
+
+	entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
+	sgi = kzalloc(entries_size, GFP_KERNEL);
+	if (!sgi) {
+		err = -ENOMEM;
+		goto free_filter;
+	}
+
+	refcount_set(&sgi->refcount, 1);
+	sgi->index = entryg->gate.index;
+	sgi->init_ipv = entryg->gate.prio;
+	sgi->basetime = entryg->gate.basetime;
+	sgi->cycletime = entryg->gate.cycletime;
+	sgi->num_entries = entryg->gate.num_entries;
+
+	e = sgi->entries;
+	for (i = 0; i < entryg->gate.num_entries; i++) {
+		e[i].gate_state = entryg->gate.entries[i].gate_state;
+		e[i].interval = entryg->gate.entries[i].interval;
+		e[i].ipv = entryg->gate.entries[i].ipv;
+		e[i].maxoctets = entryg->gate.entries[i].maxoctets;
+	}
+
+	filter->sgi_index = sgi->index;
+
+	sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
+	if (!sfi) {
+		err = -ENOMEM;
+		goto free_gate;
+	}
+
+	refcount_set(&sfi->refcount, 1);
+	sfi->gate_id = sgi->index;
+	sfi->meter_id = ENETC_PSFP_WILDCARD;
+
+	/* Flow meter and max frame size */
+	if (entryp) {
+		if (entryp->police.burst) {
+			fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
+			if (!fmi) {
+				err = -ENOMEM;
+				goto free_sfi;
+			}
+			refcount_set(&fmi->refcount, 1);
+			fmi->cir = entryp->police.rate_bytes_ps;
+			fmi->cbs = entryp->police.burst;
+			fmi->index = entryp->police.index;
+			filter->flags |= ENETC_PSFP_FLAGS_FMI;
+			filter->fmi_index = fmi->index;
+			sfi->meter_id = fmi->index;
+		}
+
+		if (entryp->police.mtu)
+			sfi->maxsdu = entryp->police.mtu;
+	}
+
+	/* sfi->prio mirrors the tc filter priority */
+	if (f->common.prio && f->common.prio <= BIT(3))
+		sfi->prio = f->common.prio - 1;
+	else
+		sfi->prio = ENETC_PSFP_WILDCARD;
+
+	old_sfi = enetc_psfp_check_sfi(sfi);
+	if (!old_sfi) {
+		int index;
+
+		index = enetc_get_free_index(priv);
+		if (index < 0) {
+			NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+			err = -ENOSPC;
+			goto free_fmi;
+		}
+
+		sfi->index = index;
+		sfi->handle = index + HANDLE_OFFSET;
+		/* Update the stream filter handle also */
+		filter->sid.handle = sfi->handle;
+		filter->sfi_index = sfi->index;
+		sfi_overwrite = 0;
+	} else {
+		filter->sfi_index = old_sfi->index;
+		filter->sid.handle = old_sfi->handle;
+		sfi_overwrite = 1;
+	}
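+	/* When an identical SFI is being reused, pass a NULL sfi below so
+	 * the existing hardware stream filter entry is left untouched.
+	 */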
+	err = enetc_psfp_hw_set(priv, &filter->sid,
+				sfi_overwrite ? NULL : sfi, sgi, fmi);
+	if (err)
+		goto free_fmi;
+
+	spin_lock(&epsfp.psfp_lock);
+	if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
+		old_fmi = enetc_get_meter_by_index(filter->fmi_index);
+		if (old_fmi) {
+			fmi->refcount = old_fmi->refcount;
+			refcount_set(&fmi->refcount,
+				     refcount_read(&old_fmi->refcount) + 1);
+			hlist_del(&old_fmi->node);
+			kfree(old_fmi);
+		}
+		hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
+	}
+
+	/* Remove the old node if it exists and replace it with the new one */
+	old_sgi = enetc_get_gate_by_index(filter->sgi_index);
+	if (old_sgi) {
+		refcount_set(&sgi->refcount,
+			     refcount_read(&old_sgi->refcount) + 1);
+		hlist_del(&old_sgi->node);
+		kfree(old_sgi);
+	}
+
+	hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
+
+	if (!old_sfi) {
+		hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
+		set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
+	} else {
+		kfree(sfi);
+		refcount_inc(&old_sfi->refcount);
+	}
+
+	old_filter = enetc_get_stream_by_index(filter->sid.index);
+	if (old_filter)
+		remove_one_chain(priv, old_filter);
+
+	filter->stats.lastused = jiffies;
+	hlist_add_head(&filter->node, &epsfp.stream_list);
+
+	spin_unlock(&epsfp.psfp_lock);
+
+	return 0;
+
+free_fmi:
+	kfree(fmi);
+free_sfi:
+	kfree(sfi);
+free_gate:
+	kfree(sgi);
+free_filter:
+	kfree(filter);
+
+	return err;
+}
+
+static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
+				  struct flow_cls_offload *cls_flower)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+	struct netlink_ext_ack *extack = cls_flower->common.extack;
+	struct flow_dissector *dissector = rule->match.dissector;
+	struct flow_action *action = &rule->action;
+	struct flow_action_entry *entry;
+	struct actions_fwd *fwd;
+	u64 actions = 0;
+	int i, err;
+
+	if (!flow_action_has_entries(action)) {
+		NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
+		return -EINVAL;
+	}
+
+	flow_action_for_each(i, entry, action)
+		actions |= BIT(entry->id);
+
+	fwd = enetc_check_flow_actions(actions, dissector->used_keys);
+	if (!fwd) {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
+		return -EOPNOTSUPP;
+	}
+
+	if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
+		err = enetc_psfp_parse_clsflower(priv, cls_flower);
+		if (err) {
+			NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
+			return err;
+		}
+	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
+					struct flow_cls_offload *f)
+{
+	struct enetc_stream_filter *filter;
+	struct netlink_ext_ack *extack = f->common.extack;
+	int err;
+
+	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+		NL_SET_ERR_MSG_MOD(extack, "No stream identification resource!");
+		return -ENOSPC;
+	}
+
+	filter = enetc_get_stream_by_index(f->common.chain_index);
+	if (!filter)
+		return -EINVAL;
+
+	err = enetc_streamid_hw_set(priv, &filter->sid, false);
+	if (err)
+		return err;
+
+	remove_one_chain(priv, filter);
+
+	return 0;
+}
+
+static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
+				   struct flow_cls_offload *f)
+{
+	return enetc_psfp_destroy_clsflower(priv, f);
+}
+static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
+				struct flow_cls_offload *f)
+{
+	struct psfp_streamfilter_counters counters = {};
+	struct enetc_stream_filter *filter;
+	struct flow_stats stats = {};
+	int err;
+
+	filter = enetc_get_stream_by_index(f->common.chain_index);
+	if (!filter)
+		return -EINVAL;
+
+	err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
+	if (err)
+		return -EINVAL;
+
+	spin_lock(&epsfp.psfp_lock);
+	stats.pkts = counters.matching_frames_count +
+		     counters.not_passing_sdu_count -
+		     filter->stats.pkts;
+	stats.drops = counters.not_passing_frames_count +
+		      counters.not_passing_sdu_count +
+		      counters.red_frames_count -
+		      filter->stats.drops;
+	stats.lastused = filter->stats.lastused;
+	filter->stats.pkts += stats.pkts;
+	filter->stats.drops += stats.drops;
+	spin_unlock(&epsfp.psfp_lock);
+
+	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
+			  stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);
+
+	return 0;
+}
+
+static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
+				     struct flow_cls_offload *cls_flower)
+{
+	switch (cls_flower->command) {
+	case FLOW_CLS_REPLACE:
+		return enetc_config_clsflower(priv, cls_flower);
+	case FLOW_CLS_DESTROY:
+		return enetc_destroy_clsflower(priv, cls_flower);
+	case FLOW_CLS_STATS:
+		return enetc_psfp_get_stats(priv, cls_flower);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static inline void clean_psfp_sfi_bitmap(void)
+{
+	bitmap_free(epsfp.psfp_sfi_bitmap);
+	epsfp.psfp_sfi_bitmap = NULL;
+}
+
+static void clean_stream_list(void)
+{
+	struct enetc_stream_filter *s;
+	struct hlist_node *tmp;
+
+	hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
+		hlist_del(&s->node);
+		kfree(s);
+	}
+}
+
+static void clean_sfi_list(void)
+{
+	struct enetc_psfp_filter *sfi;
+	struct hlist_node *tmp;
+
+	hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
+		hlist_del(&sfi->node);
+		kfree(sfi);
+	}
+}
+
+static void clean_sgi_list(void)
+{
+	struct enetc_psfp_gate *sgi;
+	struct hlist_node *tmp;
+
+	hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
+		hlist_del(&sgi->node);
+		kfree(sgi);
+	}
+}
+
+static void clean_psfp_all(void)
+{
+	/* Disable all list nodes and free all memory */
+	clean_sfi_list();
+	clean_sgi_list();
+	clean_stream_list();
+	epsfp.dev_bitmap = 0;
+	clean_psfp_sfi_bitmap();
+}
+
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+			    void *cb_priv)
+{
+	struct net_device *ndev = cb_priv;
+
+	if (!tc_can_offload(ndev))
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	int err;
+
+	if (en) {
+		err = enetc_psfp_enable(priv);
+		if (err)
+			return err;
+
+		priv->active_offloads |= ENETC_F_QCI;
+		return 0;
+	}
+
+	err = enetc_psfp_disable(priv);
+	if (err)
+		return err;
+
+	priv->active_offloads &= ~ENETC_F_QCI;
+
+	return 0;
+}
+
+int enetc_psfp_init(struct enetc_ndev_priv *priv)
+{
+	if (epsfp.psfp_sfi_bitmap)
+		return 0;
+
+	epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
+					      GFP_KERNEL);
+	if (!epsfp.psfp_sfi_bitmap)
+		return -ENOMEM;
+
+	spin_lock_init(&epsfp.psfp_lock);
+
+	if (list_empty(&enetc_block_cb_list))
+		epsfp.dev_bitmap = 0;
+
+	return 0;
+}
+
+int enetc_psfp_clean(struct enetc_ndev_priv *priv)
+{
+	if (!list_empty(&enetc_block_cb_list))
+		return -EBUSY;
+
+	clean_psfp_all();
+
+	return 0;
+}
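+/* Ports binding a flow block are tracked in epsfp.dev_bitmap below; when
+ * the last port unbinds, all PSFP state is torn down via clean_psfp_all().
+ */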
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct flow_block_offload *f = type_data;
+	int err;
+
+	err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
+					 enetc_setup_tc_block_cb,
+					 ndev, ndev, true);
+	if (err)
+		return err;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+		break;
+	case FLOW_BLOCK_UNBIND:
+		clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+		if (!epsfp.dev_bitmap)
+			clean_psfp_all();
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
new file mode 100644
index 000000000..5ce3e2593
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include "enetc.h"
+
+#define ENETC_DRV_NAME_STR "ENETC VF driver"
+
+/* Messaging */
+static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
+				    struct enetc_msg_swbd *msg)
+{
+	u32 val;
+
+	val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma);
+	enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma));
+	enetc_wr(hw, ENETC_VSIMSGSNDAR0, val);
+}
+
+static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
+{
+	int timeout = 100;
+	u32 vsimsgsr;
+
+	enetc_msg_vsi_write_msg(&si->hw, msg);
+
+	do {
+		vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR);
+		if (!(vsimsgsr & ENETC_VSIMSGSR_MB))
+			break;
+
+		usleep_range(1000, 2000);
+	} while (--timeout);
+
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	/* check for message delivery error */
+	if (vsimsgsr & ENETC_VSIMSGSR_MS) {
+		dev_err(&si->pdev->dev, "VSI command execute error: %d\n",
+			ENETC_SIMSGSR_GET_MC(vsimsgsr));
+		return -EIO;
+	}
+
+	return 0;
+}
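+/* With up to 100 iterations and a 1-2 ms sleep per poll, the loop above
+ * waits roughly 100-200 ms at most for the PF to consume the message
+ * before giving up with -ETIMEDOUT.
+ */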
+static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
+					      struct sockaddr *saddr)
+{
+	struct enetc_msg_cmd_set_primary_mac *cmd;
+	struct enetc_msg_swbd msg;
+	int err;
+
+	msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64);
+	msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
+				       GFP_KERNEL);
+	if (!msg.vaddr) {
+		dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n",
+			msg.size);
+		return -ENOMEM;
+	}
+
+	cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr;
+	cmd->header.type = ENETC_MSG_CMD_MNG_MAC;
+	cmd->header.id = ENETC_MSG_CMD_MNG_ADD;
+	memcpy(&cmd->mac, saddr, sizeof(struct sockaddr));
+
+	/* send the command and wait */
+	err = enetc_msg_vsi_send(priv->si, &msg);
+
+	dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
+
+	return err;
+}
+
+static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+}
+
+static int enetc_vf_set_features(struct net_device *ndev,
+				 netdev_features_t features)
+{
+	enetc_set_features(ndev, features);
+
+	return 0;
+}
+
+/* Probing / Init */
+static const struct net_device_ops enetc_ndev_ops = {
+	.ndo_open		= enetc_open,
+	.ndo_stop		= enetc_close,
+	.ndo_start_xmit		= enetc_xmit,
+	.ndo_get_stats		= enetc_get_stats,
+	.ndo_set_mac_address	= enetc_vf_set_mac_addr,
+	.ndo_set_features	= enetc_vf_set_features,
+	.ndo_do_ioctl		= enetc_ioctl,
+	.ndo_setup_tc		= enetc_setup_tc,
+};
+
+static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+				  const struct net_device_ops *ndev_ops)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	SET_NETDEV_DEV(ndev, &si->pdev->dev);
+	priv->ndev = ndev;
+	priv->si = si;
+	priv->dev = &si->pdev->dev;
+	si->ndev = ndev;
+
+	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+	ndev->netdev_ops = ndev_ops;
+	enetc_set_ethtool_ops(ndev);
+	ndev->watchdog_timeo = 5 * HZ;
+	ndev->max_mtu = ENETC_MAX_MTU;
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			    NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_CTAG_RX;
+	ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+			 NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			 NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (si->num_rss)
+		ndev->hw_features |= NETIF_F_RXHASH;
+
+	if (si->errata & ENETC_ERR_TXCSUM) {
+		ndev->hw_features &= ~NETIF_F_HW_CSUM;
+		ndev->features &= ~NETIF_F_HW_CSUM;
+	}
+
+	/* pick up the primary MAC address from the SI */
+	enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_vf_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *ent)
+{
+	struct enetc_ndev_priv *priv;
+	struct net_device *ndev;
+	struct enetc_si *si;
+	int err;
+
+	err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
+	if (err) {
+		dev_err(&pdev->dev, "PCI probing failed\n");
+		return err;
+	}
+
+	si = pci_get_drvdata(pdev);
+
+	enetc_get_si_caps(si);
+
+	ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+	if (!ndev) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "netdev creation failed\n");
+		goto err_alloc_netdev;
+	}
+
+	enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+	priv = netdev_priv(ndev);
+
+	enetc_init_si_rings_params(priv);
+
+	err = enetc_alloc_si_resources(priv);
+	if (err) {
+		dev_err(&pdev->dev, "SI resource alloc failed\n");
+		goto err_alloc_si_res;
+	}
+
+	err = enetc_configure_si(priv);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to configure SI\n");
+		goto err_config_si;
+	}
+
+	err = enetc_alloc_msix(priv);
+	if (err) {
+		dev_err(&pdev->dev, "MSIX alloc failed\n");
+		goto err_alloc_msix;
+	}
+
+	err = register_netdev(ndev);
+	if (err)
+		goto err_reg_netdev;
+
+	netif_carrier_off(ndev);
+
+	return 0;
+
+err_reg_netdev:
+	enetc_free_msix(priv);
+err_config_si:
+err_alloc_msix:
+	enetc_free_si_resources(priv);
+err_alloc_si_res:
+	si->ndev = NULL;
+	free_netdev(ndev);
+err_alloc_netdev:
+	enetc_pci_remove(pdev);
+
+	return err;
+}
+
+static void enetc_vf_remove(struct pci_dev *pdev)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_ndev_priv *priv;
+
+	priv = netdev_priv(si->ndev);
+	unregister_netdev(si->ndev);
+
+	enetc_free_msix(priv);
+
+	enetc_free_si_resources(priv);
+
+	free_netdev(si->ndev);
+
+	enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_vf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+	{ 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_vf_id_table);
+
+static struct pci_driver enetc_vf_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = enetc_vf_id_table,
+	.probe = enetc_vf_probe,
+	.remove = enetc_vf_remove,
+};
+module_pci_driver(enetc_vf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");