author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree       b2d64bc10158fdd5497876388cd68142ca374ed3  /drivers/net/ethernet/freescale/enetc
parent     Initial commit.
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/freescale/enetc')
17 files changed, 10668 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 0000000000..4d75e6807e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC_CORE
+	tristate
+	help
+	  This module supports common functionality between the PF and VF
+	  drivers for the NXP ENETC controller.
+
+	  If compiled as module (M), the module name is fsl-enetc-core.
+
+config FSL_ENETC
+	tristate "ENETC PF driver"
+	depends on PCI_MSI
+	select MDIO_DEVRES
+	select FSL_ENETC_CORE
+	select FSL_ENETC_IERB
+	select FSL_ENETC_MDIO
+	select PHYLINK
+	select PCS_LYNX
+	select DIMLIB
+	help
+	  This driver supports NXP ENETC gigabit Ethernet controller PCIe
+	  physical function (PF) devices, managing ENETC Ports at a privileged
+	  level.
+
+	  If compiled as module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+	tristate "ENETC VF driver"
+	depends on PCI_MSI
+	select FSL_ENETC_CORE
+	select FSL_ENETC_MDIO
+	select PHYLINK
+	select DIMLIB
+	help
+	  This driver supports NXP ENETC gigabit Ethernet controller PCIe
+	  virtual function (VF) devices enabled by the ENETC PF driver.
+
+	  If compiled as module (M), the module name is fsl-enetc-vf.
+
+config FSL_ENETC_IERB
+	tristate "ENETC IERB driver"
+	help
+	  This driver configures the Integrated Endpoint Register Block on the
+	  NXP LS1028A.
+
+	  If compiled as module (M), the module name is fsl-enetc-ierb.
+
+config FSL_ENETC_MDIO
+	tristate "ENETC MDIO driver"
+	depends on PCI && MDIO_DEVRES && MDIO_BUS
+	help
+	  This driver supports the NXP ENETC Central MDIO controller as a PCIe
+	  physical function (PF) device.
+
+	  If compiled as module (M), the module name is fsl-enetc-mdio.
+
+config FSL_ENETC_PTP_CLOCK
+	tristate "ENETC PTP clock driver"
+	depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
+	default y
+	help
+	  This driver adds support for using the ENETC 1588 timer
+	  as a PTP clock. This clock is only useful if your PTP
+	  programs are getting hardware timestamps on PTP Ethernet
+	  packets using the SO_TIMESTAMPING API.
+
+	  If compiled as module (M), the module name is fsl-enetc-ptp.
+
+config FSL_ENETC_QOS
+	bool "ENETC hardware Time-Sensitive Networking support"
+	depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
+	help
+	  The ENETC supports Time-Sensitive Networking (TSN) capabilities such
+	  as 802.1Qbv, 802.1Qci and 802.1Qbu. These can be enabled or disabled
+	  from user space via the tc QoS commands and are handled in the
+	  kernel by the QoS offload driver. Currently only taprio (802.1Qbv)
+	  and the Credit Based Shaper (802.1Qav) are supported.
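Taken together, the entries above mean that enabling the PF and/or VF drivers pulls in the core, IERB and MDIO modules through select, so only the top-level options need to be switched on. As a minimal sketch (an illustrative .config fragment for an LS1028A-style system, not part of this commit), the resulting configuration could look like:

CONFIG_PCI_MSI=y
CONFIG_PTP_1588_CLOCK_QORIQ=m
CONFIG_NET_SCH_TAPRIO=y
CONFIG_FSL_ENETC=m
CONFIG_FSL_ENETC_VF=m
CONFIG_FSL_ENETC_QOS=y
# selected or defaulted automatically by the options above:
CONFIG_FSL_ENETC_CORE=m
CONFIG_FSL_ENETC_IERB=m
CONFIG_FSL_ENETC_MDIO=m
CONFIG_FSL_ENETC_PTP_CLOCK=m

Note that FSL_ENETC_PTP_CLOCK is not selected but defaults to y (limited to m here by PTP_1588_CLOCK_QORIQ=m), and FSL_ENETC_QOS additionally requires NET_SCH_TAPRIO or NET_SCH_CBS.

The FSL_ENETC_PTP_CLOCK help text refers to the SO_TIMESTAMPING API; as a rough user-space sketch (again not part of this commit), requesting hardware timestamps on a socket looks like:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Ask the kernel for hardware TX/RX timestamps on this socket; the NIC
 * itself must have been configured beforehand via the SIOCSHWTSTAMP
 * ioctl.
 */
static int enable_hw_timestamping(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}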
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 0000000000..b13cbbabb2
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
+fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
+
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-y := enetc_pf.o
+fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o
+
+obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
+fsl-enetc-ierb-y := enetc_ierb.o
+
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
+
+obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 0000000000..b92e3aa7cd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,3219 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/bpf_trace.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/vmalloc.h>
+#include <linux/ptp_classify.h>
+#include <net/ip6_checksum.h>
+#include <net/pkt_sched.h>
+#include <net/tso.h>
+
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
+{
+	return enetc_port_rd(&si->hw, reg);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
+
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
+{
+	enetc_port_wr(&si->hw, reg, val);
+	if (si->hw_features & ENETC_SI_F_QBU)
+		enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
+
+static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
+					 u8 preemptible_tcs)
+{
+	priv->preemptible_tcs = preemptible_tcs;
+	enetc_mm_commit_preemptible_tcs(priv);
+}
+
+static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
+{
+	int num_tx_rings = priv->num_tx_rings;
+
+	if (priv->xdp_prog)
+		return num_tx_rings - num_possible_cpus();
+
+	return num_tx_rings;
+}
+
+static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
+							struct enetc_bdr *tx_ring)
+{
+	int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
+
+	return priv->rx_ring[index];
+}
+
+static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
+		return NULL;
+
+	return tx_swbd->skb;
+}
+
+static struct xdp_frame *
+enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->is_xdp_redirect)
+		return tx_swbd->xdp_frame;
+
+	return NULL;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+				struct enetc_tx_swbd *tx_swbd)
+{
+	/* For XDP_TX, pages come from RX, whereas for the other contexts where
+	 * we have is_dma_page_set, those come from skb_frag_dma_map. We need
+	 * to match the DMA mapping length, so we need to differentiate those.
+	 */
+	if (tx_swbd->is_dma_page)
+		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+			       tx_swbd->is_xdp_tx ?
PAGE_SIZE : tx_swbd->len, + tx_swbd->dir); + else + dma_unmap_single(tx_ring->dev, tx_swbd->dma, + tx_swbd->len, tx_swbd->dir); + tx_swbd->dma = 0; +} + +static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); + struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); + + if (tx_swbd->dma) + enetc_unmap_tx_buff(tx_ring, tx_swbd); + + if (xdp_frame) { + xdp_return_frame(tx_swbd->xdp_frame); + tx_swbd->xdp_frame = NULL; + } else if (skb) { + dev_kfree_skb_any(skb); + tx_swbd->skb = NULL; + } +} + +/* Let H/W know BD ring has been updated */ +static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) +{ + /* includes wmb() */ + enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); +} + +static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, + u8 *msgtype, u8 *twostep, + u16 *correction_offset, u16 *body_offset) +{ + unsigned int ptp_class; + struct ptp_header *hdr; + unsigned int type; + u8 *base; + + ptp_class = ptp_classify_raw(skb); + if (ptp_class == PTP_CLASS_NONE) + return -EINVAL; + + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + return -EINVAL; + + type = ptp_class & PTP_CLASS_PMASK; + if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6) + *udp = 1; + else + *udp = 0; + + *msgtype = ptp_get_msgtype(hdr, ptp_class); + *twostep = hdr->flag_field[0] & 0x2; + + base = skb_mac_header(skb); + *correction_offset = (u8 *)&hdr->correction - base; + *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; + + return 0; +} + +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) +{ + bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_tx_swbd *tx_swbd; + int len = skb_headlen(skb); + union enetc_tx_bd temp_bd; + u8 msgtype, twostep, udp; + union enetc_tx_bd *txbd; + u16 offset1, offset2; + int i, count = 0; + skb_frag_t *frag; + unsigned int f; + dma_addr_t dma; + u8 flags = 0; + + i = tx_ring->next_to_use; + txbd = ENETC_TXBD(*tx_ring, i); + prefetchw(txbd); + + dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) + goto dma_err; + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + temp_bd.lstatus = 0; + + tx_swbd = &tx_ring->tx_swbd[i]; + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 0; + tx_swbd->dir = DMA_TO_DEVICE; + count++; + + do_vlan = skb_vlan_tag_present(skb); + if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { + if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1, + &offset2) || + msgtype != PTP_MSGTYPE_SYNC || twostep) + WARN_ONCE(1, "Bad packet for one-step timestamping\n"); + else + do_onestep_tstamp = true; + } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) { + do_twostep_tstamp = true; + } + + tx_swbd->do_twostep_tstamp = do_twostep_tstamp; + tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV); + tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en; + + if (do_vlan || do_onestep_tstamp || do_twostep_tstamp) + flags |= ENETC_TXBD_FLAGS_EX; + + if (tx_ring->tsd_enable) + flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART; + + /* first BD needs frm_len and offload flags set */ + temp_bd.frm_len = cpu_to_le16(skb->len); + temp_bd.flags = flags; + + if (flags & ENETC_TXBD_FLAGS_TSE) + temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns, + flags); + + if 
(flags & ENETC_TXBD_FLAGS_EX) { + u8 e_flags = 0; + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + /* add extension BD for VLAN and/or timestamping */ + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + if (do_vlan) { + temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + temp_bd.ext.tpid = 0; /* < C-TAG */ + e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; + } + + if (do_onestep_tstamp) { + u32 lo, hi, val; + u64 sec, nsec; + u8 *data; + + lo = enetc_rd_hot(hw, ENETC_SICTR0); + hi = enetc_rd_hot(hw, ENETC_SICTR1); + sec = (u64)hi << 32 | lo; + nsec = do_div(sec, 1000000000); + + /* Configure extension BD */ + temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff); + e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP; + + /* Update originTimestamp field of Sync packet + * - 48 bits seconds field + * - 32 bits nanseconds field + */ + data = skb_mac_header(skb); + *(__be16 *)(data + offset2) = + htons((sec >> 32) & 0xffff); + *(__be32 *)(data + offset2 + 2) = + htonl(sec & 0xffffffff); + *(__be32 *)(data + offset2 + 6) = htonl(nsec); + + /* Configure single-step register */ + val = ENETC_PM0_SINGLE_STEP_EN; + val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1); + if (udp) + val |= ENETC_PM0_SINGLE_STEP_CH; + + enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP, + val); + } else if (do_twostep_tstamp) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; + } + + temp_bd.ext.e_flags = e_flags; + count++; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_err; + + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 1; + tx_swbd->dir = DMA_TO_DEVICE; + count++; + } + + /* last BD needs 'F' bit set */ + flags |= ENETC_TXBD_FLAGS_F; + temp_bd.flags = flags; + *txbd = temp_bd; + + tx_ring->tx_swbd[i].is_eof = true; + tx_ring->tx_swbd[i].skb = skb; + + enetc_bdr_idx_inc(tx_ring, &i); + tx_ring->next_to_use = i; + + skb_tx_timestamp(skb); + + enetc_update_tx_ring_tail(tx_ring); + + return count; + +dma_err: + dev_err(tx_ring->dev, "DMA map error"); + + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (count--); + + return 0; +} + +static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, int *i, int hdr_len, + int data_len) +{ + union enetc_tx_bd txbd_tmp; + u8 flags = 0, e_flags = 0; + dma_addr_t addr; + + enetc_clear_tx_bd(&txbd_tmp); + addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; + + if (skb_vlan_tag_present(skb)) + flags |= ENETC_TXBD_FLAGS_EX; + + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.buf_len = cpu_to_le16(hdr_len); + + /* first BD needs frm_len and offload flags set */ + txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len); + txbd_tmp.flags = flags; + + /* For the TSO header we do not set the dma address since we do not + * want it 
unmapped when we do cleanup. We still set len so that we + * count the bytes sent. + */ + tx_swbd->len = hdr_len; + tx_swbd->do_twostep_tstamp = false; + tx_swbd->check_wb = false; + + /* Actually write the header in the BD */ + *txbd = txbd_tmp; + + /* Add extension BD for VLAN */ + if (flags & ENETC_TXBD_FLAGS_EX) { + /* Get the next BD */ + enetc_bdr_idx_inc(tx_ring, i); + txbd = ENETC_TXBD(*tx_ring, *i); + tx_swbd = &tx_ring->tx_swbd[*i]; + prefetchw(txbd); + + /* Setup the VLAN fields */ + enetc_clear_tx_bd(&txbd_tmp); + txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + txbd_tmp.ext.tpid = 0; /* < C-TAG */ + e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; + + /* Write the BD */ + txbd_tmp.ext.e_flags = e_flags; + *txbd = txbd_tmp; + } +} + +static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, char *data, + int size, bool last_bd) +{ + union enetc_tx_bd txbd_tmp; + dma_addr_t addr; + u8 flags = 0; + + enetc_clear_tx_bd(&txbd_tmp); + + addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { + netdev_err(tx_ring->ndev, "DMA map error\n"); + return -ENOMEM; + } + + if (last_bd) { + flags |= ENETC_TXBD_FLAGS_F; + tx_swbd->is_eof = 1; + } + + txbd_tmp.addr = cpu_to_le64(addr); + txbd_tmp.buf_len = cpu_to_le16(size); + txbd_tmp.flags = flags; + + tx_swbd->dma = addr; + tx_swbd->len = size; + tx_swbd->dir = DMA_TO_DEVICE; + + *txbd = txbd_tmp; + + return 0; +} + +static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb, + char *hdr, int hdr_len, int *l4_hdr_len) +{ + char *l4_hdr = hdr + skb_transport_offset(skb); + int mac_hdr_len = skb_network_offset(skb); + + if (tso->tlen != sizeof(struct udphdr)) { + struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); + + tcph->check = 0; + } else { + struct udphdr *udph = (struct udphdr *)(l4_hdr); + + udph->check = 0; + } + + /* Compute the IP checksum. This is necessary since tso_build_hdr() + * already incremented the IP ID field. + */ + if (!tso->ipv6) { + struct iphdr *iph = (void *)(hdr + mac_hdr_len); + + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + } + + /* Compute the checksum over the L4 header. */ + *l4_hdr_len = hdr_len - skb_transport_offset(skb); + return csum_partial(l4_hdr, *l4_hdr_len, 0); +} + +static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso, + struct sk_buff *skb, char *hdr, int len, + __wsum sum) +{ + char *l4_hdr = hdr + skb_transport_offset(skb); + __sum16 csum_final; + + /* Complete the L4 checksum by appending the pseudo-header to the + * already computed checksum. 
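+ * (The pseudo-header, i.e. the IP addresses, the L4 length and the
+ * protocol number, is part of the TCP/UDP checksum by definition;
+ * csum_tcpudp_magic() and csum_ipv6_magic() fold it into the partial
+ * sum and return the final, folded 16-bit checksum.)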
+ */ + if (!tso->ipv6) + csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + len, ip_hdr(skb)->protocol, sum); + else + csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + len, ipv6_hdr(skb)->nexthdr, sum); + + if (tso->tlen != sizeof(struct udphdr)) { + struct tcphdr *tcph = (struct tcphdr *)(l4_hdr); + + tcph->check = csum_final; + } else { + struct udphdr *udph = (struct udphdr *)(l4_hdr); + + udph->check = csum_final; + } +} + +static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) +{ + int hdr_len, total_len, data_len; + struct enetc_tx_swbd *tx_swbd; + union enetc_tx_bd *txbd; + struct tso_t tso; + __wsum csum, csum2; + int count = 0, pos; + int err, i, bd_data_num; + + /* Initialize the TSO handler, and prepare the first payload */ + hdr_len = tso_start(skb, &tso); + total_len = skb->len - hdr_len; + i = tx_ring->next_to_use; + + while (total_len > 0) { + char *hdr; + + /* Get the BD */ + txbd = ENETC_TXBD(*tx_ring, i); + tx_swbd = &tx_ring->tx_swbd[i]; + prefetchw(txbd); + + /* Determine the length of this packet */ + data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_len; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0); + + /* compute the csum over the L4 header */ + csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); + enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); + bd_data_num = 0; + count++; + + while (data_len > 0) { + int size; + + size = min_t(int, tso.size, data_len); + + /* Advance the index in the BDR */ + enetc_bdr_idx_inc(tx_ring, &i); + txbd = ENETC_TXBD(*tx_ring, i); + tx_swbd = &tx_ring->tx_swbd[i]; + prefetchw(txbd); + + /* Compute the checksum over this segment of data and + * add it to the csum already computed (over the L4 + * header and possible other data segments). 
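+ * csum_block_add() takes the running byte offset (pos) so that a block
+ * starting at an odd offset is folded into the ones' complement sum
+ * correctly.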
+ */ + csum2 = csum_partial(tso.data, size, 0); + csum = csum_block_add(csum, csum2, pos); + pos += size; + + err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, + tso.data, size, + size == data_len); + if (err) + goto err_map_data; + + data_len -= size; + count++; + bd_data_num++; + tso_build_data(skb, &tso, size); + + if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len)) + goto err_chained_bd; + } + + enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum); + + if (total_len == 0) + tx_swbd->skb = skb; + + /* Go to the next BD */ + enetc_bdr_idx_inc(tx_ring, &i); + } + + tx_ring->next_to_use = i; + enetc_update_tx_ring_tail(tx_ring); + + return count; + +err_map_data: + dev_err(tx_ring->dev, "DMA map error"); + +err_chained_bd: + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (count--); + + return 0; +} + +static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; + int count, err; + + /* Queue one-step Sync packet if already locked */ + if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { + if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, + &priv->flags)) { + skb_queue_tail(&priv->tx_skbs, skb); + return NETDEV_TX_OK; + } + } + + tx_ring = priv->tx_ring[skb->queue_mapping]; + + if (skb_is_gso(skb)) { + if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + enetc_lock_mdio(); + count = enetc_map_tx_tso_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } else { + if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_linearize(skb))) + goto drop_packet_err; + + count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + err = skb_checksum_help(skb); + if (err) + goto drop_packet_err; + } + enetc_lock_mdio(); + count = enetc_map_tx_buffs(tx_ring, skb); + enetc_unlock_mdio(); + } + + if (unlikely(!count)) + goto drop_packet_err; + + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) + netif_stop_subqueue(ndev, tx_ring->index); + + return NETDEV_TX_OK; + +drop_packet_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u8 udp, msgtype, twostep; + u16 offset1, offset2; + + /* Mark tx timestamp type on skb->cb[0] if requires */ + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) { + skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK; + } else { + skb->cb[0] = 0; + } + + /* Fall back to two-step timestamp if not one-step Sync packet */ + if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { + if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, + &offset1, &offset2) || + msgtype != PTP_MSGTYPE_SYNC || twostep != 0) + skb->cb[0] = ENETC_F_TX_TSTAMP; + } + + return enetc_start_xmit(skb, ndev); +} +EXPORT_SYMBOL_GPL(enetc_xmit); + +static irqreturn_t enetc_msix(int irq, void *data) +{ + struct enetc_int_vector *v = data; + int i; + + enetc_lock_mdio(); + + /* disable interrupts */ + enetc_wr_reg_hot(v->rbier, 0); + enetc_wr_reg_hot(v->ricr1, v->rx_ictt); + + for_each_set_bit(i, &v->tx_rings_map, 
ENETC_MAX_NUM_TXQS) + enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); + + enetc_unlock_mdio(); + + napi_schedule(&v->napi); + + return IRQ_HANDLED; +} + +static void enetc_rx_dim_work(struct work_struct *w) +{ + struct dim *dim = container_of(w, struct dim, work); + struct dim_cq_moder moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + struct enetc_int_vector *v = + container_of(dim, struct enetc_int_vector, rx_dim); + + v->rx_ictt = enetc_usecs_to_cycles(moder.usec); + dim->state = DIM_START_MEASURE; +} + +static void enetc_rx_net_dim(struct enetc_int_vector *v) +{ + struct dim_sample dim_sample = {}; + + v->comp_cnt++; + + if (!v->rx_napi_work) + return; + + dim_update_sample(v->comp_cnt, + v->rx_ring.stats.packets, + v->rx_ring.stats.bytes, + &dim_sample); + net_dim(&v->rx_dim, dim_sample); +} + +static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) +{ + int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; + + return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; +} + +static bool enetc_page_reusable(struct page *page) +{ + return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); +} + +static void enetc_reuse_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *old) +{ + struct enetc_rx_swbd *new; + + new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; + + /* next buf that may reuse a page */ + enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); + + /* copy page reference */ + *new = *old; +} + +static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd, + u64 *tstamp) +{ + u32 lo, hi, tstamp_lo; + + lo = enetc_rd_hot(hw, ENETC_SICTR0); + hi = enetc_rd_hot(hw, ENETC_SICTR1); + tstamp_lo = le32_to_cpu(txbd->wb.tstamp); + if (lo <= tstamp_lo) + hi -= 1; + *tstamp = (u64)hi << 32 | tstamp_lo; +} + +static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp) +{ + struct skb_shared_hwtstamps shhwtstamps; + + if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + skb_txtime_consumed(skb); + skb_tstamp_tx(skb, &shhwtstamps); + } +} + +static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); + struct enetc_rx_swbd rx_swbd = { + .dma = tx_swbd->dma, + .page = tx_swbd->page, + .page_offset = tx_swbd->page_offset, + .dir = tx_swbd->dir, + .len = tx_swbd->len, + }; + struct enetc_bdr *rx_ring; + + rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); + + if (likely(enetc_swbd_unused(rx_ring))) { + enetc_reuse_page(rx_ring, &rx_swbd); + + /* sync for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, + rx_swbd.page_offset, + ENETC_RXB_DMA_SIZE_XDP, + rx_swbd.dir); + + rx_ring->stats.recycles++; + } else { + /* RX ring is already full, we need to unmap and free the + * page, since there's nothing useful we can do with it. 
+ */ + rx_ring->stats.recycle_failures++; + + dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, + rx_swbd.dir); + __free_page(rx_swbd.page); + } + + rx_ring->xdp.xdp_tx_in_flight--; +} + +static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) +{ + int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0; + struct net_device *ndev = tx_ring->ndev; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_tx_swbd *tx_swbd; + int i, bds_to_clean; + bool do_twostep_tstamp; + u64 tstamp = 0; + + i = tx_ring->next_to_clean; + tx_swbd = &tx_ring->tx_swbd[i]; + + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + + do_twostep_tstamp = false; + + while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { + struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); + struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); + bool is_eof = tx_swbd->is_eof; + + if (unlikely(tx_swbd->check_wb)) { + union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); + + if (txbd->flags & ENETC_TXBD_FLAGS_W && + tx_swbd->do_twostep_tstamp) { + enetc_get_tx_tstamp(&priv->si->hw, txbd, + &tstamp); + do_twostep_tstamp = true; + } + + if (tx_swbd->qbv_en && + txbd->wb.status & ENETC_TXBD_STATS_WIN) + tx_win_drop++; + } + + if (tx_swbd->is_xdp_tx) + enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd); + else if (likely(tx_swbd->dma)) + enetc_unmap_tx_buff(tx_ring, tx_swbd); + + if (xdp_frame) { + xdp_return_frame(xdp_frame); + } else if (skb) { + if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { + /* Start work to release lock for next one-step + * timestamping packet. And send one skb in + * tx_skbs queue if has. + */ + schedule_work(&priv->tx_onestep_tstamp); + } else if (unlikely(do_twostep_tstamp)) { + enetc_tstamp_tx(skb, tstamp); + do_twostep_tstamp = false; + } + napi_consume_skb(skb, napi_budget); + } + + tx_byte_cnt += tx_swbd->len; + /* Scrub the swbd here so we don't have to do that + * when we reuse it during xmit + */ + memset(tx_swbd, 0, sizeof(*tx_swbd)); + + bds_to_clean--; + tx_swbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + } + + /* BD iteration loop end */ + if (is_eof) { + tx_frm_cnt++; + /* re-arm interrupt source */ + enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | + BIT(16 + tx_ring->index)); + } + + if (unlikely(!bds_to_clean)) + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + } + + tx_ring->next_to_clean = i; + tx_ring->stats.packets += tx_frm_cnt; + tx_ring->stats.bytes += tx_byte_cnt; + tx_ring->stats.win_drop += tx_win_drop; + + if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && + __netif_subqueue_stopped(ndev, tx_ring->index) && + (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { + netif_wake_subqueue(ndev, tx_ring->index); + } + + return tx_frm_cnt != ENETC_DEFAULT_TX_WORK; +} + +static bool enetc_new_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + bool xdp = !!(rx_ring->xdp.prog); + struct page *page; + dma_addr_t addr; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + /* For XDP_TX, we forgo dma_unmap -> dma_map */ + rx_swbd->dir = xdp ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + + addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); + if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { + __free_page(page); + + return false; + } + + rx_swbd->dma = addr; + rx_swbd->page = page; + rx_swbd->page_offset = rx_ring->buffer_offset; + + return true; +} + +static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) +{ + struct enetc_rx_swbd *rx_swbd; + union enetc_rx_bd *rxbd; + int i, j; + + i = rx_ring->next_to_use; + rx_swbd = &rx_ring->rx_swbd[i]; + rxbd = enetc_rxbd(rx_ring, i); + + for (j = 0; j < buff_cnt; j++) { + /* try reuse page */ + if (unlikely(!rx_swbd->page)) { + if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { + rx_ring->stats.rx_alloc_errs++; + break; + } + } + + /* update RxBD */ + rxbd->w.addr = cpu_to_le64(rx_swbd->dma + + rx_swbd->page_offset); + /* clear 'R" as well */ + rxbd->r.lstatus = 0; + + enetc_rxbd_next(rx_ring, &rxbd, &i); + rx_swbd = &rx_ring->rx_swbd[i]; + } + + if (likely(j)) { + rx_ring->next_to_alloc = i; /* keep track from page reuse */ + rx_ring->next_to_use = i; + + /* update ENETC's consumer index */ + enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); + } + + return j; +} + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK +static void enetc_get_rx_tstamp(struct net_device *ndev, + union enetc_rx_bd *rxbd, + struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 lo, hi, tstamp_lo; + u64 tstamp; + + if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) { + lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0); + hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1); + rxbd = enetc_rxbd_ext(rxbd); + tstamp_lo = le32_to_cpu(rxbd->ext.tstamp); + if (lo <= tstamp_lo) + hi -= 1; + + tstamp = (u64)hi << 32 | tstamp_lo; + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(tstamp); + } +} +#endif + +static void enetc_get_offloads(struct enetc_bdr *rx_ring, + union enetc_rx_bd *rxbd, struct sk_buff *skb) +{ + struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); + + /* TODO: hashing */ + if (rx_ring->ndev->features & NETIF_F_RXCSUM) { + u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); + + skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { + __be16 tpid = 0; + + switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { + case 0: + tpid = htons(ETH_P_8021Q); + break; + case 1: + tpid = htons(ETH_P_8021AD); + break; + case 2: + tpid = htons(enetc_port_rd(&priv->si->hw, + ENETC_PCVLANR1)); + break; + case 3: + tpid = htons(enetc_port_rd(&priv->si->hw, + ENETC_PCVLANR2)); + break; + default: + break; + } + + __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); + } + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (priv->active_offloads & ENETC_F_RX_TSTAMP) + enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); +#endif +} + +/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS, + * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL + * mapped buffers. 
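+ * The direction was chosen at page allocation time in enetc_new_page():
+ * DMA_BIDIRECTIONAL when an XDP program is attached (pages may be
+ * recycled into the TX path), DMA_FROM_DEVICE otherwise, hence the use
+ * of rx_swbd->dir here rather than a constant.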
+ */ +static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + size, rx_swbd->dir); + return rx_swbd; +} + +/* Reuse the current page without performing half-page buffer flipping */ +static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; + + enetc_reuse_page(rx_ring, rx_swbd); + + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + buffer_size, rx_swbd->dir); + + rx_swbd->page = NULL; +} + +/* Reuse the current page by performing half-page buffer flipping */ +static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + if (likely(enetc_page_reusable(rx_swbd->page))) { + rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; + page_ref_inc(rx_swbd->page); + + enetc_put_rx_buff(rx_ring, rx_swbd); + } else { + dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, + rx_swbd->dir); + rx_swbd->page = NULL; + } +} + +static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + struct sk_buff *skb; + void *ba; + + ba = page_address(rx_swbd->page) + rx_swbd->page_offset; + skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); + if (unlikely(!skb)) { + rx_ring->stats.rx_alloc_errs++; + return NULL; + } + + skb_reserve(skb, rx_ring->buffer_offset); + __skb_put(skb, size); + + enetc_flip_rx_buff(rx_ring, rx_swbd); + + return skb; +} + +static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, + u16 size, struct sk_buff *skb) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, + rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); + + enetc_flip_rx_buff(rx_ring, rx_swbd); +} + +static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, + u32 bd_status, + union enetc_rx_bd **rxbd, int *i) +{ + if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)))) + return false; + + enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); + enetc_rxbd_next(rx_ring, rxbd, i); + + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + dma_rmb(); + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + + enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); + enetc_rxbd_next(rx_ring, rxbd, i); + } + + rx_ring->ndev->stats.rx_dropped++; + rx_ring->ndev->stats.rx_errors++; + + return true; +} + +static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring, + u32 bd_status, union enetc_rx_bd **rxbd, + int *i, int *cleaned_cnt, int buffer_size) +{ + struct sk_buff *skb; + u16 size; + + size = le16_to_cpu((*rxbd)->r.buf_len); + skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size); + if (!skb) + return NULL; + + enetc_get_offloads(rx_ring, *rxbd, skb); + + (*cleaned_cnt)++; + + enetc_rxbd_next(rx_ring, rxbd, i); + + /* not last BD in frame? 
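+ * Frames that span several RX buffers use one BD per buffer; only the
+ * final BD (F bit set) carries the real length of its fragment, the
+ * intermediate ones are full-sized.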
*/ + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + size = buffer_size; + + if (bd_status & ENETC_RXBD_LSTATUS_F) { + dma_rmb(); + size = le16_to_cpu((*rxbd)->r.buf_len); + } + + enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb); + + (*cleaned_cnt)++; + + enetc_rxbd_next(rx_ring, rxbd, i); + } + + skb_record_rx_queue(skb, rx_ring->index); + skb->protocol = eth_type_trans(skb, rx_ring->ndev); + + return skb; +} + +#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ + +static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit) +{ + int rx_frm_cnt = 0, rx_byte_cnt = 0; + int cleaned_cnt, i; + + cleaned_cnt = enetc_bd_unused(rx_ring); + /* next descriptor to process */ + i = rx_ring->next_to_clean; + + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd; + struct sk_buff *skb; + u32 bd_status; + + if (cleaned_cnt >= ENETC_RXBD_BUNDLE) + cleaned_cnt -= enetc_refill_rx_ring(rx_ring, + cleaned_cnt); + + rxbd = enetc_rxbd(rx_ring, i); + bd_status = le32_to_cpu(rxbd->r.lstatus); + if (!bd_status) + break; + + enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); + dma_rmb(); /* for reading other rxbd fields */ + + if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, + &rxbd, &i)) + break; + + skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, + &cleaned_cnt, ENETC_RXB_DMA_SIZE); + if (!skb) + break; + + /* When set, the outer VLAN header is extracted and reported + * in the receive buffer descriptor. So rx_byte_cnt should + * add the length of the extracted VLAN header. + */ + if (bd_status & ENETC_RXBD_FLAG_VLAN) + rx_byte_cnt += VLAN_HLEN; + rx_byte_cnt += skb->len + ETH_HLEN; + rx_frm_cnt++; + + napi_gro_receive(napi, skb); + } + + rx_ring->next_to_clean = i; + + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + + return rx_frm_cnt; +} + +static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, + struct enetc_tx_swbd *tx_swbd, + int frm_len) +{ + union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); + + prefetchw(txbd); + + enetc_clear_tx_bd(txbd); + txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); + txbd->buf_len = cpu_to_le16(tx_swbd->len); + txbd->frm_len = cpu_to_le16(frm_len); + + memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); +} + +/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer + * descriptors. 
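+ * The total frame length is computed up front by walking the array to
+ * the is_eof entry, because enetc_xdp_map_tx_buff() writes frm_len into
+ * every BD of the frame.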
+ */ +static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd) +{ + struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr; + int i, k, frm_len = tmp_tx_swbd->len; + + if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) + return false; + + while (unlikely(!tmp_tx_swbd->is_eof)) { + tmp_tx_swbd++; + frm_len += tmp_tx_swbd->len; + } + + i = tx_ring->next_to_use; + + for (k = 0; k < num_tx_swbd; k++) { + struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k]; + + enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); + + /* last BD needs 'F' bit set */ + if (xdp_tx_swbd->is_eof) { + union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); + + txbd->flags = ENETC_TXBD_FLAGS_F; + } + + enetc_bdr_idx_inc(tx_ring, &i); + } + + tx_ring->next_to_use = i; + + return true; +} + +static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *xdp_tx_arr, + struct xdp_frame *xdp_frame) +{ + struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0]; + struct skb_shared_info *shinfo; + void *data = xdp_frame->data; + int len = xdp_frame->len; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int f; + int n = 0; + + dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { + netdev_err(tx_ring->ndev, "DMA map error\n"); + return -1; + } + + xdp_tx_swbd->dma = dma; + xdp_tx_swbd->dir = DMA_TO_DEVICE; + xdp_tx_swbd->len = len; + xdp_tx_swbd->is_xdp_redirect = true; + xdp_tx_swbd->is_eof = false; + xdp_tx_swbd->xdp_frame = NULL; + + n++; + + if (!xdp_frame_has_frags(xdp_frame)) + goto out; + + xdp_tx_swbd = &xdp_tx_arr[n]; + + shinfo = xdp_get_shared_info_from_frame(xdp_frame); + + for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; + f++, frag++) { + data = skb_frag_address(frag); + len = skb_frag_size(frag); + + dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { + /* Undo the DMA mapping for all fragments */ + while (--n >= 0) + enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); + + netdev_err(tx_ring->ndev, "DMA map error\n"); + return -1; + } + + xdp_tx_swbd->dma = dma; + xdp_tx_swbd->dir = DMA_TO_DEVICE; + xdp_tx_swbd->len = len; + xdp_tx_swbd->is_xdp_redirect = true; + xdp_tx_swbd->is_eof = false; + xdp_tx_swbd->xdp_frame = NULL; + + n++; + xdp_tx_swbd = &xdp_tx_arr[n]; + } +out: + xdp_tx_arr[n - 1].is_eof = true; + xdp_tx_arr[n - 1].xdp_frame = xdp_frame; + + return n; +} + +int enetc_xdp_xmit(struct net_device *ndev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0}; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; + int xdp_tx_bd_cnt, i, k; + int xdp_tx_frm_cnt = 0; + + enetc_lock_mdio(); + + tx_ring = priv->xdp_tx_ring[smp_processor_id()]; + + prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); + + for (k = 0; k < num_frames; k++) { + xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, + xdp_redirect_arr, + frames[k]); + if (unlikely(xdp_tx_bd_cnt < 0)) + break; + + if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, + xdp_tx_bd_cnt))) { + for (i = 0; i < xdp_tx_bd_cnt; i++) + enetc_unmap_tx_buff(tx_ring, + &xdp_redirect_arr[i]); + tx_ring->stats.xdp_tx_drops++; + break; + } + + xdp_tx_frm_cnt++; + } + + if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt)) + enetc_update_tx_ring_tail(tx_ring); + + tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; + + enetc_unlock_mdio(); + + return 
xdp_tx_frm_cnt; +} +EXPORT_SYMBOL_GPL(enetc_xdp_xmit); + +static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, + struct xdp_buff *xdp_buff, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; + + /* To be used for XDP_TX */ + rx_swbd->len = size; + + xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, + rx_ring->buffer_offset, size, false); +} + +static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, + u16 size, struct xdp_buff *xdp_buff) +{ + struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + skb_frag_t *frag; + + /* To be used for XDP_TX */ + rx_swbd->len = size; + + if (!xdp_buff_has_frags(xdp_buff)) { + xdp_buff_set_frags_flag(xdp_buff); + shinfo->xdp_frags_size = size; + shinfo->nr_frags = 0; + } else { + shinfo->xdp_frags_size += size; + } + + if (page_is_pfmemalloc(rx_swbd->page)) + xdp_buff_set_frag_pfmemalloc(xdp_buff); + + frag = &shinfo->frags[shinfo->nr_frags]; + skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset, + size); + + shinfo->nr_frags++; +} + +static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, + union enetc_rx_bd **rxbd, int *i, + int *cleaned_cnt, struct xdp_buff *xdp_buff) +{ + u16 size = le16_to_cpu((*rxbd)->r.buf_len); + + xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); + + enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size); + (*cleaned_cnt)++; + enetc_rxbd_next(rx_ring, rxbd, i); + + /* not last BD in frame? */ + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + size = ENETC_RXB_DMA_SIZE_XDP; + + if (bd_status & ENETC_RXBD_LSTATUS_F) { + dma_rmb(); + size = le16_to_cpu((*rxbd)->r.buf_len); + } + + enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff); + (*cleaned_cnt)++; + enetc_rxbd_next(rx_ring, rxbd, i); + } +} + +/* Convert RX buffer descriptors to TX buffer descriptors. These will be + * recycled back into the RX ring in enetc_clean_tx_ring. 
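+ * (There, enetc_recycle_xdp_tx_buff() either turns the page back into
+ * an RX buffer or, if the RX ring is already full, unmaps and frees it.)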
+ */ +static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, + struct enetc_bdr *rx_ring, + int rx_ring_first, int rx_ring_last) +{ + int n = 0; + + for (; rx_ring_first != rx_ring_last; + n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; + struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n]; + + /* No need to dma_map, we already have DMA_BIDIRECTIONAL */ + tx_swbd->dma = rx_swbd->dma; + tx_swbd->dir = rx_swbd->dir; + tx_swbd->page = rx_swbd->page; + tx_swbd->page_offset = rx_swbd->page_offset; + tx_swbd->len = rx_swbd->len; + tx_swbd->is_dma_page = true; + tx_swbd->is_xdp_tx = true; + tx_swbd->is_eof = false; + } + + /* We rely on caller providing an rx_ring_last > rx_ring_first */ + xdp_tx_arr[n - 1].is_eof = true; + + return n; +} + +static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + enetc_put_rx_buff(rx_ring, + &rx_ring->rx_swbd[rx_ring_first]); + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } + rx_ring->stats.xdp_drops++; +} + +static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit, + struct bpf_prog *prog) +{ + int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0; + struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0}; + struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); + int rx_frm_cnt = 0, rx_byte_cnt = 0; + struct enetc_bdr *tx_ring; + int cleaned_cnt, i; + u32 xdp_act; + + cleaned_cnt = enetc_bd_unused(rx_ring); + /* next descriptor to process */ + i = rx_ring->next_to_clean; + + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd, *orig_rxbd; + int orig_i, orig_cleaned_cnt; + struct xdp_buff xdp_buff; + struct sk_buff *skb; + u32 bd_status; + int err; + + rxbd = enetc_rxbd(rx_ring, i); + bd_status = le32_to_cpu(rxbd->r.lstatus); + if (!bd_status) + break; + + enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); + dma_rmb(); /* for reading other rxbd fields */ + + if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, + &rxbd, &i)) + break; + + orig_rxbd = rxbd; + orig_cleaned_cnt = cleaned_cnt; + orig_i = i; + + enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, + &cleaned_cnt, &xdp_buff); + + /* When set, the outer VLAN header is extracted and reported + * in the receive buffer descriptor. So rx_byte_cnt should + * add the length of the extracted VLAN header. 
+ */ + if (bd_status & ENETC_RXBD_FLAG_VLAN) + rx_byte_cnt += VLAN_HLEN; + rx_byte_cnt += xdp_get_buff_len(&xdp_buff); + + xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); + + switch (xdp_act) { + default: + bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->ndev, prog, xdp_act); + fallthrough; + case XDP_DROP: + enetc_xdp_drop(rx_ring, orig_i, i); + break; + case XDP_PASS: + rxbd = orig_rxbd; + cleaned_cnt = orig_cleaned_cnt; + i = orig_i; + + skb = enetc_build_skb(rx_ring, bd_status, &rxbd, + &i, &cleaned_cnt, + ENETC_RXB_DMA_SIZE_XDP); + if (unlikely(!skb)) + goto out; + + napi_gro_receive(napi, skb); + break; + case XDP_TX: + tx_ring = priv->xdp_tx_ring[rx_ring->index]; + xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, + rx_ring, + orig_i, i); + + if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { + enetc_xdp_drop(rx_ring, orig_i, i); + tx_ring->stats.xdp_tx_drops++; + } else { + tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; + rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; + xdp_tx_frm_cnt++; + /* The XDP_TX enqueue was successful, so we + * need to scrub the RX software BDs because + * the ownership of the buffers no longer + * belongs to the RX ring, and we must prevent + * enetc_refill_rx_ring() from reusing + * rx_swbd->page. + */ + while (orig_i != i) { + rx_ring->rx_swbd[orig_i].page = NULL; + enetc_bdr_idx_inc(rx_ring, &orig_i); + } + } + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); + if (unlikely(err)) { + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_redirect_failures++; + } else { + while (orig_i != i) { + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[orig_i]); + enetc_bdr_idx_inc(rx_ring, &orig_i); + } + xdp_redirect_frm_cnt++; + rx_ring->stats.xdp_redirect++; + } + } + + rx_frm_cnt++; + } + +out: + rx_ring->next_to_clean = i; + + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + + if (xdp_redirect_frm_cnt) + xdp_do_flush_map(); + + if (xdp_tx_frm_cnt) + enetc_update_tx_ring_tail(tx_ring); + + if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - + rx_ring->xdp.xdp_tx_in_flight); + + return rx_frm_cnt; +} + +static int enetc_poll(struct napi_struct *napi, int budget) +{ + struct enetc_int_vector + *v = container_of(napi, struct enetc_int_vector, napi); + struct enetc_bdr *rx_ring = &v->rx_ring; + struct bpf_prog *prog; + bool complete = true; + int work_done; + int i; + + enetc_lock_mdio(); + + for (i = 0; i < v->count_tx_rings; i++) + if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) + complete = false; + + prog = rx_ring->xdp.prog; + if (prog) + work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog); + else + work_done = enetc_clean_rx_ring(rx_ring, napi, budget); + if (work_done == budget) + complete = false; + if (work_done) + v->rx_napi_work = true; + + if (!complete) { + enetc_unlock_mdio(); + return budget; + } + + napi_complete_done(napi, work_done); + + if (likely(v->rx_dim_en)) + enetc_rx_net_dim(v); + + v->rx_napi_work = false; + + /* enable interrupts */ + enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); + + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) + enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), + ENETC_TBIER_TXTIE); + + enetc_unlock_mdio(); + + return work_done; +} + +/* Probing and Init */ +#define ENETC_MAX_RFS_SIZE 64 +void enetc_get_si_caps(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + /* 
find out how many of various resources we have to work with */ + val = enetc_rd(hw, ENETC_SICAPR0); + si->num_rx_rings = (val >> 16) & 0xff; + si->num_tx_rings = val & 0xff; + + val = enetc_rd(hw, ENETC_SIRFSCAPR); + si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); + si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); + + si->num_rss = 0; + val = enetc_rd(hw, ENETC_SIPCAPR0); + if (val & ENETC_SIPCAPR0_RSS) { + u32 rss; + + rss = enetc_rd(hw, ENETC_SIRSSCAPR); + si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); + } + + if (val & ENETC_SIPCAPR0_QBV) + si->hw_features |= ENETC_SI_F_QBV; + + if (val & ENETC_SIPCAPR0_QBU) + si->hw_features |= ENETC_SI_F_QBU; + + if (val & ENETC_SIPCAPR0_PSFP) + si->hw_features |= ENETC_SI_F_PSFP; +} +EXPORT_SYMBOL_GPL(enetc_get_si_caps); + +static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res) +{ + size_t bd_base_size = res->bd_count * res->bd_size; + + res->bd_base = dma_alloc_coherent(res->dev, bd_base_size, + &res->bd_dma_base, GFP_KERNEL); + if (!res->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(res->bd_dma_base, 128)) { + dma_free_coherent(res->dev, bd_base_size, res->bd_base, + res->bd_dma_base); + return -EINVAL; + } + + return 0; +} + +static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res) +{ + size_t bd_base_size = res->bd_count * res->bd_size; + + dma_free_coherent(res->dev, bd_base_size, res->bd_base, + res->bd_dma_base); +} + +static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res, + struct device *dev, size_t bd_count) +{ + int err; + + res->dev = dev; + res->bd_count = bd_count; + res->bd_size = sizeof(union enetc_tx_bd); + + res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd)); + if (!res->tx_swbd) + return -ENOMEM; + + err = enetc_dma_alloc_bdr(res); + if (err) + goto err_alloc_bdr; + + res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE, + &res->tso_headers_dma, + GFP_KERNEL); + if (!res->tso_headers) { + err = -ENOMEM; + goto err_alloc_tso; + } + + return 0; + +err_alloc_tso: + enetc_dma_free_bdr(res); +err_alloc_bdr: + vfree(res->tx_swbd); + res->tx_swbd = NULL; + + return err; +} + +static void enetc_free_tx_resource(const struct enetc_bdr_resource *res) +{ + dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE, + res->tso_headers, res->tso_headers_dma); + enetc_dma_free_bdr(res); + vfree(res->tx_swbd); +} + +static struct enetc_bdr_resource * +enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_bdr_resource *tx_res; + int i, err; + + tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL); + if (!tx_res) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < priv->num_tx_rings; i++) { + struct enetc_bdr *tx_ring = priv->tx_ring[i]; + + err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, + tx_ring->bd_count); + if (err) + goto fail; + } + + return tx_res; + +fail: + while (i-- > 0) + enetc_free_tx_resource(&tx_res[i]); + + kfree(tx_res); + + return ERR_PTR(err); +} + +static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res, + size_t num_resources) +{ + size_t i; + + for (i = 0; i < num_resources; i++) + enetc_free_tx_resource(&tx_res[i]); + + kfree(tx_res); +} + +static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res, + struct device *dev, size_t bd_count, + bool extended) +{ + int err; + + res->dev = dev; + res->bd_count = bd_count; + res->bd_size = sizeof(union enetc_rx_bd); + if (extended) + res->bd_size *= 2; + + res->rx_swbd = vcalloc(bd_count, sizeof(struct 
enetc_rx_swbd)); + if (!res->rx_swbd) + return -ENOMEM; + + err = enetc_dma_alloc_bdr(res); + if (err) { + vfree(res->rx_swbd); + return err; + } + + return 0; +} + +static void enetc_free_rx_resource(const struct enetc_bdr_resource *res) +{ + enetc_dma_free_bdr(res); + vfree(res->rx_swbd); +} + +static struct enetc_bdr_resource * +enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended) +{ + struct enetc_bdr_resource *rx_res; + int i, err; + + rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL); + if (!rx_res) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < priv->num_rx_rings; i++) { + struct enetc_bdr *rx_ring = priv->rx_ring[i]; + + err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev, + rx_ring->bd_count, extended); + if (err) + goto fail; + } + + return rx_res; + +fail: + while (i-- > 0) + enetc_free_rx_resource(&rx_res[i]); + + kfree(rx_res); + + return ERR_PTR(err); +} + +static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res, + size_t num_resources) +{ + size_t i; + + for (i = 0; i < num_resources; i++) + enetc_free_rx_resource(&rx_res[i]); + + kfree(rx_res); +} + +static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring, + const struct enetc_bdr_resource *res) +{ + tx_ring->bd_base = res ? res->bd_base : NULL; + tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; + tx_ring->tx_swbd = res ? res->tx_swbd : NULL; + tx_ring->tso_headers = res ? res->tso_headers : NULL; + tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; +} + +static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring, + const struct enetc_bdr_resource *res) +{ + rx_ring->bd_base = res ? res->bd_base : NULL; + rx_ring->bd_dma_base = res ? res->bd_dma_base : 0; + rx_ring->rx_swbd = res ? res->rx_swbd : NULL; +} + +static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv, + const struct enetc_bdr_resource *res) +{ + int i; + + if (priv->tx_res) + enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings); + + for (i = 0; i < priv->num_tx_rings; i++) { + enetc_assign_tx_resource(priv->tx_ring[i], + res ? &res[i] : NULL); + } + + priv->tx_res = res; +} + +static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv, + const struct enetc_bdr_resource *res) +{ + int i; + + if (priv->rx_res) + enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings); + + for (i = 0; i < priv->num_rx_rings; i++) { + enetc_assign_rx_resource(priv->rx_ring[i], + res ? 
&res[i] : NULL); + } + + priv->rx_res = res; +} + +static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) +{ + int i; + + for (i = 0; i < tx_ring->bd_count; i++) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_frame(tx_ring, tx_swbd); + } +} + +static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) +{ + int i; + + for (i = 0; i < rx_ring->bd_count; i++) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + if (!rx_swbd->page) + continue; + + dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, + rx_swbd->dir); + __free_page(rx_swbd->page); + rx_swbd->page = NULL; + } +} + +static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_free_rx_ring(priv->rx_ring[i]); + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_free_tx_ring(priv->tx_ring[i]); +} + +static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) +{ + int *rss_table; + int i; + + rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return -ENOMEM; + + /* Set up RSS table defaults */ + for (i = 0; i < si->num_rss; i++) + rss_table[i] = i % num_groups; + + enetc_set_rss_table(si, rss_table, si->num_rss); + + kfree(rss_table); + + return 0; +} + +int enetc_configure_si(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + struct enetc_hw *hw = &si->hw; + int err; + + /* set SI cache attributes */ + enetc_wr(hw, ENETC_SICAR0, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); + /* enable SI */ + enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); + + if (si->num_rss) { + err = enetc_setup_default_rss_table(si, priv->num_rx_rings); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_configure_si); + +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + int cpus = num_online_cpus(); + + priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; + priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; + + /* Enable all available TX rings in order to configure as many + * priorities as possible, when needed. 
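+ * When an XDP program is attached, the last num_possible_cpus() TX
+ * rings are reserved for XDP_TX/XDP_REDIRECT and are hidden from the
+ * network stack (see enetc_num_stack_tx_queues()).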
+ * TODO: Make # of TX rings run-time configurable + */ + priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); + priv->num_tx_rings = si->num_tx_rings; + priv->bdr_int_num = cpus; + priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; + priv->tx_ictt = ENETC_TXIC_TIMETHR; +} +EXPORT_SYMBOL_GPL(enetc_init_si_rings_params); + +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + + priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), + GFP_KERNEL); + if (!priv->cls_rules) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_alloc_si_resources); + +void enetc_free_si_resources(struct enetc_ndev_priv *priv) +{ + kfree(priv->cls_rules); +} +EXPORT_SYMBOL_GPL(enetc_free_si_resources); + +static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int idx = tx_ring->index; + u32 tbmr; + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, + lower_32_bits(tx_ring->bd_dma_base)); + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, + upper_32_bits(tx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_txbdr_wr(hw, idx, ENETC_TBLENR, + ENETC_RTBLENR_LEN(tx_ring->bd_count)); + + /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ + tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); + tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); + + /* enable Tx ints by setting pkt thr to 1 */ + enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); + + tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); + if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) + tbmr |= ENETC_TBMR_VIH; + + /* enable ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); + + tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); + tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); + tx_ring->idr = hw->reg + ENETC_SITXIDR; +} + +static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring, + bool extended) +{ + int idx = rx_ring->index; + u32 rbmr = 0; + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, + lower_32_bits(rx_ring->bd_dma_base)); + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, + upper_32_bits(rx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, + ENETC_RTBLENR_LEN(rx_ring->bd_count)); + + if (rx_ring->xdp.prog) + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP); + else + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + + /* Also prepare the consumer index in case page allocation never + * succeeds. In that case, hardware will never advance producer index + * to match consumer index, and will drop all frames. 
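+ * enetc_refill_rx_ring(), called below, then programs the real
+ * consumer index (next_to_use) into ENETC_RBCIR once buffers have
+ * actually been allocated.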
+ */ + enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); + enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); + + /* enable Rx ints by setting pkt thr to 1 */ + enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); + + rx_ring->ext_en = extended; + if (rx_ring->ext_en) + rbmr |= ENETC_RBMR_BDS; + + if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + rbmr |= ENETC_RBMR_VTE; + + rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); + rx_ring->idr = hw->reg + ENETC_SIRXIDR; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->next_to_alloc = 0; + + enetc_lock_mdio(); + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); + enetc_unlock_mdio(); + + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); +} + +static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_setup_txbdr(hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_setup_rxbdr(hw, priv->rx_ring[i], extended); +} + +static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int idx = tx_ring->index; + u32 tbmr; + + tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR); + tbmr |= ENETC_TBMR_EN; + enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); +} + +static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + u32 rbmr; + + rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); + rbmr |= ENETC_RBMR_EN; + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); +} + +static void enetc_enable_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_enable_txbdr(hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_enable_rxbdr(hw, priv->rx_ring[i]); +} + +static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + + /* disable EN bit on ring */ + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); +} + +static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + + /* disable EN bit on ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); +} + +static void enetc_disable_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_disable_txbdr(hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_disable_rxbdr(hw, priv->rx_ring[i]); +} + +static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int delay = 8, timeout = 100; + int idx = tx_ring->index; + + /* wait for busy to clear */ + while (delay < timeout && + enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { + msleep(delay); + delay *= 2; + } + + if (delay >= timeout) + netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", + idx); +} + +static void enetc_wait_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_wait_txbdr(hw, priv->tx_ring[i]); +} + +static int enetc_setup_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + struct enetc_hw *hw = &priv->si->hw; + int i, j, err; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + struct enetc_int_vector *v = priv->int_vector[i]; + int entry = ENETC_BDR_INT_BASE_IDX + i; + + snprintf(v->name, sizeof(v->name), "%s-rxtx%d", + priv->ndev->name, i); + err 
= request_irq(irq, enetc_msix, 0, v->name, v); + if (err) { + dev_err(priv->dev, "request_irq() failed!\n"); + goto irq_err; + } + disable_irq(irq); + + v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); + v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); + v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); + + enetc_wr(hw, ENETC_SIMSIRRV(i), entry); + + for (j = 0; j < v->count_tx_rings; j++) { + int idx = v->tx_ring[j].index; + + enetc_wr(hw, ENETC_SIMSITRV(idx), entry); + } + irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus())); + } + + return 0; + +irq_err: + while (i--) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } + + return err; +} + +static void enetc_free_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } +} + +static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 icpt, ictt; + int i; + + /* enable Tx & Rx event indication */ + if (priv->ic_mode & + (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) { + icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR); + /* init to non-0 minimum, will be adjusted later */ + ictt = 0x1; + } else { + icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */ + ictt = 0; + } + + for (i = 0; i < priv->num_rx_rings; i++) { + enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt); + enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt); + enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE); + } + + if (priv->ic_mode & ENETC_IC_TX_MANUAL) + icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR); + else + icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */ + + for (i = 0; i < priv->num_tx_rings; i++) { + enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); + enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt); + enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE); + } +} + +static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_txbdr_wr(hw, i, ENETC_TBIER, 0); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); +} + +static int enetc_phylink_connect(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct ethtool_eee edata; + int err; + + if (!priv->phylink) { + /* phy-less mode */ + netif_carrier_on(ndev); + return 0; + } + + err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); + if (err) { + dev_err(&ndev->dev, "could not attach to PHY\n"); + return err; + } + + /* disable EEE autoneg, until ENETC driver supports it */ + memset(&edata, 0, sizeof(struct ethtool_eee)); + phylink_ethtool_set_eee(priv->phylink, &edata); + + phylink_start(priv->phylink); + + return 0; +} + +static void enetc_tx_onestep_tstamp(struct work_struct *work) +{ + struct enetc_ndev_priv *priv; + struct sk_buff *skb; + + priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); + + netif_tx_lock_bh(priv->ndev); + + clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); + skb = skb_dequeue(&priv->tx_skbs); + if (skb) + enetc_start_xmit(skb, priv->ndev); + + netif_tx_unlock_bh(priv->ndev); +} + +static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv) +{ + 
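+ /* One-step PTP Sync timestamping allows only one such packet in
+ * flight at a time; follow-up skbs wait on priv->tx_skbs and are
+ * re-sent from the tx_onestep_tstamp work item once the previous
+ * timestamp has completed.
+ */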
INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); + skb_queue_head_init(&priv->tx_skbs); +} + +void enetc_start(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + enetc_setup_interrupts(priv); + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(priv->si->pdev, + ENETC_BDR_INT_BASE_IDX + i); + + napi_enable(&priv->int_vector[i]->napi); + enable_irq(irq); + } + + enetc_enable_bdrs(priv); + + netif_tx_start_all_queues(ndev); +} +EXPORT_SYMBOL_GPL(enetc_start); + +int enetc_open(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr_resource *tx_res, *rx_res; + bool extended; + int err; + + extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); + + err = enetc_setup_irqs(priv); + if (err) + return err; + + err = enetc_phylink_connect(ndev); + if (err) + goto err_phy_connect; + + tx_res = enetc_alloc_tx_resources(priv); + if (IS_ERR(tx_res)) { + err = PTR_ERR(tx_res); + goto err_alloc_tx; + } + + rx_res = enetc_alloc_rx_resources(priv, extended); + if (IS_ERR(rx_res)) { + err = PTR_ERR(rx_res); + goto err_alloc_rx; + } + + enetc_tx_onestep_tstamp_init(priv); + enetc_assign_tx_resources(priv, tx_res); + enetc_assign_rx_resources(priv, rx_res); + enetc_setup_bdrs(priv, extended); + enetc_start(ndev); + + return 0; + +err_alloc_rx: + enetc_free_tx_resources(tx_res, priv->num_tx_rings); +err_alloc_tx: + if (priv->phylink) + phylink_disconnect_phy(priv->phylink); +err_phy_connect: + enetc_free_irqs(priv); + + return err; +} +EXPORT_SYMBOL_GPL(enetc_open); + +void enetc_stop(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + netif_tx_stop_all_queues(ndev); + + enetc_disable_bdrs(priv); + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(priv->si->pdev, + ENETC_BDR_INT_BASE_IDX + i); + + disable_irq(irq); + napi_synchronize(&priv->int_vector[i]->napi); + napi_disable(&priv->int_vector[i]->napi); + } + + enetc_wait_bdrs(priv); + + enetc_clear_interrupts(priv); +} +EXPORT_SYMBOL_GPL(enetc_stop); + +int enetc_close(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + enetc_stop(ndev); + + if (priv->phylink) { + phylink_stop(priv->phylink); + phylink_disconnect_phy(priv->phylink); + } else { + netif_carrier_off(ndev); + } + + enetc_free_rxtx_rings(priv); + + /* Avoids dangling pointers and also frees old resources */ + enetc_assign_rx_resources(priv, NULL); + enetc_assign_tx_resources(priv, NULL); + + enetc_free_irqs(priv); + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_close); + +static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, + int (*cb)(struct enetc_ndev_priv *priv, void *ctx), + void *ctx) +{ + struct enetc_bdr_resource *tx_res, *rx_res; + int err; + + ASSERT_RTNL(); + + /* If the interface is down, run the callback right away, + * without reconfiguration. 
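+ * Otherwise, allocate the new ring resources before stopping the
+ * interface, so that a failed allocation leaves the currently
+ * running configuration untouched.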
+ */ + if (!netif_running(priv->ndev)) { + if (cb) { + err = cb(priv, ctx); + if (err) + return err; + } + + return 0; + } + + tx_res = enetc_alloc_tx_resources(priv); + if (IS_ERR(tx_res)) { + err = PTR_ERR(tx_res); + goto out; + } + + rx_res = enetc_alloc_rx_resources(priv, extended); + if (IS_ERR(rx_res)) { + err = PTR_ERR(rx_res); + goto out_free_tx_res; + } + + enetc_stop(priv->ndev); + enetc_free_rxtx_rings(priv); + + /* Interface is down, run optional callback now */ + if (cb) { + err = cb(priv, ctx); + if (err) + goto out_restart; + } + + enetc_assign_tx_resources(priv, tx_res); + enetc_assign_rx_resources(priv, rx_res); + enetc_setup_bdrs(priv, extended); + enetc_start(priv->ndev); + + return 0; + +out_restart: + enetc_setup_bdrs(priv, extended); + enetc_start(priv->ndev); + enetc_free_rx_resources(rx_res, priv->num_rx_rings); +out_free_tx_res: + enetc_free_tx_resources(tx_res, priv->num_tx_rings); +out: + return err; +} + +static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i, + priv->tx_ring[i]->prio); +} + +void enetc_reset_tc_mqprio(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_bdr *tx_ring; + int num_stack_tx_queues; + int i; + + num_stack_tx_queues = enetc_num_stack_tx_queues(priv); + + netdev_reset_tc(ndev); + netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); + priv->min_num_stack_tx_queues = num_possible_cpus(); + + /* Reset all ring priorities to 0 */ + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } + + enetc_debug_tx_ring_prios(priv); + + enetc_change_preemptible_tcs(priv, 0); +} +EXPORT_SYMBOL_GPL(enetc_reset_tc_mqprio); + +int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio = type_data; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct tc_mqprio_qopt *qopt = &mqprio->qopt; + struct enetc_hw *hw = &priv->si->hw; + int num_stack_tx_queues = 0; + struct enetc_bdr *tx_ring; + u8 num_tc = qopt->num_tc; + int offset, count; + int err, tc, q; + + if (!num_tc) { + enetc_reset_tc_mqprio(ndev); + return 0; + } + + err = netdev_set_num_tc(ndev, num_tc); + if (err) + return err; + + for (tc = 0; tc < num_tc; tc++) { + offset = qopt->offset[tc]; + count = qopt->count[tc]; + num_stack_tx_queues += count; + + err = netdev_set_tc_queue(ndev, tc, count, offset); + if (err) + goto err_reset_tc; + + for (q = offset; q < offset + count; q++) { + tx_ring = priv->tx_ring[q]; + /* The prio_tc_map is skb_tx_hash()'s way of selecting + * between TX queues based on skb->priority. As such, + * there's nothing to offload based on it. + * Make the mqprio "traffic class" be the priority of + * this ring group, and leave the Tx IPV to traffic + * class mapping as its default mapping value of 1:1. 
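+ * enetc_set_bdr_prio() then commits this priority to the Tx BD
+ * ring's mode register.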
+ */ + tx_ring->prio = tc; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } + } + + err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); + if (err) + goto err_reset_tc; + + priv->min_num_stack_tx_queues = num_stack_tx_queues; + + enetc_debug_tx_ring_prios(priv); + + enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs); + + return 0; + +err_reset_tc: + enetc_reset_tc_mqprio(ndev); + return err; +} +EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio); + +static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) +{ + struct bpf_prog *old_prog, *prog = ctx; + int num_stack_tx_queues; + int err, i; + + old_prog = xchg(&priv->xdp_prog, prog); + + num_stack_tx_queues = enetc_num_stack_tx_queues(priv); + err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); + if (err) { + xchg(&priv->xdp_prog, old_prog); + return err; + } + + if (old_prog) + bpf_prog_put(old_prog); + + for (i = 0; i < priv->num_rx_rings; i++) { + struct enetc_bdr *rx_ring = priv->rx_ring[i]; + + rx_ring->xdp.prog = prog; + + if (prog) + rx_ring->buffer_offset = XDP_PACKET_HEADROOM; + else + rx_ring->buffer_offset = ENETC_RXB_PAD; + } + + return 0; +} + +static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + int num_xdp_tx_queues = prog ? num_possible_cpus() : 0; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + bool extended; + + if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > + priv->num_tx_rings) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)", + num_xdp_tx_queues, + priv->min_num_stack_tx_queues, + priv->num_tx_rings); + return -EBUSY; + } + + extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); + + /* The buffer layout is changing, so we need to drain the old + * RX buffers and seed new ones. + */ + return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog); +} + +int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + switch (bpf->command) { + case XDP_SETUP_PROG: + return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack); + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_setup_bpf); + +struct net_device_stats *enetc_get_stats(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned long packets = 0, bytes = 0; + unsigned long tx_dropped = 0; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) { + packets += priv->rx_ring[i]->stats.packets; + bytes += priv->rx_ring[i]->stats.bytes; + } + + stats->rx_packets = packets; + stats->rx_bytes = bytes; + bytes = 0; + packets = 0; + + for (i = 0; i < priv->num_tx_rings; i++) { + packets += priv->tx_ring[i]->stats.packets; + bytes += priv->tx_ring[i]->stats.bytes; + tx_dropped += priv->tx_ring[i]->stats.win_drop; + } + + stats->tx_packets = packets; + stats->tx_bytes = bytes; + stats->tx_dropped = tx_dropped; + + return stats; +} +EXPORT_SYMBOL_GPL(enetc_get_stats); + +static int enetc_set_rss(struct net_device *ndev, int en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); + + reg = enetc_rd(hw, ENETC_SIMR); + reg &= ~ENETC_SIMR_RSSE; + reg |= (en) ? 
ENETC_SIMR_RSSE : 0; + enetc_wr(hw, ENETC_SIMR, reg); + + return 0; +} + +static void enetc_enable_rxvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_bdr_enable_rxvlan(hw, i, en); +} + +static void enetc_enable_txvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_bdr_enable_txvlan(hw, i, en); +} + +void enetc_set_features(struct net_device *ndev, netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_RXHASH) + enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + enetc_enable_rxvlan(ndev, + !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) + enetc_enable_txvlan(ndev, + !!(features & NETIF_F_HW_VLAN_CTAG_TX)); +} +EXPORT_SYMBOL_GPL(enetc_set_features); + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK +static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err, new_offloads = priv->active_offloads; + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; + break; + case HWTSTAMP_TX_ON: + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; + new_offloads |= ENETC_F_TX_TSTAMP; + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; + new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + new_offloads &= ~ENETC_F_RX_TSTAMP; + break; + default: + new_offloads |= ENETC_F_RX_TSTAMP; + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { + bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP); + + err = enetc_reconfigure(priv, extended, NULL, NULL); + if (err) + return err; + } + + priv->active_offloads = new_offloads; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + + if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) + config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; + else if (priv->active_offloads & ENETC_F_TX_TSTAMP) + config.tx_type = HWTSTAMP_TX_ON; + else + config.tx_type = HWTSTAMP_TX_OFF; + + config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} +#endif + +int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (cmd == SIOCSHWTSTAMP) + return enetc_hwtstamp_set(ndev, rq); + if (cmd == SIOCGHWTSTAMP) + return enetc_hwtstamp_get(ndev, rq); +#endif + + if (!priv->phylink) + return -EOPNOTSUPP; + + return phylink_mii_ioctl(priv->phylink, rq, cmd); +} +EXPORT_SYMBOL_GPL(enetc_ioctl); + +int enetc_alloc_msix(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int num_stack_tx_queues; + int first_xdp_tx_ring; + int i, n, err, nvec; + int v_tx_rings; + + nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; + /* allocate MSIX for both messaging and Rx/Tx interrupts */ + n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + + if (n < 0) + return n; + + if (n != nvec) + return -EPERM; + + /* # of tx rings per int vector */ + v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v; + struct enetc_bdr *bdr; + int j; + + v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); + if (!v) { + err = -ENOMEM; + goto fail; + } + + priv->int_vector[i] = v; + + bdr = &v->rx_ring; + bdr->index = i; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->rx_bd_count; + bdr->buffer_offset = ENETC_RXB_PAD; + priv->rx_ring[i] = bdr; + + err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); + if (err) { + kfree(v); + goto fail; + } + + err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) { + xdp_rxq_info_unreg(&bdr->xdp.rxq); + kfree(v); + goto fail; + } + + /* init defaults for adaptive IC */ + if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { + v->rx_ictt = 0x1; + v->rx_dim_en = true; + } + INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); + netif_napi_add(priv->ndev, &v->napi, enetc_poll); + v->count_tx_rings = v_tx_rings; + + for (j = 0; j < v_tx_rings; j++) { + int idx; + + /* default tx ring mapping policy */ + idx = priv->bdr_int_num * j + i; + __set_bit(idx, &v->tx_rings_map); + bdr = &v->tx_ring[j]; + bdr->index = idx; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->tx_bd_count; + priv->tx_ring[idx] = bdr; + } + } + + num_stack_tx_queues = enetc_num_stack_tx_queues(priv); + + err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); + if (err) + goto fail; + + err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings); + if (err) + goto fail; + + priv->min_num_stack_tx_queues = num_possible_cpus(); + first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); + priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; + + return 0; + +fail: + while (i--) { + struct enetc_int_vector *v = priv->int_vector[i]; + struct enetc_bdr *rx_ring = &v->rx_ring; + + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); + xdp_rxq_info_unreg(&rx_ring->xdp.rxq); + netif_napi_del(&v->napi); + cancel_work_sync(&v->rx_dim.work); + kfree(v); + } + + pci_free_irq_vectors(pdev); + + return err; +} +EXPORT_SYMBOL_GPL(enetc_alloc_msix); + +void enetc_free_msix(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v = priv->int_vector[i]; + struct enetc_bdr *rx_ring = &v->rx_ring; + + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); + xdp_rxq_info_unreg(&rx_ring->xdp.rxq); + netif_napi_del(&v->napi); + cancel_work_sync(&v->rx_dim.work); + } + + for (i = 0; i < priv->num_rx_rings; i++) + 
priv->rx_ring[i] = NULL; + + for (i = 0; i < priv->num_tx_rings; i++) + priv->tx_ring[i] = NULL; + + for (i = 0; i < priv->bdr_int_num; i++) { + kfree(priv->int_vector[i]); + priv->int_vector[i] = NULL; + } + + /* disable all MSIX for this device */ + pci_free_irq_vectors(priv->si->pdev); +} +EXPORT_SYMBOL_GPL(enetc_free_msix); + +static void enetc_kfree_si(struct enetc_si *si) +{ + char *p = (char *)si - si->pad; + + kfree(p); +} + +static void enetc_detect_errata(struct enetc_si *si) +{ + if (si->pdev->revision == ENETC_REV1) + si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP; +} + +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) +{ + struct enetc_si *si, *p; + struct enetc_hw *hw; + size_t alloc_size; + int err, len; + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) + return dev_err_probe(&pdev->dev, err, "device enable failed\n"); + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + alloc_size = sizeof(struct enetc_si); + if (sizeof_priv) { + /* align priv to 32B */ + alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); + alloc_size += sizeof_priv; + } + /* force 32B alignment for enetc_si */ + alloc_size += ENETC_SI_ALIGN - 1; + + p = kzalloc(alloc_size, GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto err_alloc_si; + } + + si = PTR_ALIGN(p, ENETC_SI_ALIGN); + si->pad = (char *)si - (char *)p; + + pci_set_drvdata(pdev, si); + si->pdev = pdev; + hw = &si->hw; + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!hw->reg) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + if (len > ENETC_PORT_BASE) + hw->port = hw->reg + ENETC_PORT_BASE; + if (len > ENETC_GLOBAL_BASE) + hw->global = hw->reg + ENETC_GLOBAL_BASE; + + enetc_detect_errata(si); + + return 0; + +err_ioremap: + enetc_kfree_si(si); +err_alloc_si: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} +EXPORT_SYMBOL_GPL(enetc_pci_probe); + +void enetc_pci_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_hw *hw = &si->hw; + + iounmap(hw->reg); + enetc_kfree_si(si); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} +EXPORT_SYMBOL_GPL(enetc_pci_remove); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h new file mode 100644 index 0000000000..7439739cd8 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -0,0 +1,590 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include <linux/timer.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/dma-mapping.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/phylink.h> +#include <linux/dim.h> +#include <net/xdp.h> + +#include "enetc_hw.h" + +#define ENETC_MAC_MAXFRM_SIZE 9600 +#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \ + (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN)) + +#define ENETC_CBD_DATA_MEM_ALIGN 64 + +struct enetc_tx_swbd { + union { + struct 
sk_buff *skb; + struct xdp_frame *xdp_frame; + }; + dma_addr_t dma; + struct page *page; /* valid only if is_xdp_tx */ + u16 page_offset; /* valid only if is_xdp_tx */ + u16 len; + enum dma_data_direction dir; + u8 is_dma_page:1; + u8 check_wb:1; + u8 do_twostep_tstamp:1; + u8 is_eof:1; + u8 is_xdp_tx:1; + u8 is_xdp_redirect:1; + u8 qbv_en:1; +}; + +#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE +#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ +#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ +#define ENETC_RXB_DMA_SIZE \ + (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD) +#define ENETC_RXB_DMA_SIZE_XDP \ + (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM) + +struct enetc_rx_swbd { + dma_addr_t dma; + struct page *page; + u16 page_offset; + enum dma_data_direction dir; + u16 len; +}; + +/* ENETC overhead: optional extension BD + 1 BD gap */ +#define ENETC_TXBDS_NEEDED(val) ((val) + 2) +/* max # of chained Tx BDs is 15, including head and extension BD */ +#define ENETC_MAX_SKB_FRAGS 13 +#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) + +struct enetc_ring_stats { + unsigned int packets; + unsigned int bytes; + unsigned int rx_alloc_errs; + unsigned int xdp_drops; + unsigned int xdp_tx; + unsigned int xdp_tx_drops; + unsigned int xdp_redirect; + unsigned int xdp_redirect_failures; + unsigned int recycles; + unsigned int recycle_failures; + unsigned int win_drop; +}; + +struct enetc_xdp_data { + struct xdp_rxq_info rxq; + struct bpf_prog *prog; + int xdp_tx_in_flight; +}; + +#define ENETC_RX_RING_DEFAULT_SIZE 2048 +#define ENETC_TX_RING_DEFAULT_SIZE 2048 +#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2) + +struct enetc_bdr_resource { + /* Input arguments saved for teardown */ + struct device *dev; /* for DMA mapping */ + size_t bd_count; + size_t bd_size; + + /* Resource proper */ + void *bd_base; /* points to Rx or Tx BD ring */ + dma_addr_t bd_dma_base; + union { + struct enetc_tx_swbd *tx_swbd; + struct enetc_rx_swbd *rx_swbd; + }; + char *tso_headers; + dma_addr_t tso_headers_dma; +}; + +struct enetc_bdr { + struct device *dev; /* for DMA mapping */ + struct net_device *ndev; + void *bd_base; /* points to Rx or Tx BD ring */ + union { + void __iomem *tpir; + void __iomem *rcir; + }; + u16 index; + u16 prio; + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + union { + struct enetc_tx_swbd *tx_swbd; + struct enetc_rx_swbd *rx_swbd; + }; + union { + void __iomem *tcir; /* Tx */ + int next_to_alloc; /* Rx */ + }; + void __iomem *idr; /* Interrupt Detect Register pointer */ + + int buffer_offset; + struct enetc_xdp_data xdp; + + struct enetc_ring_stats stats; + + dma_addr_t bd_dma_base; + u8 tsd_enable; /* Time specific departure */ + bool ext_en; /* enable h/w descriptor extensions */ + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; +} ____cacheline_aligned_in_smp; + +static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i) +{ + if (unlikely(++*i == bdr->bd_count)) + *i = 0; +} + +static inline int enetc_bd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_use) + return bdr->next_to_clean - bdr->next_to_use - 1; + + return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1; +} + +static inline int enetc_swbd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_alloc) + return bdr->next_to_clean - bdr->next_to_alloc - 1; + + return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 
1; +} + +/* Control BD ring */ +#define ENETC_CBDR_DEFAULT_SIZE 64 +struct enetc_cbdr { + void *bd_base; /* points to Rx or Tx BD ring */ + void __iomem *pir; + void __iomem *cir; + void __iomem *mr; /* mode register */ + + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + + dma_addr_t bd_dma_base; + struct device *dma_dev; +}; + +#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i])) + +static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i) +{ + int hw_idx = i; + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (rx_ring->ext_en) + hw_idx = 2 * i; +#endif + return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]); +} + +static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring, + union enetc_rx_bd **old_rxbd, int *old_index) +{ + union enetc_rx_bd *new_rxbd = *old_rxbd; + int new_index = *old_index; + + new_rxbd++; + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + if (rx_ring->ext_en) + new_rxbd++; +#endif + + if (unlikely(++new_index == rx_ring->bd_count)) { + new_rxbd = rx_ring->bd_base; + new_index = 0; + } + + *old_rxbd = new_rxbd; + *old_index = new_index; +} + +static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd) +{ + return ++rxbd; +} + +struct enetc_msg_swbd { + void *vaddr; + dma_addr_t dma; + int size; +}; + +#define ENETC_REV1 0x1 +enum enetc_errata { + ENETC_ERR_VLAN_ISOL = BIT(0), + ENETC_ERR_UCMCSWP = BIT(1), +}; + +#define ENETC_SI_F_PSFP BIT(0) +#define ENETC_SI_F_QBV BIT(1) +#define ENETC_SI_F_QBU BIT(2) + +/* PCI IEP device data */ +struct enetc_si { + struct pci_dev *pdev; + struct enetc_hw hw; + enum enetc_errata errata; + + struct net_device *ndev; /* back ref. */ + + struct enetc_cbdr cbd_ring; + + int num_rx_rings; /* how many rings are available in the SI */ + int num_tx_rings; + int num_fs_entries; + int num_rss; /* number of RSS buckets */ + unsigned short pad; + int hw_features; +}; + +#define ENETC_SI_ALIGN 32 + +static inline void *enetc_si_priv(const struct enetc_si *si) +{ + return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN); +} + +static inline bool enetc_si_is_pf(struct enetc_si *si) +{ + return !!(si->hw.port); +} + +static inline int enetc_pf_to_port(struct pci_dev *pf_pdev) +{ + switch (pf_pdev->devfn) { + case 0: + return 0; + case 1: + return 1; + case 2: + return 2; + case 6: + return 3; + default: + return -1; + } +} + +#define ENETC_MAX_NUM_TXQS 8 +#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) + +struct enetc_int_vector { + void __iomem *rbier; + void __iomem *tbier_base; + void __iomem *ricr1; + unsigned long tx_rings_map; + int count_tx_rings; + u32 rx_ictt; + u16 comp_cnt; + bool rx_dim_en, rx_napi_work; + struct napi_struct napi ____cacheline_aligned_in_smp; + struct dim rx_dim ____cacheline_aligned_in_smp; + char name[ENETC_INT_NAME_MAX]; + + struct enetc_bdr rx_ring; + struct enetc_bdr tx_ring[]; +} ____cacheline_aligned_in_smp; + +struct enetc_cls_rule { + struct ethtool_rx_flow_spec fs; + int used; +}; + +#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */ +struct psfp_cap { + u32 max_streamid; + u32 max_psfp_filter; + u32 max_psfp_gate; + u32 max_psfp_gatelist; + u32 max_psfp_meter; +}; + +#define ENETC_F_TX_TSTAMP_MASK 0xff +enum enetc_active_offloads { + /* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */ + ENETC_F_TX_TSTAMP = BIT(0), + ENETC_F_TX_ONESTEP_SYNC_TSTAMP = BIT(1), + + ENETC_F_RX_TSTAMP = BIT(8), + ENETC_F_QBV = BIT(9), + ENETC_F_QCI = BIT(10), + ENETC_F_QBU = BIT(11), +}; + +enum enetc_flags_bit { + 
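+ /* set while a one-step Sync timestamp is pending in hardware;
+ * subsequent one-step skbs wait on the tx_skbs queue meanwhile
+ */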
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0, +}; + +/* interrupt coalescing modes */ +enum enetc_ic_mode { + /* one interrupt per frame */ + ENETC_IC_NONE = 0, + /* activated when int coalescing time is set to a non-0 value */ + ENETC_IC_RX_MANUAL = BIT(0), + ENETC_IC_TX_MANUAL = BIT(1), + /* use dynamic interrupt moderation */ + ENETC_IC_RX_ADAPTIVE = BIT(2), +}; + +#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2) +#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2) +#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600) + +struct enetc_ndev_priv { + struct net_device *ndev; + struct device *dev; /* dma-mapping device */ + struct enetc_si *si; + + int bdr_int_num; /* number of Rx/Tx ring interrupts */ + struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT]; + u16 num_rx_rings, num_tx_rings; + u16 rx_bd_count, tx_bd_count; + + u16 msg_enable; + + u8 preemptible_tcs; + + enum enetc_active_offloads active_offloads; + + u32 speed; /* store speed for compare update pspeed */ + + struct enetc_bdr **xdp_tx_ring; + struct enetc_bdr *tx_ring[16]; + struct enetc_bdr *rx_ring[16]; + const struct enetc_bdr_resource *tx_res; + const struct enetc_bdr_resource *rx_res; + + struct enetc_cls_rule *cls_rules; + + struct psfp_cap psfp_cap; + + /* Minimum number of TX queues required by the network stack */ + unsigned int min_num_stack_tx_queues; + + struct phylink *phylink; + int ic_mode; + u32 tx_ictt; + + struct bpf_prog *xdp_prog; + + unsigned long flags; + + struct work_struct tx_onestep_tstamp; + struct sk_buff_head tx_skbs; + + /* Serialize access to MAC Merge state between ethtool requests + * and link state updates + */ + struct mutex mm_lock; +}; + +/* Messaging */ + +/* VF-PF set primary MAC address message format */ +struct enetc_msg_cmd_set_primary_mac { + struct enetc_msg_cmd_header header; + struct sockaddr mac; +}; + +#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i])) + +#define ENETC_CBDR_TIMEOUT 1000 /* usecs */ + +/* PTP driver exports */ +extern int enetc_phc_index; + +/* SI common */ +u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg); +void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val); +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv); +void enetc_pci_remove(struct pci_dev *pdev); +int enetc_alloc_msix(struct enetc_ndev_priv *priv); +void enetc_free_msix(struct enetc_ndev_priv *priv); +void enetc_get_si_caps(struct enetc_si *si); +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); +void enetc_free_si_resources(struct enetc_ndev_priv *priv); +int enetc_configure_si(struct enetc_ndev_priv *priv); + +int enetc_open(struct net_device *ndev); +int enetc_close(struct net_device *ndev); +void enetc_start(struct net_device *ndev); +void enetc_stop(struct net_device *ndev); +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); +struct net_device_stats *enetc_get_stats(struct net_device *ndev); +void enetc_set_features(struct net_device *ndev, netdev_features_t features); +int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); +int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data); +void enetc_reset_tc_mqprio(struct net_device *ndev); +int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf); +int enetc_xdp_xmit(struct net_device *ndev, int num_frames, + struct xdp_frame **frames, u32 flags); + +/* ethtool */ +void enetc_set_ethtool_ops(struct net_device *ndev); +void 
enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link); +void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv); + +/* control buffer descriptor ring (CBDR) */ +int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, + struct enetc_cbdr *cbdr); +void enetc_teardown_cbdr(struct enetc_cbdr *cbdr); +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map); +int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index); +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes); +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); +int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd); + +static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si, + struct enetc_cbd *cbd, + int size, dma_addr_t *dma, + void **data_align) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + dma_addr_t dma_align; + void *data; + + data = dma_alloc_coherent(ring->dma_dev, + size + ENETC_CBD_DATA_MEM_ALIGN, + dma, GFP_KERNEL); + if (!data) { + dev_err(ring->dma_dev, "CBD alloc data memory failed!\n"); + return NULL; + } + + dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN); + *data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN); + + cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align)); + cbd->length = cpu_to_le16(size); + + return data; +} + +static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size, + void *data, dma_addr_t *dma) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + + dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN, + data, *dma); +} + +void enetc_reset_ptcmsdur(struct enetc_hw *hw); +void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu); + +#ifdef CONFIG_FSL_ENETC_QOS +int enetc_qos_query_caps(struct net_device *ndev, void *type_data); +int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data); +void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed); +int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data); +int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data); +int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv); +int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data); +int enetc_psfp_init(struct enetc_ndev_priv *priv); +int enetc_psfp_clean(struct enetc_ndev_priv *priv); +int enetc_set_psfp(struct net_device *ndev, bool en); + +static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + reg = enetc_port_rd(hw, ENETC_PSIDCAPR); + priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK; + /* Port stream filter capability */ + reg = enetc_port_rd(hw, ENETC_PSFCAPR); + priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK; + /* Port stream gate capability */ + reg = enetc_port_rd(hw, ENETC_PSGCAPR); + priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK); + priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16; + /* Port flow meter capability */ + reg = enetc_port_rd(hw, ENETC_PFMCAPR); + priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK; +} + +static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int err; + + enetc_get_max_cap(priv); + + err = enetc_psfp_init(priv); + if (err) + return err; + + 
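+ /* turn on PSFP processing for the port */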
enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) | + ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS | + ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC); + + return 0; +} + +static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int err; + + err = enetc_psfp_clean(priv); + if (err) + return err; + + enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) & + ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS & + ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC); + + memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap)); + + return 0; +} + +#else +#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP +#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP +#define enetc_sched_speed_set(priv, speed) (void)0 +#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP +#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP +#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP +#define enetc_setup_tc_block_cb NULL + +#define enetc_get_max_cap(p) \ + memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap)) + +static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv) +{ + return 0; +} + +static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) +{ + return 0; +} + +static inline int enetc_set_psfp(struct net_device *ndev, bool en) +{ + return 0; +} +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c new file mode 100644 index 0000000000..20bfdf7fb4 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" + +int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, + struct enetc_cbdr *cbdr) +{ + int size = bd_count * sizeof(struct enetc_cbd); + + cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, + GFP_KERNEL); + if (!cbdr->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { + dma_free_coherent(dev, size, cbdr->bd_base, + cbdr->bd_dma_base); + return -EINVAL; + } + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + cbdr->dma_dev = dev; + cbdr->bd_count = bd_count; + + cbdr->pir = hw->reg + ENETC_SICBDRPIR; + cbdr->cir = hw->reg + ENETC_SICBDRCIR; + cbdr->mr = hw->reg + ENETC_SICBDRMR; + + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); + + enetc_wr_reg(cbdr->pir, cbdr->next_to_clean); + enetc_wr_reg(cbdr->cir, cbdr->next_to_use); + /* enable ring */ + enetc_wr_reg(cbdr->mr, BIT(31)); + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_setup_cbdr); + +void enetc_teardown_cbdr(struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + /* disable ring */ + enetc_wr_reg(cbdr->mr, 0); + + dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base, + cbdr->bd_dma_base); + cbdr->bd_base = NULL; + cbdr->dma_dev = NULL; +} +EXPORT_SYMBOL_GPL(enetc_teardown_cbdr); + +static void enetc_clean_cbdr(struct enetc_cbdr *ring) +{ + struct enetc_cbd *dest_cbd; + int i, status; + + i = ring->next_to_clean; + + while (enetc_rd_reg(ring->cir) != i) { + dest_cbd = ENETC_CBD(*ring, i); + status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK; + if (status) + dev_warn(ring->dma_dev, 
"CMD err %04x for cmd %04x\n", + status, dest_cbd->cmd); + + memset(dest_cbd, 0, sizeof(*dest_cbd)); + + i = (i + 1) % ring->bd_count; + } + + ring->next_to_clean = i; +} + +static int enetc_cbd_unused(struct enetc_cbdr *r) +{ + return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) % + r->bd_count; +} + +int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + int timeout = ENETC_CBDR_TIMEOUT; + struct enetc_cbd *dest_cbd; + int i; + + if (unlikely(!ring->bd_base)) + return -EIO; + + if (unlikely(!enetc_cbd_unused(ring))) + enetc_clean_cbdr(ring); + + i = ring->next_to_use; + dest_cbd = ENETC_CBD(*ring, i); + + /* copy command to the ring */ + *dest_cbd = *cbd; + i = (i + 1) % ring->bd_count; + + ring->next_to_use = i; + /* let H/W know BD ring has been updated */ + enetc_wr_reg(ring->pir, i); + + do { + if (enetc_rd_reg(ring->cir) == i) + break; + udelay(10); /* cannot sleep, rtnl_lock() */ + timeout -= 10; + } while (timeout); + + if (!timeout) + return -EBUSY; + + /* CBD may writeback data, feedback up level */ + *cbd = *dest_cbd; + + enetc_clean_cbdr(ring); + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_send_cmd); + +int enetc_clear_mac_flt_entry(struct enetc_si *si, int index) +{ + struct enetc_cbd cbd; + + memset(&cbd, 0, sizeof(cbd)); + + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + + return enetc_send_cmd(si, &cbd); +} +EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry); + +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map) +{ + struct enetc_cbd cbd; + u32 upper; + u16 lower; + + memset(&cbd, 0, sizeof(cbd)); + + /* fill up the "set" descriptor */ + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + cbd.opt[3] = cpu_to_le32(si_map); + /* enable entry */ + cbd.opt[0] = cpu_to_le32(BIT(31)); + + upper = *(const u32 *)mac_addr; + lower = *(const u16 *)(mac_addr + 4); + cbd.addr[0] = cpu_to_le32(upper); + cbd.addr[1] = cpu_to_le32(lower); + + return enetc_send_cmd(si, &cbd); +} +EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry); + +/* Set entry in RFS table */ +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + struct enetc_cbd cbd = {.cmd = 0}; + void *tmp, *tmp_align; + dma_addr_t dma; + int err; + + /* fill up the "set" descriptor */ + cbd.cmd = 0; + cbd.cls = 4; + cbd.index = cpu_to_le16(index); + cbd.opt[3] = cpu_to_le32(0); /* SI */ + + tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse), + &dma, &tmp_align); + if (!tmp) + return -ENOMEM; + + memcpy(tmp_align, rfse, sizeof(*rfse)); + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(ring->dma_dev, "FS entry add failed (%d)!", err); + + enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma); + + return err; +} +EXPORT_SYMBOL_GPL(enetc_set_fs_entry); + +static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, + bool read) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + struct enetc_cbd cbd = {.cmd = 0}; + u8 *tmp, *tmp_align; + dma_addr_t dma; + int err, i; + + if (count < ENETC_CBD_DATA_MEM_ALIGN) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + tmp = enetc_cbd_alloc_data_mem(si, &cbd, count, + &dma, (void *)&tmp_align); + if (!tmp) + return -ENOMEM; + + if (!read) + for (i = 0; i < count; i++) + tmp_align[i] = (u8)(table[i]); + + /* fill up the descriptor */ + cbd.cmd = read ? 
2 : 1; + cbd.cls = 3; + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err); + + if (read) + for (i = 0; i < count; i++) + table[i] = tmp_align[i]; + + enetc_cbd_free_data_mem(si, count, tmp, &dma); + + return err; +} + +/* Get RSS table */ +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count) +{ + return enetc_cmd_rss_table(si, table, count, true); +} +EXPORT_SYMBOL_GPL(enetc_get_rss_table); + +/* Set RSS table */ +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count) +{ + return enetc_cmd_rss_table(si, (u32 *)table, count, false); +} +EXPORT_SYMBOL_GPL(enetc_set_rss_table); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c new file mode 100644 index 0000000000..e993ed04ab --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -0,0 +1,1245 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <linux/ethtool_netlink.h> +#include <linux/net_tstamp.h> +#include <linux/module.h> +#include "enetc.h" + +static const u32 enetc_si_regs[] = { + ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR, + ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR, + ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1, + ENETC_SIUEFDCR +}; + +static const u32 enetc_txbdr_regs[] = { + ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1, + ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER, ENETC_TBICR0, + ENETC_TBICR1 +}; + +static const u32 enetc_rxbdr_regs[] = { + ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0, + ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBIER, ENETC_RBICR0, + ENETC_RBICR1 +}; + +static const u32 enetc_port_regs[] = { + ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0), + ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1, + ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0), + ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE +}; + +static const u32 enetc_port_mm_regs[] = { + ENETC_MMCSR, ENETC_PFPMR, ENETC_PTCFPR(0), ENETC_PTCFPR(1), + ENETC_PTCFPR(2), ENETC_PTCFPR(3), ENETC_PTCFPR(4), ENETC_PTCFPR(5), + ENETC_PTCFPR(6), ENETC_PTCFPR(7), +}; + +static int enetc_get_reglen(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int len; + + len = ARRAY_SIZE(enetc_si_regs); + len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings; + len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings; + + if (hw->port) + len += ARRAY_SIZE(enetc_port_regs); + + if (hw->port && !!(priv->si->hw_features & ENETC_SI_F_QBU)) + len += ARRAY_SIZE(enetc_port_mm_regs); + + len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */ + + return len; +} + +static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *regbuf) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 *buf = (u32 *)regbuf; + int i, j; + u32 addr; + + for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) { + *buf++ = enetc_si_regs[i]; + *buf++ = enetc_rd(hw, enetc_si_regs[i]); + } + + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) { + addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) { + addr = ENETC_BDR(RX, i, 
enetc_rxbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + if (!hw->port) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) { + addr = ENETC_PORT_BASE + enetc_port_regs[i]; + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + + if (priv->si->hw_features & ENETC_SI_F_QBU) { + for (i = 0; i < ARRAY_SIZE(enetc_port_mm_regs); i++) { + addr = ENETC_PORT_BASE + enetc_port_mm_regs[i]; + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } +} + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_si_counters[] = { + { ENETC_SIROCT, "SI rx octets" }, + { ENETC_SIRFRM, "SI rx frames" }, + { ENETC_SIRUCA, "SI rx u-cast frames" }, + { ENETC_SIRMCA, "SI rx m-cast frames" }, + { ENETC_SITOCT, "SI tx octets" }, + { ENETC_SITFRM, "SI tx frames" }, + { ENETC_SITUCA, "SI tx u-cast frames" }, + { ENETC_SITMCA, "SI tx m-cast frames" }, + { ENETC_RBDCR(0), "Rx ring 0 discarded frames" }, + { ENETC_RBDCR(1), "Rx ring 1 discarded frames" }, + { ENETC_RBDCR(2), "Rx ring 2 discarded frames" }, + { ENETC_RBDCR(3), "Rx ring 3 discarded frames" }, + { ENETC_RBDCR(4), "Rx ring 4 discarded frames" }, + { ENETC_RBDCR(5), "Rx ring 5 discarded frames" }, + { ENETC_RBDCR(6), "Rx ring 6 discarded frames" }, + { ENETC_RBDCR(7), "Rx ring 7 discarded frames" }, + { ENETC_RBDCR(8), "Rx ring 8 discarded frames" }, + { ENETC_RBDCR(9), "Rx ring 9 discarded frames" }, + { ENETC_RBDCR(10), "Rx ring 10 discarded frames" }, + { ENETC_RBDCR(11), "Rx ring 11 discarded frames" }, + { ENETC_RBDCR(12), "Rx ring 12 discarded frames" }, + { ENETC_RBDCR(13), "Rx ring 13 discarded frames" }, + { ENETC_RBDCR(14), "Rx ring 14 discarded frames" }, + { ENETC_RBDCR(15), "Rx ring 15 discarded frames" }, +}; + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_port_counters[] = { + { ENETC_PM_REOCT(0), "MAC rx ethernet octets" }, + { ENETC_PM_RALN(0), "MAC rx alignment errors" }, + { ENETC_PM_RXPF(0), "MAC rx valid pause frames" }, + { ENETC_PM_RFRM(0), "MAC rx valid frames" }, + { ENETC_PM_RFCS(0), "MAC rx fcs errors" }, + { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" }, + { ENETC_PM_RERR(0), "MAC rx frame errors" }, + { ENETC_PM_RUCA(0), "MAC rx unicast frames" }, + { ENETC_PM_RMCA(0), "MAC rx multicast frames" }, + { ENETC_PM_RBCA(0), "MAC rx broadcast frames" }, + { ENETC_PM_RDRP(0), "MAC rx dropped packets" }, + { ENETC_PM_RPKT(0), "MAC rx packets" }, + { ENETC_PM_RUND(0), "MAC rx undersized packets" }, + { ENETC_PM_R64(0), "MAC rx 64 byte packets" }, + { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" }, + { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" }, + { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" }, + { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" }, + { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" }, + { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" }, + { ENETC_PM_ROVR(0), "MAC rx oversized packets" }, + { ENETC_PM_RJBR(0), "MAC rx jabber packets" }, + { ENETC_PM_RFRG(0), "MAC rx fragment packets" }, + { ENETC_PM_RCNP(0), "MAC rx control packets" }, + { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" }, + { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" }, + { ENETC_PM_TOCT(0), "MAC tx octets" }, + { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" }, + { ENETC_PM_TXPF(0), "MAC tx valid pause frames" }, + { ENETC_PM_TFRM(0), "MAC tx frames" }, + { ENETC_PM_TFCS(0), "MAC tx fcs errors" }, + { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" }, + { ENETC_PM_TERR(0), "MAC tx frame errors" }, + { ENETC_PM_TUCA(0), "MAC tx unicast frames" }, + { 
ENETC_PM_TMCA(0), "MAC tx multicast frames" }, + { ENETC_PM_TBCA(0), "MAC tx broadcast frames" }, + { ENETC_PM_TPKT(0), "MAC tx packets" }, + { ENETC_PM_TUND(0), "MAC tx undersized packets" }, + { ENETC_PM_T64(0), "MAC tx 64 byte packets" }, + { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" }, + { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" }, + { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" }, + { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" }, + { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" }, + { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" }, + { ENETC_PM_TCNP(0), "MAC tx control packets" }, + { ENETC_PM_TDFR(0), "MAC tx deferred packets" }, + { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" }, + { ENETC_PM_TSCOL(0), "MAC tx single collisions" }, + { ENETC_PM_TLCOL(0), "MAC tx late collisions" }, + { ENETC_PM_TECOL(0), "MAC tx excessive collisions" }, + { ENETC_UFDMF, "SI MAC nomatch u-cast discards" }, + { ENETC_MFDMF, "SI MAC nomatch m-cast discards" }, + { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" }, + { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" }, + { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" }, + { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" }, + { ENETC_PFDMSAPR, "SI pruning discarded frames" }, + { ENETC_PICDR(0), "ICM DR0 discarded frames" }, + { ENETC_PICDR(1), "ICM DR1 discarded frames" }, + { ENETC_PICDR(2), "ICM DR2 discarded frames" }, + { ENETC_PICDR(3), "ICM DR3 discarded frames" }, +}; + +static const char rx_ring_stats[][ETH_GSTRING_LEN] = { + "Rx ring %2d frames", + "Rx ring %2d alloc errors", + "Rx ring %2d XDP drops", + "Rx ring %2d recycles", + "Rx ring %2d recycle failures", + "Rx ring %2d redirects", + "Rx ring %2d redirect failures", +}; + +static const char tx_ring_stats[][ETH_GSTRING_LEN] = { + "Tx ring %2d frames", + "Tx ring %2d XDP frames", + "Tx ring %2d XDP drops", + "Tx window drop %2d frames", +}; + +static int enetc_get_sset_count(struct net_device *ndev, int sset) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int len; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + len = ARRAY_SIZE(enetc_si_counters) + + ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings + + ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings; + + if (!enetc_si_is_pf(priv->si)) + return len; + + len += ARRAY_SIZE(enetc_port_counters); + + return len; +} + +static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u8 *p = data; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) { + strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + + if (!enetc_si_is_pf(priv->si)) + break; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) { + strscpy(p, enetc_port_counters[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void enetc_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i, o = 0; + + for (i = 0; i < 
ARRAY_SIZE(enetc_si_counters); i++) + data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg); + + for (i = 0; i < priv->num_tx_rings; i++) { + data[o++] = priv->tx_ring[i]->stats.packets; + data[o++] = priv->tx_ring[i]->stats.xdp_tx; + data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops; + data[o++] = priv->tx_ring[i]->stats.win_drop; + } + + for (i = 0; i < priv->num_rx_rings; i++) { + data[o++] = priv->rx_ring[i]->stats.packets; + data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs; + data[o++] = priv->rx_ring[i]->stats.xdp_drops; + data[o++] = priv->rx_ring[i]->stats.recycles; + data[o++] = priv->rx_ring[i]->stats.recycle_failures; + data[o++] = priv->rx_ring[i]->stats.xdp_redirect; + data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures; + } + + if (!enetc_si_is_pf(priv->si)) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) + data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg); +} + +static void enetc_pause_stats(struct enetc_hw *hw, int mac, + struct ethtool_pause_stats *pause_stats) +{ + pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac)); + pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac)); +} + +static void enetc_get_pause_stats(struct net_device *ndev, + struct ethtool_pause_stats *pause_stats) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_si *si = priv->si; + + switch (pause_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + enetc_pause_stats(hw, 0, pause_stats); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (si->hw_features & ENETC_SI_F_QBU) + enetc_pause_stats(hw, 1, pause_stats); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + ethtool_aggregate_pause_stats(ndev, pause_stats); + break; + } +} + +static void enetc_mac_stats(struct enetc_hw *hw, int mac, + struct ethtool_eth_mac_stats *s) +{ + s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac)); + s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac)); + s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac)); + s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac)); + s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac)); + s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac)); + s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac)); + s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac)); + s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac)); + s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac)); + s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac)); + s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac)); + s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac)); + s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac)); + s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac)); + s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac)); + s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac)); + s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac)); +} + +static void enetc_ctrl_stats(struct enetc_hw *hw, int mac, + struct ethtool_eth_ctrl_stats *s) +{ + s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac)); + s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac)); +} + +static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = { + { 64, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1522 }, + { 1523, 
ENETC_MAC_MAXFRM_SIZE }, + {}, +}; + +static void enetc_rmon_stats(struct enetc_hw *hw, int mac, + struct ethtool_rmon_stats *s) +{ + s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac)); + s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac)); + s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac)); + s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac)); + + s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac)); + s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac)); + s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac)); + s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac)); + s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac)); + s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac)); + s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac)); + + s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac)); + s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac)); + s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac)); + s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac)); + s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac)); + s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac)); + s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac)); +} + +static void enetc_get_eth_mac_stats(struct net_device *ndev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_si *si = priv->si; + + switch (mac_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + enetc_mac_stats(hw, 0, mac_stats); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (si->hw_features & ENETC_SI_F_QBU) + enetc_mac_stats(hw, 1, mac_stats); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + ethtool_aggregate_mac_stats(ndev, mac_stats); + break; + } +} + +static void enetc_get_eth_ctrl_stats(struct net_device *ndev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_si *si = priv->si; + + switch (ctrl_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + enetc_ctrl_stats(hw, 0, ctrl_stats); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (si->hw_features & ENETC_SI_F_QBU) + enetc_ctrl_stats(hw, 1, ctrl_stats); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + ethtool_aggregate_ctrl_stats(ndev, ctrl_stats); + break; + } +} + +static void enetc_get_rmon_stats(struct net_device *ndev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_si *si = priv->si; + + *ranges = enetc_rmon_ranges; + + switch (rmon_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + enetc_rmon_stats(hw, 0, rmon_stats); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (si->hw_features & ENETC_SI_F_QBU) + enetc_rmon_stats(hw, 1, rmon_stats); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + ethtool_aggregate_rmon_stats(ndev, rmon_stats); + break; + } +} + +#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \ + RXH_IP_DST) +#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc) +{ + static const u32 rsshash[] = { + [TCP_V4_FLOW] = ENETC_RSSHASH_L4, + [UDP_V4_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V4_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3, + [IPV4_FLOW] = ENETC_RSSHASH_L3, + [TCP_V6_FLOW] = ENETC_RSSHASH_L4, + [UDP_V6_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V6_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3, 
+ [IPV6_FLOW] = ENETC_RSSHASH_L3, + [ETHER_FLOW] = 0, + }; + + if (rxnfc->flow_type >= ARRAY_SIZE(rsshash)) + return -EINVAL; + + rxnfc->data = rsshash[rxnfc->flow_type]; + + return 0; +} + +/* current HW spec does byte reversal on everything including MAC addresses */ +static void ether_addr_copy_swap(u8 *dst, const u8 *src) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[i] = src[ETH_ALEN - i - 1]; +} + +static int enetc_set_cls_entry(struct enetc_si *si, + struct ethtool_rx_flow_spec *fs, bool en) +{ + struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; + struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m; + struct ethhdr *eth_h, *eth_m; + struct enetc_cmd_rfse rfse = { {0} }; + + if (!en) + goto done; + + switch (fs->flow_type & 0xff) { + case TCP_V4_FLOW: + l4ip4_h = &fs->h_u.tcp_ip4_spec; + l4ip4_m = &fs->m_u.tcp_ip4_spec; + goto l4ip4; + case UDP_V4_FLOW: + l4ip4_h = &fs->h_u.udp_ip4_spec; + l4ip4_m = &fs->m_u.udp_ip4_spec; + goto l4ip4; + case SCTP_V4_FLOW: + l4ip4_h = &fs->h_u.sctp_ip4_spec; + l4ip4_m = &fs->m_u.sctp_ip4_spec; +l4ip4: + rfse.sip_h[0] = l4ip4_h->ip4src; + rfse.sip_m[0] = l4ip4_m->ip4src; + rfse.dip_h[0] = l4ip4_h->ip4dst; + rfse.dip_m[0] = l4ip4_m->ip4dst; + rfse.sport_h = ntohs(l4ip4_h->psrc); + rfse.sport_m = ntohs(l4ip4_m->psrc); + rfse.dport_h = ntohs(l4ip4_h->pdst); + rfse.dport_m = ntohs(l4ip4_m->pdst); + if (l4ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case IP_USER_FLOW: + l3ip4_h = &fs->h_u.usr_ip4_spec; + l3ip4_m = &fs->m_u.usr_ip4_spec; + + rfse.sip_h[0] = l3ip4_h->ip4src; + rfse.sip_m[0] = l3ip4_m->ip4src; + rfse.dip_h[0] = l3ip4_h->ip4dst; + rfse.dip_m[0] = l3ip4_m->ip4dst; + if (l3ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case ETHER_FLOW: + eth_h = &fs->h_u.ether_spec; + eth_m = &fs->m_u.ether_spec; + + ether_addr_copy_swap(rfse.smac_h, eth_h->h_source); + ether_addr_copy_swap(rfse.smac_m, eth_m->h_source); + ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest); + ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest); + rfse.ethtype_h = ntohs(eth_h->h_proto); + rfse.ethtype_m = ntohs(eth_m->h_proto); + break; + default: + return -EOPNOTSUPP; + } + + rfse.mode |= ENETC_RFSE_EN; + if (fs->ring_cookie != RX_CLS_FLOW_DISC) { + rfse.mode |= ENETC_RFSE_MODE_BD; + rfse.result = fs->ring_cookie; + } +done: + return enetc_set_fs_entry(si, &rfse, fs->location); +} + +static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i, j; + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->num_rx_rings; + break; + case ETHTOOL_GRXFH: + /* get RSS hash config */ + return enetc_get_rsshash(rxnfc); + case ETHTOOL_GRXCLSRLCNT: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* number of entries in use */ + rxnfc->rule_cnt = 0; + for (i = 0; i < priv->si->num_fs_entries; i++) + if (priv->cls_rules[i].used) + rxnfc->rule_cnt++; + break; + case ETHTOOL_GRXCLSRULE: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + /* get entry x */ + rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; + break; + case ETHTOOL_GRXCLSRLALL: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* array of indexes of used entries */ + j = 0; + for (i = 0; i < 
priv->si->num_fs_entries; i++) { + if (!priv->cls_rules[i].used) + continue; + if (j == rxnfc->rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = i; + } + /* number of entries in use */ + rxnfc->rule_cnt = j; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + switch (rxnfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + if (rxnfc->fs.ring_cookie >= priv->num_rx_rings && + rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs; + priv->cls_rules[rxnfc->fs.location].used = 1; + break; + case ETHTOOL_SRXCLSRLDEL: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].used = 0; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 enetc_get_rxfh_key_size(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash key. PF only */ + return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0; +} + +static u32 enetc_get_rxfh_indir_size(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash indirection table */ + return priv->si->num_rss; +} + +static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0, i; + + /* return hash function */ + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* return hash key */ + if (key && hw->port) + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i)); + + /* return RSS table */ + if (indir) + err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes) +{ + int i; + + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]); +} +EXPORT_SYMBOL_GPL(enetc_set_rss_key); + +static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0; + + /* set hash key, if PF */ + if (key && hw->port) + enetc_set_rss_key(hw, key); + + /* set RSS table */ + if (indir) + err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +static void enetc_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + ring->rx_pending = priv->rx_bd_count; + ring->tx_pending = priv->tx_bd_count; + + /* do some h/w sanity checks for BDR length */ + if (netif_running(ndev)) { + struct enetc_hw *hw = &priv->si->hw; + u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR); + + if (val != priv->rx_bd_count) + netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val); + + val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR); + + if (val != priv->tx_bd_count) + netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val); + } +} + +static int 
enetc_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_int_vector *v = priv->int_vector[0]; + + ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt); + ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt); + + ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR; + ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR; + + ic->use_adaptive_rx_coalesce = priv->ic_mode & ENETC_IC_RX_ADAPTIVE; + + return 0; +} + +static int enetc_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u32 rx_ictt, tx_ictt; + int i, ic_mode; + bool changed; + + tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs); + rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs); + + if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR) + return -EOPNOTSUPP; + + if (ic->tx_max_coalesced_frames != ENETC_TXIC_PKTTHR) + return -EOPNOTSUPP; + + ic_mode = ENETC_IC_NONE; + if (ic->use_adaptive_rx_coalesce) { + ic_mode |= ENETC_IC_RX_ADAPTIVE; + rx_ictt = 0x1; + } else { + ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0; + } + + ic_mode |= tx_ictt ? ENETC_IC_TX_MANUAL : 0; + + /* commit the settings */ + changed = (ic_mode != priv->ic_mode) || (priv->tx_ictt != tx_ictt); + + priv->ic_mode = ic_mode; + priv->tx_ictt = tx_ictt; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v = priv->int_vector[i]; + + v->rx_ictt = rx_ictt; + v->rx_dim_en = !!(ic_mode & ENETC_IC_RX_ADAPTIVE); + } + + if (netif_running(ndev) && changed) { + /* reconfigure the operation mode of h/w interrupts, + * traffic needs to be paused in the process + */ + enetc_stop(ndev); + enetc_start(ndev); + } + + return 0; +} + +static int enetc_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + int *phc_idx; + + phc_idx = symbol_get(enetc_phc_index); + if (phc_idx) { + info->phc_index = *phc_idx; + symbol_put(enetc_phc_index); + } else { + info->phc_index = -1; + } + +#ifdef CONFIG_FSL_ENETC_PTP_CLOCK + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON) | + (1 << HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); +#else + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; +#endif + return 0; +} + +static void enetc_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); +} + +static int enetc_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + int ret; + + if (!dev->phydev) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_wol(dev->phydev, wol); + if (!ret) + device_set_wakeup_enable(&dev->dev, wol->wolopts); + + return ret; +} + +static void enetc_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(priv->phylink, pause); +} + +static int enetc_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam 
*pause)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+	return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
+static int enetc_get_link_ksettings(struct net_device *dev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+	if (!priv->phylink)
+		return -EOPNOTSUPP;
+
+	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static int enetc_set_link_ksettings(struct net_device *dev,
+				    const struct ethtool_link_ksettings *cmd)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+	if (!priv->phylink)
+		return -EOPNOTSUPP;
+
+	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static void enetc_get_mm_stats(struct net_device *ndev,
+			       struct ethtool_mm_stats *s)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	struct enetc_si *si = priv->si;
+
+	if (!(si->hw_features & ENETC_SI_F_QBU))
+		return;
+
+	s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR);
+	s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR);
+	s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR);
+	s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR);
+	s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR);
+	s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR);
+}
+
+static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_si *si = priv->si;
+	struct enetc_hw *hw = &si->hw;
+	u32 lafs, rafs, val;
+
+	if (!(si->hw_features & ENETC_SI_F_QBU))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&priv->mm_lock);
+
+	val = enetc_port_rd(hw, ENETC_PFPMR);
+	state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE);
+
+	val = enetc_port_rd(hw, ENETC_MMCSR);
+
+	switch (ENETC_MMCSR_GET_VSTS(val)) {
+	case 0:
+		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+		break;
+	case 2:
+		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+		break;
+	case 3:
+		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+		break;
+	case 4:
+		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+		break;
+	case 5:
+	default:
+		state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+		break;
+	}
+
+	rafs = ENETC_MMCSR_GET_RAFS(val);
+	state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs);
+	lafs = ENETC_MMCSR_GET_LAFS(val);
+	state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
+	state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
+	state->tx_active = state->tx_enabled &&
+			   (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+			    state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
+	state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
+	state->verify_time = ENETC_MMCSR_GET_VT(val);
+	/* A verifyTime of 128 ms would exceed the 7 bit width
+	 * of the ENETC_MMCSR_VT field
+	 */
+	state->max_verify_time = 127;
+
+	mutex_unlock(&priv->mm_lock);
+
+	return 0;
+}
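+
+/* Illustrative sketch, not part of the driver: enetc_mm_wait_tx_active()
+ * below delegates its polling to read_poll_timeout(). Assuming that helper
+ * sleeps roughly ENETC_MM_VERIFY_SLEEP_US before each read, an open-coded
+ * equivalent would look like this (the function name is hypothetical):
+ */
+static int __maybe_unused enetc_mm_wait_tx_active_sketch(struct enetc_hw *hw,
+							 int verify_time)
+{
+	/* 3 verification attempts of verify_time milliseconds each,
+	 * expressed in microseconds
+	 */
+	int budget = verify_time * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES;
+	u32 val;
+
+	while (budget > 0) {
+		usleep_range(ENETC_MM_VERIFY_SLEEP_US,
+			     2 * ENETC_MM_VERIFY_SLEEP_US);
+		budget -= ENETC_MM_VERIFY_SLEEP_US;
+
+		val = enetc_port_rd(hw, ENETC_MMCSR);
+		/* VSTS == 3 is the SUCCEEDED state decoded by enetc_get_mm() */
+		if (ENETC_MMCSR_GET_VSTS(val) == 3)
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}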
+static int enetc_mm_wait_tx_active(struct enetc_hw *hw, int verify_time)
+{
+	int timeout = verify_time * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES;
+	u32 val;
+
+	/* This will time out after the standard value of 3 verification
+	 * attempts. To not sleep forever, it relies on a non-zero verify_time,
+	 * a guarantee provided by the ethtool nlattr policy.
+	 */
+	return read_poll_timeout(enetc_port_rd, val,
+				 ENETC_MMCSR_GET_VSTS(val) == 3,
+				 ENETC_MM_VERIFY_SLEEP_US, timeout,
+				 true, hw, ENETC_MMCSR);
+}
+
+static void enetc_set_ptcfpr(struct enetc_hw *hw, u8 preemptible_tcs)
+{
+	u32 val;
+	int tc;
+
+	for (tc = 0; tc < 8; tc++) {
+		val = enetc_port_rd(hw, ENETC_PTCFPR(tc));
+
+		if (preemptible_tcs & BIT(tc))
+			val |= ENETC_PTCFPR_FPE;
+		else
+			val &= ~ENETC_PTCFPR_FPE;
+
+		enetc_port_wr(hw, ENETC_PTCFPR(tc), val);
+	}
+}
+
+/* ENETC does not have an IRQ to notify changes to the MAC Merge TX status
+ * (active/inactive), but the preemptible traffic classes should only be
+ * committed to hardware once TX is active. Resort to polling.
+ */
+void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv)
+{
+	struct enetc_hw *hw = &priv->si->hw;
+	u8 preemptible_tcs = 0;
+	u32 val;
+	int err;
+
+	val = enetc_port_rd(hw, ENETC_MMCSR);
+	if (!(val & ENETC_MMCSR_ME))
+		goto out;
+
+	if (!(val & ENETC_MMCSR_VDIS)) {
+		err = enetc_mm_wait_tx_active(hw, ENETC_MMCSR_GET_VT(val));
+		if (err)
+			goto out;
+	}
+
+	preemptible_tcs = priv->preemptible_tcs;
+out:
+	enetc_set_ptcfpr(hw, preemptible_tcs);
+}
+
+/* FIXME: Workaround for the link partner's verification failing if ENETC
+ * previously received too much express traffic. The documentation doesn't
+ * suggest this is needed.
+ */
+static void enetc_restart_emac_rx(struct enetc_si *si)
+{
+	u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG);
+
+	enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN);
+
+	if (val & ENETC_PM0_RX_EN)
+		enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val);
+}
+
+static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+			struct netlink_ext_ack *extack)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	struct enetc_si *si = priv->si;
+	u32 val, add_frag_size;
+	int err;
+
+	if (!(si->hw_features & ENETC_SI_F_QBU))
+		return -EOPNOTSUPP;
+
+	err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+					      &add_frag_size, extack);
+	if (err)
+		return err;
+
+	mutex_lock(&priv->mm_lock);
+
+	val = enetc_port_rd(hw, ENETC_PFPMR);
+	if (cfg->pmac_enabled)
+		val |= ENETC_PFPMR_PMACE;
+	else
+		val &= ~ENETC_PFPMR_PMACE;
+	enetc_port_wr(hw, ENETC_PFPMR, val);
+
+	val = enetc_port_rd(hw, ENETC_MMCSR);
+
+	if (cfg->verify_enabled)
+		val &= ~ENETC_MMCSR_VDIS;
+	else
+		val |= ENETC_MMCSR_VDIS;
+
+	if (cfg->tx_enabled)
+		priv->active_offloads |= ENETC_F_QBU;
+	else
+		priv->active_offloads &= ~ENETC_F_QBU;
+
+	/* If link is up, enable/disable MAC Merge right away */
+	if (!(val & ENETC_MMCSR_LINK_FAIL)) {
+		if (!!(priv->active_offloads & ENETC_F_QBU))
+			val |= ENETC_MMCSR_ME;
+		else
+			val &= ~ENETC_MMCSR_ME;
+	}
+
+	val &= ~ENETC_MMCSR_VT_MASK;
+	val |= ENETC_MMCSR_VT(cfg->verify_time);
+
+	val &= ~ENETC_MMCSR_RAFS_MASK;
+	val |= ENETC_MMCSR_RAFS(add_frag_size);
+
+	enetc_port_wr(hw, ENETC_MMCSR, val);
+
+	enetc_restart_emac_rx(priv->si);
+
+	enetc_mm_commit_preemptible_tcs(priv);
+
+	mutex_unlock(&priv->mm_lock);
+
+	return 0;
+}
+
+/* When the link is lost, the verification state machine goes to the FAILED
+ * state and doesn't restart on its own after a new link up event.
+ * According to 802.3 Figure 99-8 - Verify state diagram, the LINK_FAIL bit
+ * should have been sufficient to re-trigger verification, but for ENETC it
+ * is not. As a workaround, we need to toggle the Merge Enable bit to
+ * re-trigger verification when link comes up.
+ */ +void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 val; + + mutex_lock(&priv->mm_lock); + + val = enetc_port_rd(hw, ENETC_MMCSR); + + if (link) { + val &= ~ENETC_MMCSR_LINK_FAIL; + if (priv->active_offloads & ENETC_F_QBU) + val |= ENETC_MMCSR_ME; + } else { + val |= ENETC_MMCSR_LINK_FAIL; + if (priv->active_offloads & ENETC_F_QBU) + val &= ~ENETC_MMCSR_ME; + } + + enetc_port_wr(hw, ENETC_MMCSR, val); + + enetc_mm_commit_preemptible_tcs(priv); + + mutex_unlock(&priv->mm_lock); +} +EXPORT_SYMBOL_GPL(enetc_mm_link_state_update); + +static const struct ethtool_ops enetc_pf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_regs_len = enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_pause_stats = enetc_get_pause_stats, + .get_rmon_stats = enetc_get_rmon_stats, + .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats, + .get_eth_mac_stats = enetc_get_eth_mac_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, + .get_link_ksettings = enetc_get_link_ksettings, + .set_link_ksettings = enetc_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_ts_info = enetc_get_ts_info, + .get_wol = enetc_get_wol, + .set_wol = enetc_set_wol, + .get_pauseparam = enetc_get_pauseparam, + .set_pauseparam = enetc_set_pauseparam, + .get_mm = enetc_get_mm, + .set_mm = enetc_set_mm, + .get_mm_stats = enetc_get_mm_stats, +}; + +static const struct ethtool_ops enetc_vf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_regs_len = enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, + .get_link = ethtool_op_get_link, + .get_ts_info = enetc_get_ts_info, +}; + +void enetc_set_ethtool_ops(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (enetc_si_is_pf(priv->si)) + ndev->ethtool_ops = &enetc_pf_ethtool_ops; + else + ndev->ethtool_ops = &enetc_vf_ethtool_ops; +} +EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h new file mode 100644 index 0000000000..1619943fb2 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -0,0 +1,993 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include <linux/bitops.h> + +#define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC +#define ENETC_MM_VERIFY_RETRIES 3 + +/* ENETC device IDs */ +#define ENETC_DEV_ID_PF 0xe100 +#define ENETC_DEV_ID_VF 0xef00 +#define ENETC_DEV_ID_PTP 0xee02 + +/* ENETC register block BAR */ 
+#define ENETC_BAR_REGS 0 + +/** SI regs, offset: 0h */ +#define ENETC_SIMR 0 +#define ENETC_SIMR_EN BIT(31) +#define ENETC_SIMR_RSSE BIT(0) +#define ENETC_SICTR0 0x18 +#define ENETC_SICTR1 0x1c +#define ENETC_SIPCAPR0 0x20 +#define ENETC_SIPCAPR0_PSFP BIT(9) +#define ENETC_SIPCAPR0_RSS BIT(8) +#define ENETC_SIPCAPR0_QBV BIT(4) +#define ENETC_SIPCAPR0_QBU BIT(3) +#define ENETC_SIPCAPR1 0x24 +#define ENETC_SITGTGR 0x30 +#define ENETC_SIRBGCR 0x38 +/* cache attribute registers for transactions initiated by ENETC */ +#define ENETC_SICAR0 0x40 +#define ENETC_SICAR1 0x44 +#define ENETC_SICAR2 0x48 +/* rd snoop, no alloc + * wr snoop, no alloc, partial cache line update for BDs and full cache line + * update for data + */ +#define ENETC_SICAR_RD_COHERENT 0x2b2b0000 +#define ENETC_SICAR_WR_COHERENT 0x00006727 +#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */ + +#define ENETC_SIPMAR0 0x80 +#define ENETC_SIPMAR1 0x84 + +/* VF-PF Message passing */ +#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */ +/* msg size encoding: default and max msg value of 1024B encoded as 0 */ +static inline u32 enetc_vsi_set_msize(u32 size) +{ + return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0; +} + +#define ENETC_PSIMSGRR 0x204 +#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1) +#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8) + +#define ENETC_VSIMSGSR 0x204 /* RO */ +#define ENETC_VSIMSGSR_MB BIT(0) +#define ENETC_VSIMSGSR_MS BIT(1) +#define ENETC_VSIMSGSNDAR0 0x210 +#define ENETC_VSIMSGSNDAR1 0x214 + +#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16) +#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16) + +/* SI statistics */ +#define ENETC_SIROCT 0x300 +#define ENETC_SIRFRM 0x308 +#define ENETC_SIRUCA 0x310 +#define ENETC_SIRMCA 0x318 +#define ENETC_SITOCT 0x320 +#define ENETC_SITFRM 0x328 +#define ENETC_SITUCA 0x330 +#define ENETC_SITMCA 0x338 +#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200) + +/* Control BDR regs */ +#define ENETC_SICBDRMR 0x800 +#define ENETC_SICBDRSR 0x804 /* RO */ +#define ENETC_SICBDRBAR0 0x810 +#define ENETC_SICBDRBAR1 0x814 +#define ENETC_SICBDRPIR 0x818 +#define ENETC_SICBDRCIR 0x81c +#define ENETC_SICBDRLENR 0x820 + +#define ENETC_SICAPR0 0x900 +#define ENETC_SICAPR1 0x904 + +#define ENETC_PSIIER 0xa00 +#define ENETC_PSIIER_MR_MASK GENMASK(2, 1) +#define ENETC_PSIIDR 0xa08 +#define ENETC_SITXIDR 0xa18 +#define ENETC_SIRXIDR 0xa28 +#define ENETC_SIMSIVR 0xa30 + +#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4) +#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4) + +#define ENETC_SIUEFDCR 0xe28 + +#define ENETC_SIRFSCAPR 0x1200 +#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f) +#define ENETC_SIRSSCAPR 0x1600 +#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) + +/** SI BDR sub-blocks, n = 0..7 */ +enum enetc_bdr_type {TX, RX}; +#define ENETC_BDR_OFF(i) ((i) * 0x200) +#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r)) +/* RX BDR reg offsets */ +#define ENETC_RBMR 0 +#define ENETC_RBMR_BDS BIT(2) +#define ENETC_RBMR_CM BIT(4) +#define ENETC_RBMR_VTE BIT(5) +#define ENETC_RBMR_EN BIT(31) +#define ENETC_RBSR 0x4 +#define ENETC_RBBSR 0x8 +#define ENETC_RBCIR 0xc +#define ENETC_RBBAR0 0x10 +#define ENETC_RBBAR1 0x14 +#define ENETC_RBPIR 0x18 +#define ENETC_RBLENR 0x20 +#define ENETC_RBIER 0xa0 +#define ENETC_RBIER_RXTIE BIT(0) +#define ENETC_RBIDR 0xa4 +#define ENETC_RBICR0 0xa8 +#define ENETC_RBICR0_ICEN 
BIT(31) +#define ENETC_RBICR0_ICPT_MASK 0x1ff +#define ENETC_RBICR0_SET_ICPT(n) ((n) & ENETC_RBICR0_ICPT_MASK) +#define ENETC_RBICR1 0xac + +/* TX BDR reg offsets */ +#define ENETC_TBMR 0 +#define ENETC_TBSR_BUSY BIT(0) +#define ENETC_TBMR_VIH BIT(9) +#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0) +#define ENETC_TBMR_SET_PRIO(val) ((val) & ENETC_TBMR_PRIO_MASK) +#define ENETC_TBMR_EN BIT(31) +#define ENETC_TBSR 0x4 +#define ENETC_TBBAR0 0x10 +#define ENETC_TBBAR1 0x14 +#define ENETC_TBPIR 0x18 +#define ENETC_TBCIR 0x1c +#define ENETC_TBCIR_IDX_MASK 0xffff +#define ENETC_TBLENR 0x20 +#define ENETC_TBIER 0xa0 +#define ENETC_TBIER_TXTIE BIT(0) +#define ENETC_TBIDR 0xa4 +#define ENETC_TBICR0 0xa8 +#define ENETC_TBICR0_ICEN BIT(31) +#define ENETC_TBICR0_ICPT_MASK 0xf +#define ENETC_TBICR0_SET_ICPT(n) ((ilog2(n) + 1) & ENETC_TBICR0_ICPT_MASK) +#define ENETC_TBICR1 0xac + +#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) + +/* Port regs, offset: 1_0000h */ +#define ENETC_PORT_BASE 0x10000 +#define ENETC_PMR 0x0000 +#define ENETC_PMR_EN GENMASK(18, 16) +#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8) +#define ENETC_PMR_PSPEED_10M 0 +#define ENETC_PMR_PSPEED_100M BIT(8) +#define ENETC_PMR_PSPEED_1000M BIT(9) +#define ENETC_PMR_PSPEED_2500M BIT(10) +#define ENETC_PSR 0x0004 /* RO */ +#define ENETC_PSIPMR 0x0018 +#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */ +#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16) +#define ENETC_PSIPVMR 0x001c +#define ENETC_VLAN_PROMISC_MAP_ALL 0x7 +#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7) +#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16) +#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */ +#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8) +#define ENETC_PVCLCTR 0x0208 +#define ENETC_PCVLANR1 0x0210 +#define ENETC_PCVLANR2 0x0214 +#define ENETC_VLAN_TYPE_C BIT(0) +#define ENETC_VLAN_TYPE_S BIT(1) +#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */ +#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */ +#define ENETC_PSIVLAN_EN BIT(31) +#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12) +#define ENETC_PPAUONTR 0x0410 +#define ENETC_PPAUOFFTR 0x0414 +#define ENETC_PTXMBAR 0x0608 +#define ENETC_PCAPR0 0x0900 +#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) +#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff) +#define ENETC_PCAPR1 0x0904 +#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */ +#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff) +#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16) +#define ENETC_PSICFGR0_VTE BIT(12) +#define ENETC_PSICFGR0_SIVIE BIT(14) +#define ENETC_PSICFGR0_ASE BIT(15) +#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */ + +#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_CBSE BIT(31) +#define ENETC_CBS_BW_MASK GENMASK(6, 0) +#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_RSSHASH_KEY_SIZE 40 +#define ENETC_PRSSCAPR 0x1404 +#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) +#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */ +#define ENETC_PSIVLANFMR 0x1700 +#define ENETC_PSIVLANFMR_VS BIT(0) +#define ENETC_PRFSMR 0x1800 +#define ENETC_PRFSMR_RFSE BIT(31) +#define ENETC_PRFSCAPR 0x1804 +#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16) +#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */ +#define ENETC_PFPMR 0x1900 +#define ENETC_PFPMR_PMACE BIT(1) +#define ENETC_EMDIO_BASE 0x1c00 +#define ENETC_PSIUMHFR0(n, err) (((err) ? 
0x1d08 : 0x1d00) + (n) * 0x10) +#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10) +#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10) +#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10) +#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */ +#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */ +#define ENETC_MMCSR 0x1f00 +#define ENETC_MMCSR_LINK_FAIL BIT(31) +#define ENETC_MMCSR_VT_MASK GENMASK(29, 23) /* Verify Time */ +#define ENETC_MMCSR_VT(x) (((x) << 23) & ENETC_MMCSR_VT_MASK) +#define ENETC_MMCSR_GET_VT(x) (((x) & ENETC_MMCSR_VT_MASK) >> 23) +#define ENETC_MMCSR_TXSTS_MASK GENMASK(22, 21) /* Merge Status */ +#define ENETC_MMCSR_GET_TXSTS(x) (((x) & ENETC_MMCSR_TXSTS_MASK) >> 21) +#define ENETC_MMCSR_VSTS_MASK GENMASK(20, 18) /* Verify Status */ +#define ENETC_MMCSR_GET_VSTS(x) (((x) & ENETC_MMCSR_VSTS_MASK) >> 18) +#define ENETC_MMCSR_VDIS BIT(17) /* Verify Disabled */ +#define ENETC_MMCSR_ME BIT(16) /* Merge Enabled */ +#define ENETC_MMCSR_RAFS_MASK GENMASK(9, 8) /* Remote Additional Fragment Size */ +#define ENETC_MMCSR_RAFS(x) (((x) << 8) & ENETC_MMCSR_RAFS_MASK) +#define ENETC_MMCSR_GET_RAFS(x) (((x) & ENETC_MMCSR_RAFS_MASK) >> 8) +#define ENETC_MMCSR_LAFS_MASK GENMASK(4, 3) /* Local Additional Fragment Size */ +#define ENETC_MMCSR_GET_LAFS(x) (((x) & ENETC_MMCSR_LAFS_MASK) >> 3) +#define ENETC_MMCSR_LPA BIT(2) /* Local Preemption Active */ +#define ENETC_MMCSR_LPE BIT(1) /* Local Preemption Enabled */ +#define ENETC_MMCSR_LPS BIT(0) /* Local Preemption Supported */ +#define ENETC_MMFAECR 0x1f08 +#define ENETC_MMFSECR 0x1f0c +#define ENETC_MMFAOCR 0x1f10 +#define ENETC_MMFCRXR 0x1f14 +#define ENETC_MMFCTXR 0x1f18 +#define ENETC_MMHCR 0x1f1c +#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */ + +#define ENETC_PMAC_OFFSET 0x1000 + +#define ENETC_PM0_CMD_CFG 0x8008 +#define ENETC_PM0_TX_EN BIT(0) +#define ENETC_PM0_RX_EN BIT(1) +#define ENETC_PM0_PROMISC BIT(4) +#define ENETC_PM0_PAUSE_IGN BIT(8) +#define ENETC_PM0_CMD_XGLP BIT(10) +#define ENETC_PM0_CMD_TXP BIT(11) +#define ENETC_PM0_CMD_PHY_TX_EN BIT(15) +#define ENETC_PM0_CMD_SFD BIT(21) +#define ENETC_PM0_MAXFRM 0x8014 +#define ENETC_SET_TX_MTU(val) ((val) << 16) +#define ENETC_SET_MAXFRM(val) ((val) & 0xffff) +#define ENETC_PM0_RX_FIFO 0x801c +#define ENETC_PM0_RX_FIFO_VAL 1 + +#define ENETC_PM_IMDIO_BASE 0x8030 + +#define ENETC_PM0_PAUSE_QUANTA 0x8054 +#define ENETC_PM0_PAUSE_THRESH 0x8064 + +#define ENETC_PM0_SINGLE_STEP 0x80c0 +#define ENETC_PM0_SINGLE_STEP_CH BIT(7) +#define ENETC_PM0_SINGLE_STEP_EN BIT(31) +#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8) + +#define ENETC_PM0_IF_MODE 0x8300 +#define ENETC_PM0_IFM_RG BIT(2) +#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) +#define ENETC_PM0_IFM_EN_AUTO BIT(15) +#define ENETC_PM0_IFM_SSP_MASK GENMASK(14, 13) +#define ENETC_PM0_IFM_SSP_1000 (2 << 13) +#define ENETC_PM0_IFM_SSP_100 (0 << 13) +#define ENETC_PM0_IFM_SSP_10 (1 << 13) +#define ENETC_PM0_IFM_FULL_DPX BIT(12) +#define ENETC_PM0_IFM_IFMODE_MASK GENMASK(1, 0) +#define ENETC_PM0_IFM_IFMODE_XGMII 0 +#define ENETC_PM0_IFM_IFMODE_GMII 2 +#define ENETC_PSIDCAPR 0x1b08 +#define ENETC_PSIDCAPR_MSK GENMASK(15, 0) +#define ENETC_PSFCAPR 0x1b18 +#define ENETC_PSFCAPR_MSK GENMASK(15, 0) +#define ENETC_PSGCAPR 0x1b28 +#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16) +#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0) +#define ENETC_PFMCAPR 0x1b38 +#define ENETC_PFMCAPR_MSK GENMASK(15, 0) + +/* Port MAC counters: Port MAC 0 corresponds to the eMAC and + * Port MAC 1 to 
the pMAC. + */ +#define ENETC_PM_REOCT(mac) (0x8100 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RALN(mac) (0x8110 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RXPF(mac) (0x8118 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RFRM(mac) (0x8120 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RFCS(mac) (0x8128 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RVLAN(mac) (0x8130 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RERR(mac) (0x8138 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RUCA(mac) (0x8140 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RMCA(mac) (0x8148 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RBCA(mac) (0x8150 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RDRP(mac) (0x8158 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RPKT(mac) (0x8160 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RUND(mac) (0x8168 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_R64(mac) (0x8170 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_R127(mac) (0x8178 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_R255(mac) (0x8180 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_R511(mac) (0x8188 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_R1023(mac) (0x8190 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_R1522(mac) (0x8198 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_R1523X(mac) (0x81A0 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_ROVR(mac) (0x81A8 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RJBR(mac) (0x81B0 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RFRG(mac) (0x81B8 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RCNP(mac) (0x81C0 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_RDRNTP(mac) (0x81C8 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TEOCT(mac) (0x8200 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TOCT(mac) (0x8208 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TCRSE(mac) (0x8210 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TXPF(mac) (0x8218 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TFRM(mac) (0x8220 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TFCS(mac) (0x8228 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TVLAN(mac) (0x8230 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TERR(mac) (0x8238 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TUCA(mac) (0x8240 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TMCA(mac) (0x8248 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TBCA(mac) (0x8250 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TPKT(mac) (0x8260 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TUND(mac) (0x8268 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_T64(mac) (0x8270 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_T127(mac) (0x8278 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_T255(mac) (0x8280 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_T511(mac) (0x8288 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_T1023(mac) (0x8290 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_T1522(mac) (0x8298 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_T1523X(mac) (0x82A0 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TCNP(mac) (0x82C0 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TDFR(mac) (0x82D0 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TMCOL(mac) (0x82D8 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TSCOL(mac) (0x82E0 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TLCOL(mac) (0x82E8 + ENETC_PMAC_OFFSET * (mac)) +#define ENETC_PM_TECOL(mac) (0x82F0 + ENETC_PMAC_OFFSET * (mac)) + +/* Port counters */ +#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */ +#define ENETC_PBFDSIR 0x0810 +#define ENETC_PFDMSAPR 0x0814 +#define ENETC_UFDMF 0x1680 +#define ENETC_MFDMF 0x1684 
+#define ENETC_PUFDVFR 0x1780
+#define ENETC_PMFDVFR 0x1784
+#define ENETC_PBFDVFR 0x1788
+
+/** Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE 0x20000
+#define ENETC_G_EIPBRR0 0x0bf8
+#define ENETC_G_EIPBRR1 0x0bfc
+#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
+#define ENETC_G_EPFBLPR1_XGMII 0x80000000
+
+/* PCI device info */
+struct enetc_hw {
+	/* SI registers, used by all PCI functions */
+	void __iomem *reg;
+	/* Port registers, PF only */
+	void __iomem *port;
+	/* IP global registers, PF only */
+	void __iomem *global;
+};
+
+/* ENETC register accessors */
+
+/* MDIO issue workaround (on LS1028A) -
+ * Due to a hardware issue, an access to MDIO registers
+ * that is concurrent with other ENETC register accesses
+ * may lead to the MDIO access being dropped or corrupted.
+ * To protect the MDIO accesses a readers-writers locking
+ * scheme is used, where the MDIO register accesses are
+ * protected by write locks to ensure exclusivity, while
+ * the remaining ENETC registers are accessed under read
+ * locks since they only compete with MDIO accesses.
+ */
+extern rwlock_t enetc_mdio_lock;
+
+/* use this locking primitive only on the fast datapath to
+ * group together multiple non-MDIO register accesses to
+ * minimize the overhead of the lock
+ */
+static inline void enetc_lock_mdio(void)
+{
+	read_lock(&enetc_mdio_lock);
+}
+
+static inline void enetc_unlock_mdio(void)
+{
+	read_unlock(&enetc_mdio_lock);
+}
+
+/* use these accessors only on the fast datapath under
+ * the enetc_lock_mdio() locking primitive to minimize
+ * the overhead of the lock
+ */
+static inline u32 enetc_rd_reg_hot(void __iomem *reg)
+{
+	lockdep_assert_held(&enetc_mdio_lock);
+
+	return ioread32(reg);
+}
+
+static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
+{
+	lockdep_assert_held(&enetc_mdio_lock);
+
+	iowrite32(val, reg);
+}
+
+/* internal helpers for the MDIO w/a */
+static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
+{
+	u32 val;
+
+	enetc_lock_mdio();
+	val = ioread32(reg);
+	enetc_unlock_mdio();
+
+	return val;
+}
+
+static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
+{
+	enetc_lock_mdio();
+	iowrite32(val, reg);
+	enetc_unlock_mdio();
+}
+
+static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
+{
+	unsigned long flags;
+	u32 val;
+
+	write_lock_irqsave(&enetc_mdio_lock, flags);
+	val = ioread32(reg);
+	write_unlock_irqrestore(&enetc_mdio_lock, flags);
+
+	return val;
+}
+
+static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&enetc_mdio_lock, flags);
+	iowrite32(val, reg);
+	write_unlock_irqrestore(&enetc_mdio_lock, flags);
+}
+
+#ifdef ioread64
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+	return ioread64(reg);
+}
+#else
+/* used to read out stats on 32-bit systems; the high word is re-read
+ * until it is stable, so a carry between the two halves cannot produce
+ * a torn 64-bit value
+ */
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+	u32 low, high, tmp;
+
+	do {
+		high = ioread32(reg + 4);
+		low = ioread32(reg);
+		tmp = ioread32(reg + 4);
+	} while (high != tmp);
+
+	return le64_to_cpu((__le64)high << 32 | low);
+}
+#endif
+
+static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+{
+	u64 val;
+
+	enetc_lock_mdio();
+	val = _enetc_rd_reg64(reg);
+	enetc_unlock_mdio();
+
+	return val;
+}
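+
+/* Illustrative sketch, not part of the driver: the intended fast-path use
+ * of the primitives above is to take the read side of the lock once and
+ * batch several _hot accesses under it, rather than paying one lock/unlock
+ * round trip per register. The function and parameter names here are
+ * hypothetical:
+ */
+static inline void enetc_rd_two_regs_batched(void __iomem *reg_a,
+					     void __iomem *reg_b,
+					     u32 *val_a, u32 *val_b)
+{
+	enetc_lock_mdio();
+	*val_a = enetc_rd_reg_hot(reg_a);
+	*val_b = enetc_rd_reg_hot(reg_b);
+	enetc_unlock_mdio();
+}
+
+/* general register accessors */
+#define enetc_rd_reg(reg) _enetc_rd_reg_wa((reg))
+#define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val))
+#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
+#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
+#define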
enetc_rd_hot(hw, off) enetc_rd_reg_hot((hw)->reg + (off)) +#define enetc_wr_hot(hw, off, val) enetc_wr_reg_hot((hw)->reg + (off), val) +#define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off)) +/* port register accessors - PF only */ +#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off)) +#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val) +#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off)) +#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\ + (hw)->port + (off), val) +/* global register accessors - PF only */ +#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off)) +#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val) +/* BDR register accessors, see ENETC_BDR() */ +#define enetc_bdr_rd(hw, t, n, off) \ + enetc_rd(hw, ENETC_BDR(t, n, off)) +#define enetc_bdr_wr(hw, t, n, off, val) \ + enetc_wr(hw, ENETC_BDR(t, n, off), val) +#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off) +#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off) +#define enetc_txbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, TX, n, off, val) +#define enetc_rxbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, RX, n, off, val) + +/* Buffer Descriptors (BD) */ +union enetc_tx_bd { + struct { + __le64 addr; + __le16 buf_len; + __le16 frm_len; + union { + struct { + u8 reserved[3]; + u8 flags; + }; /* default layout */ + __le32 txstart; + __le32 lstatus; + }; + }; + struct { + __le32 tstamp; + __le16 tpid; + __le16 vid; + u8 reserved[6]; + u8 e_flags; + u8 flags; + } ext; /* Tx BD extension */ + struct { + __le32 tstamp; + u8 reserved[10]; + u8 status; + u8 flags; + } wb; /* writeback descriptor */ +}; + +enum enetc_txbd_flags { + ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */ + ENETC_TXBD_FLAGS_TSE = BIT(1), + ENETC_TXBD_FLAGS_W = BIT(2), + ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */ + ENETC_TXBD_FLAGS_TXSTART = BIT(4), + ENETC_TXBD_FLAGS_EX = BIT(6), + ENETC_TXBD_FLAGS_F = BIT(7) +}; +#define ENETC_TXBD_STATS_WIN BIT(7) +#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0) +#define ENETC_TXBD_FLAGS_OFFSET 24 + +static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags) +{ + u32 temp; + + temp = (tx_start >> 5 & ENETC_TXBD_TXSTART_MASK) | + (flags << ENETC_TXBD_FLAGS_OFFSET); + + return cpu_to_le32(temp); +} + +static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd) +{ + memset(txbd, 0, sizeof(*txbd)); +} + +/* Extension flags */ +#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0) +#define ENETC_TXBD_E_FLAGS_ONE_STEP_PTP BIT(1) +#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2) + +union enetc_rx_bd { + struct { + __le64 addr; + u8 reserved[8]; + } w; + struct { + __le16 inet_csum; + __le16 parse_summary; + __le32 rss_hash; + __le16 buf_len; + __le16 vlan_opt; + union { + struct { + __le16 flags; + __le16 error; + }; + __le32 lstatus; + }; + } r; + struct { + __le32 tstamp; + u8 reserved[12]; + } ext; +}; + +#define ENETC_RXBD_LSTATUS_R BIT(30) +#define ENETC_RXBD_LSTATUS_F BIT(31) +#define ENETC_RXBD_ERR_MASK 0xff +#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16) +#define ENETC_RXBD_FLAG_VLAN BIT(9) +#define ENETC_RXBD_FLAG_TSTMP BIT(10) +#define ENETC_RXBD_FLAG_TPID GENMASK(1, 0) + +#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */ +#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */ +#define ENETC_MAX_NUM_VFS 2 + +#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */ +#define ENETC_CBD_STATUS_MASK 0xf + +struct enetc_cmd_rfse { + u8 smac_h[6]; 
+ u8 smac_m[6]; + u8 dmac_h[6]; + u8 dmac_m[6]; + __be32 sip_h[4]; + __be32 sip_m[4]; + __be32 dip_h[4]; + __be32 dip_m[4]; + u16 ethtype_h; + u16 ethtype_m; + u16 ethtype4_h; + u16 ethtype4_m; + u16 sport_h; + u16 sport_m; + u16 dport_h; + u16 dport_m; + u16 vlan_h; + u16 vlan_m; + u8 proto_h; + u8 proto_m; + u16 flags; + u16 result; + u16 mode; +}; + +#define ENETC_RFSE_EN BIT(15) +#define ENETC_RFSE_MODE_BD 2 + +static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw, + struct net_device *ndev) +{ + u8 addr[ETH_ALEN] __aligned(4); + + *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); + *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); + eth_hw_addr_set(ndev, addr); +} + +#define ENETC_SI_INT_IDX 0 +/* base index for Rx/Tx interrupts */ +#define ENETC_BDR_INT_BASE_IDX 1 + +/* Messaging */ + +/* Command completion status */ +enum enetc_msg_cmd_status { + ENETC_MSG_CMD_STATUS_OK, + ENETC_MSG_CMD_STATUS_FAIL +}; + +/* VSI-PSI command message types */ +enum enetc_msg_cmd_type { + ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */ + ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */ + ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */ +}; + +/* VSI-PSI command action types */ +enum enetc_msg_cmd_action_type { + ENETC_MSG_CMD_MNG_ADD = 1, + ENETC_MSG_CMD_MNG_REMOVE +}; + +/* PSI-VSI command header format */ +struct enetc_msg_cmd_header { + u16 type; /* command class type */ + u16 id; /* denotes the specific required action */ +}; + +/* Common H/W utility functions */ + +static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx, + bool en) +{ + u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); + + val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0); + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val); +} + +static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx, + bool en) +{ + u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR); + + val = (val & ~ENETC_TBMR_VIH) | (en ? 
ENETC_TBMR_VIH : 0);
+	enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
+}
+
+static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
+				      int prio)
+{
+	u32 val = enetc_txbdr_rd(hw, bdr_idx, ENETC_TBMR);
+
+	val &= ~ENETC_TBMR_PRIO_MASK;
+	val |= ENETC_TBMR_SET_PRIO(prio);
+	enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
+}
+
+enum bdcr_cmd_class {
+	BDCR_CMD_UNSPEC = 0,
+	BDCR_CMD_MAC_FILTER,
+	BDCR_CMD_VLAN_FILTER,
+	BDCR_CMD_RSS,
+	BDCR_CMD_RFS,
+	BDCR_CMD_PORT_GCL,
+	BDCR_CMD_RECV_CLASSIFIER,
+	BDCR_CMD_STREAM_IDENTIFY,
+	BDCR_CMD_STREAM_FILTER,
+	BDCR_CMD_STREAM_GCL,
+	BDCR_CMD_FLOW_METER,
+	__BDCR_CMD_MAX_LEN,
+	BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
+};
+
+/* class 5, command 0 */
+struct tgs_gcl_conf {
+	u8 atc; /* init gate value */
+	u8 res[7];
+	struct {
+		u8 res1[4];
+		__le16 acl_len;
+		u8 res2[2];
+	};
+};
+
+/* gate control list entry */
+struct gce {
+	__le32 period;
+	u8 gate;
+	u8 res[3];
+};
+
+/* the tgs_gcl_conf address points to this data space */
+struct tgs_gcl_data {
+	__le32 btl;
+	__le32 bth;
+	__le32 ct;
+	__le32 cte;
+	struct gce entry[];
+};
+
+/* class 7, command 0, Stream Identity Entry Configuration */
+struct streamid_conf {
+	__le32 stream_handle;
+	__le32 iports;
+	u8 id_type;
+	u8 oui[3];
+	u8 res[3];
+	u8 en;
+};
+
+#define ENETC_CBDR_SID_VID_MASK 0xfff
+#define ENETC_CBDR_SID_VIDM BIT(12)
+#define ENETC_CBDR_SID_TG_MASK 0xc000
+/* the streamid_conf address points to this data space */
+struct streamid_data {
+	union {
+		u8 dmac[6];
+		u8 smac[6];
+	};
+	u16 vid_vidm_tg;
+};
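+
+/* Illustrative sketch, not part of the driver: a port gate control list
+ * (BDCR_CMD_PORT_GCL) is laid out in memory as a tgs_gcl_data header
+ * followed by one gce per list entry, so the buffer is sized through the
+ * flexible array member. This assumes linux/slab.h for kzalloc() and shows
+ * only the buffer construction, not the control BD ring submission; the
+ * function name and the chosen interval/gate values are hypothetical.
+ */
+static __maybe_unused struct tgs_gcl_data *
+enetc_build_gcl_sketch(u32 base_time_lo, u32 base_time_hi, u32 cycle_time,
+		       int num_entries)
+{
+	struct tgs_gcl_data *gcl;
+	int i;
+
+	gcl = kzalloc(struct_size(gcl, entry, num_entries), GFP_KERNEL);
+	if (!gcl)
+		return NULL;
+
+	gcl->btl = cpu_to_le32(base_time_lo);	/* base time, low word */
+	gcl->bth = cpu_to_le32(base_time_hi);	/* base time, high word */
+	gcl->ct = cpu_to_le32(cycle_time);	/* cycle time, in ns */
+	/* gcl->cte (cycle time extension) stays 0 from kzalloc() */
+
+	for (i = 0; i < num_entries; i++) {
+		/* keep this entry's gate state for 250 us */
+		gcl->entry[i].period = cpu_to_le32(250000);
+		/* gate bitmask: traffic classes 0 and 1 open */
+		gcl->entry[i].gate = BIT(0) | BIT(1);
+	}
+
+	return gcl;
+}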
+#define ENETC_CBDR_SFI_PRI_MASK 0x7
+#define ENETC_CBDR_SFI_PRIM BIT(3)
+#define ENETC_CBDR_SFI_BLOV BIT(4)
+#define ENETC_CBDR_SFI_BLEN BIT(5)
+#define ENETC_CBDR_SFI_MSDUEN BIT(6)
+#define ENETC_CBDR_SFI_FMITEN BIT(7)
+#define ENETC_CBDR_SFI_ENABLE BIT(7)
+/* class 8, command 0, Stream Filter Instance, Short Format */
+struct sfi_conf {
+	__le32 stream_handle;
+	u8 multi;
+	u8 res[2];
+	u8 sthm;
+	/* Max Service Data Unit or Flow Meter Instance Table index.
+	 * Depending on the value of FLT this represents either the Max
+	 * Service Data Unit (max frame size) allowed by the filter
+	 * entry, or an index into the Flow Meter Instance Table
+	 * identifying the policer which will be used to police it.
+	 */
+	__le16 fm_inst_table_index;
+	__le16 msdu;
+	__le16 sg_inst_table_index;
+	u8 res1[2];
+	__le32 input_ports;
+	u8 res2[3];
+	u8 en;
+};
+
+/* class 8, command 2: Stream Filter Instance status query, Short Format.
+ * The command itself needs no structure definition; this is the layout of
+ * the Stream Filter Instance Query Statistics response data.
+ */
+struct sfi_counter_data {
+	u32 matchl;
+	u32 matchh;
+	u32 msdu_dropl;
+	u32 msdu_droph;
+	u32 stream_gate_dropl;
+	u32 stream_gate_droph;
+	u32 flow_meter_dropl;
+	u32 flow_meter_droph;
+};
+
+#define ENETC_CBDR_SGI_OIPV_MASK 0x7
+#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_CGTST BIT(6)
+#define ENETC_CBDR_SGI_OGTST BIT(7)
+#define ENETC_CBDR_SGI_CFG_CHG BIT(1)
+#define ENETC_CBDR_SGI_CFG_PND BIT(2)
+#define ENETC_CBDR_SGI_OEX BIT(4)
+#define ENETC_CBDR_SGI_OEXEN BIT(5)
+#define ENETC_CBDR_SGI_IRX BIT(6)
+#define ENETC_CBDR_SGI_IRXEN BIT(7)
+#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
+#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
+#define ENETC_CBDR_SGI_EN BIT(7)
+/* class 9, command 0, Stream Gate Instance Table, Short Format
+ * class 9, command 2, Stream Gate Instance Table entry query write back,
+ * Short Format
+ */
+struct sgi_table {
+	u8 res[8];
+	u8 oipv;
+	u8 res0[2];
+	u8 ocgtst;
+	u8 res1[7];
+	u8 gset;
+	u8 oacl_len;
+	u8 res2[2];
+	u8 en;
+};
+
+#define ENETC_CBDR_SGI_AIPV_MASK 0x7
+#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_AGTST BIT(7)
+
+/* class 9, command 1, Stream Gate Control List, Long Format */
+struct sgcl_conf {
+	u8 aipv;
+	u8 res[2];
+	u8 agtst;
+	u8 res1[4];
+	union {
+		struct {
+			u8 res2[4];
+			u8 acl_len;
+			u8 res3[3];
+		};
+		u8 cct[8]; /* Config change time */
+	};
+};
+
+#define ENETC_CBDR_SGL_IOMEN BIT(0)
+#define ENETC_CBDR_SGL_IPVEN BIT(3)
+#define ENETC_CBDR_SGL_GTST BIT(4)
+#define ENETC_CBDR_SGL_IPV_MASK 0xe
+/* Stream Gate Control List Entry */
+struct sgce {
+	u32 interval;
+	u8 msdu[3];
+	u8 multi;
+};
+
+/* Stream Gate Control List, class 9, command 1 data buffer */
+struct sgcl_data {
+	u32 btl;
+	u32 bth;
+	u32 ct;
+	u32 cte;
+	struct sgce sgcl[];
+};
+
+#define ENETC_CBDR_FMI_MR BIT(0)
+#define ENETC_CBDR_FMI_MREN BIT(1)
+#define ENETC_CBDR_FMI_DOY BIT(2)
+#define ENETC_CBDR_FMI_CM BIT(3)
+#define ENETC_CBDR_FMI_CF BIT(4)
+#define ENETC_CBDR_FMI_NDOR BIT(5)
+#define ENETC_CBDR_FMI_OALEN BIT(6)
+#define ENETC_CBDR_FMI_IRFPP_MASK GENMASK(4, 0)
+
+/* class 10: command 0/1, Flow Meter Instance Set, Short Format */
+struct fmi_conf {
+	__le32 cir;
+	__le32 cbs;
+	__le32 eir;
+	__le32 ebs;
+	u8 conf;
+	u8 res1;
+	u8 ir_fpp;
+	u8 res2[4];
+	u8 en;
+};
+
+struct enetc_cbd {
+	union {
+		struct sfi_conf sfi_conf;
+		struct sgi_table sgi_table;
+		struct fmi_conf fmi_conf;
+		struct {
+			__le32 addr[2];
+			union {
+				__le32 opt[4];
+				struct tgs_gcl_conf gcl_conf;
+				struct streamid_conf sid_set;
+				struct sgcl_conf sgcl_conf;
+			};
+		};	/* Long format */
+		__le32 data[6];
+	};
+	__le16 index;
+	__le16 length;
+	u8 cmd;
+	u8 cls;
+	u8 _res;
+	u8 status_flags;
+};
+
+#define ENETC_CLK 400000000ULL
+static inline u32 enetc_cycles_to_usecs(u32 cycles)
+{
+	return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK);
+}
+
+static inline u32 enetc_usecs_to_cycles(u32 usecs)
+{
+	return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
+}
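+
+/* Illustrative arithmetic, not part of the driver: with ENETC_CLK at
+ * 400 MHz, one microsecond corresponds to 400 cycles, so the two helpers
+ * above are just inverse scalings by that factor, e.g.:
+ *
+ *	enetc_usecs_to_cycles(50)    = 50 * 400000000 / 1000000    = 20000
+ *	enetc_cycles_to_usecs(20000) = 20000 * 1000000 / 400000000 = 50
+ */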
register */ +#define ENETC_PTGCAPR 0x11a08 +#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0) + +/* Port time specific departure */ +#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n)) +#define ENETC_TSDE BIT(31) + +/* PSFP setting */ +#define ENETC_PPSFPMR 0x11b00 +#define ENETC_PPSFPMR_PSFPEN BIT(0) +#define ENETC_PPSFPMR_VS BIT(1) +#define ENETC_PPSFPMR_PVC BIT(2) +#define ENETC_PPSFPMR_PVZC BIT(3) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c new file mode 100644 index 0000000000..d39617ab93 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2021 NXP + * + * The Integrated Endpoint Register Block (IERB) is configured by pre-boot + * software and is supposed to be to ENETC what an NVRAM is to a 'real' PCIe + * card. Upon FLR, values from the IERB are transferred to the ENETC PFs, and + * are read-only in the PF memory space. + * + * This driver fixes up the power-on reset values for the ENETC shared FIFO, + * such that the TX and RX allocations are sufficient for jumbo frames, and + * that intelligent FIFO dropping is enabled before the internal data + * structures are corrupted. + * + * Even though not all ports might be used on a given board, we are not + * concerned with partitioning the FIFO, because the default values configure + * no strict reservations, so the entire FIFO can be used by the RX of a single + * port, or the TX of a single port. + */ + +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include "enetc.h" +#include "enetc_ierb.h" + +/* IERB registers */ +#define ENETC_IERB_TXMBAR(port) (((port) * 0x100) + 0x8080) +#define ENETC_IERB_RXMBER(port) (((port) * 0x100) + 0x8090) +#define ENETC_IERB_RXMBLR(port) (((port) * 0x100) + 0x8094) +#define ENETC_IERB_RXBCR(port) (((port) * 0x100) + 0x80a0) +#define ENETC_IERB_TXBCR(port) (((port) * 0x100) + 0x80a8) +#define ENETC_IERB_FMBDTR 0xa000 + +#define ENETC_RESERVED_FOR_ICM 1024 + +struct enetc_ierb { + void __iomem *regs; +}; + +static void enetc_ierb_write(struct enetc_ierb *ierb, u32 offset, u32 val) +{ + iowrite32(val, ierb->regs + offset); +} + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + struct enetc_ierb *ierb = platform_get_drvdata(pdev); + int port = enetc_pf_to_port(pf_pdev); + u16 tx_credit, rx_credit, tx_alloc; + + if (port < 0) + return -ENODEV; + + if (!ierb) + return -EPROBE_DEFER; + + /* By default, it is recommended to set the Host Transfer Agent + * per port transmit byte credit to "1000 + max_frame_size/2". + * The power-on reset value (1800 bytes) is rounded up to the nearest + * 100 assuming a maximum frame size of 1536 bytes. + */ + tx_credit = roundup(1000 + ENETC_MAC_MAXFRM_SIZE / 2, 100); + + /* Internal memory allocated for transmit buffering is guaranteed but + * not reserved; i.e. if the total transmit allocation is not used, + * then the unused portion is not left idle; it can be used for receive + * buffering, but it will be reclaimed, if required, from receive by + * intelligently dropping already stored receive frames in the internal + * memory to ensure that the transmit allocation is respected.
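+ * + * For example, assuming the jumbo ENETC_MAC_MAXFRM_SIZE of 9600 bytes used + * by this driver, the transmit byte credit computed above works out to + * roundup(1000 + 9600 / 2, 100) = 5800 bytes.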
+ * + * PaTXMBAR must be set to a value larger than + * PaTXBCR + 2 * max_frame_size + 32 + * if frame preemption is not enabled, or to + * 2 * PaTXBCR + 2 * p_max_frame_size (pMAC maximum frame size) + + * 2 * np_max_frame_size (eMAC maximum frame size) + 64 + * if frame preemption is enabled. + */ + tx_alloc = roundup(2 * tx_credit + 4 * ENETC_MAC_MAXFRM_SIZE + 64, 16); + + /* Initial credits, in units of 8 bytes, to the Ingress Congestion + * Manager for the maximum amount of bytes the port is allocated for + * pending traffic. + * It is recommended to set the initial credits to 2 times the maximum + * frame size (2 frames of maximum size). + */ + rx_credit = DIV_ROUND_UP(ENETC_MAC_MAXFRM_SIZE * 2, 8); + + enetc_ierb_write(ierb, ENETC_IERB_TXBCR(port), tx_credit); + enetc_ierb_write(ierb, ENETC_IERB_TXMBAR(port), tx_alloc); + enetc_ierb_write(ierb, ENETC_IERB_RXBCR(port), rx_credit); + + return 0; +} +EXPORT_SYMBOL(enetc_ierb_register_pf); + +static int enetc_ierb_probe(struct platform_device *pdev) +{ + struct enetc_ierb *ierb; + void __iomem *regs; + + ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL); + if (!ierb) + return -ENOMEM; + + regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + ierb->regs = regs; + + /* Free buffer depletion threshold in bytes. + * This sets the minimum amount of free buffer memory that should be + * maintained in the datapath sub system, and when the amount of free + * buffer memory falls below this threshold, a depletion indication is + * asserted, which may trigger "intelligent drop" frame releases from + * the ingress queues in the ICM. + * It is recommended to set the free buffer depletion threshold to 1024 + * bytes, since the ICM needs some FIFO memory for its own use. 
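+ * + * Note that ENETC_RESERVED_FOR_ICM above is defined to exactly this + * recommended value, so the write below programs 1024 bytes.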
+ */ + enetc_ierb_write(ierb, ENETC_IERB_FMBDTR, ENETC_RESERVED_FOR_ICM); + + platform_set_drvdata(pdev, ierb); + + return 0; +} + +static const struct of_device_id enetc_ierb_match[] = { + { .compatible = "fsl,ls1028a-enetc-ierb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, enetc_ierb_match); + +static struct platform_driver enetc_ierb_driver = { + .driver = { + .name = "fsl-enetc-ierb", + .of_match_table = enetc_ierb_match, + }, + .probe = enetc_ierb_probe, +}; + +module_platform_driver(enetc_ierb_driver); + +MODULE_DESCRIPTION("NXP ENETC IERB"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h new file mode 100644 index 0000000000..c2ce47c4be --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2021 NXP */ + +#include <linux/pci.h> +#include <linux/platform_device.h> + +#if IS_ENABLED(CONFIG_FSL_ENETC_IERB) + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev); + +#else + +static inline int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c new file mode 100644 index 0000000000..998aaa394e --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/fsl/enetc_mdio.h> +#include <linux/mdio.h> +#include <linux/of_mdio.h> +#include <linux/iopoll.h> +#include <linux/of.h> + +#include "enetc_pf.h" + +#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */ +#define ENETC_MDIO_CTL 0x4 /* MDIO control */ +#define ENETC_MDIO_DATA 0x8 /* MDIO data */ +#define ENETC_MDIO_ADDR 0xc /* MDIO address */ + +#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8) +#define MDIO_CFG_BSY BIT(0) +#define MDIO_CFG_RD_ER BIT(1) +#define MDIO_CFG_HOLD(x) (((x) << 2) & GENMASK(4, 2)) +#define MDIO_CFG_ENC45 BIT(6) + /* external MDIO only - driven on neg MDC edge */ +#define MDIO_CFG_NEG BIT(23) + +#define ENETC_EMDIO_CFG \ + (MDIO_CFG_HOLD(2) | \ + MDIO_CFG_CLKDIV(258) | \ + MDIO_CFG_NEG) + +#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f) +#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5) +#define MDIO_CTL_READ BIT(15) + +static inline u32 enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off) +{ + return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off); +} + +static inline void enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off, + u32 val) +{ + enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val); +} + +static bool enetc_mdio_is_busy(struct enetc_mdio_priv *mdio_priv) +{ + return enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_BSY; +} + +static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv) +{ + bool is_busy; + + return readx_poll_timeout(enetc_mdio_is_busy, mdio_priv, + is_busy, !is_busy, 10, 10 * 1000); +} + +int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum, + u16 value) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + u16 dev_addr; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set 
port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); + + /* write the value */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_mdio_write_c22); + +int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum, u16 value) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + mdio_cfg |= MDIO_CFG_ENC45; + + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); + + /* set the register address */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* write the value */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_mdio_write_c45); + +int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + u16 dev_addr, value; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set port and device addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); + + /* initiate the read */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* return all Fs if nothing was there */ + if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) { + dev_dbg(&bus->dev, + "Error while reading PHY%d reg at %d.%d\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + + value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff; + + return value; +} +EXPORT_SYMBOL_GPL(enetc_mdio_read_c22); + +int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + u16 value; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + mdio_cfg |= MDIO_CFG_ENC45; + + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set port and device addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); + + /* set the register address */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* initiate the read */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* return all Fs if nothing was there */ + if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) { + dev_dbg(&bus->dev, + "Error while reading PHY%d reg at %d.%d\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + + value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff; + + return value; +} 
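+ +/* Summary of the access sequences above: Clause 22 operations encode the + * register number in the MDIO_CTL device-address field, while Clause 45 + * operations first latch it into MDIO_ADDR; each step polls MDIO_CFG_BSY + * via enetc_mdio_wait_complete() before touching the bus again. + */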
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c45); + +struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs) +{ + struct enetc_hw *hw; + + hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); + if (!hw) + return ERR_PTR(-ENOMEM); + + hw->port = port_regs; + + return hw; +} +EXPORT_SYMBOL_GPL(enetc_hw_alloc); + +/* Lock for MDIO access errata on LS1028A */ +DEFINE_RWLOCK(enetc_mdio_lock); +EXPORT_SYMBOL_GPL(enetc_mdio_lock); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c new file mode 100644 index 0000000000..40d22ebe92 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc_pf.h" + +static void enetc_msg_disable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + /* disable MR int source(s) */ + enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK); +} + +static void enetc_msg_enable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + + enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK); +} + +static irqreturn_t enetc_msg_psi_msix(int irq, void *data) +{ + struct enetc_si *si = (struct enetc_si *)data; + struct enetc_pf *pf = enetc_si_priv(si); + + enetc_msg_disable_mr_int(&si->hw); + schedule_work(&pf->msg_task); + + return IRQ_HANDLED; +} + +static void enetc_msg_task(struct work_struct *work) +{ + struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task); + struct enetc_hw *hw = &pf->si->hw; + unsigned long mr_mask; + int i; + + for (;;) { + mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK; + if (!mr_mask) { + /* re-arm MR interrupts, w1c the IDR reg */ + enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK); + enetc_msg_enable_mr_int(hw); + return; + } + + for (i = 0; i < pf->num_vfs; i++) { + u32 psimsgrr; + u16 msg_code; + + if (!(ENETC_PSIMSGRR_MR(i) & mr_mask)) + continue; + + enetc_msg_handle_rxmsg(pf, i, &msg_code); + + psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code); + psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */ + enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr); + } + } +} + +/* Init */ +static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + struct enetc_msg_swbd *msg; + u32 val; + + msg = &pf->rxmsg[idx]; + /* allocate and set receive buffer */ + msg->size = ENETC_DEFAULT_MSG_SIZE; + + msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma, + GFP_KERNEL); + if (!msg->vaddr) { + dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n", + msg->size); + return -ENOMEM; + } + + /* set multiple of 32 bytes */ + val = lower_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val); + val = upper_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val); + + return 0; +} + +static void enetc_msg_free_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + struct enetc_msg_swbd *msg; + + msg = &pf->rxmsg[idx]; + dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma); + memset(msg, 0, sizeof(*msg)); + + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0); +} + +int enetc_msg_psi_init(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int vector, i, err; + + /* register message passing interrupt handler */ + snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), 
"%s-vfmsg", + si->ndev->name); + vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX); + err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si); + if (err) { + dev_err(&si->pdev->dev, + "PSI messaging: request_irq() failed!\n"); + return err; + } + + /* set one IRQ entry for PSI message receive notification (SI int) */ + enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX); + + /* initialize PSI mailbox */ + INIT_WORK(&pf->msg_task, enetc_msg_task); + + for (i = 0; i < pf->num_vfs; i++) { + err = enetc_msg_alloc_mbx(si, i); + if (err) + goto err_init_mbx; + } + + /* enable MR interrupts */ + enetc_msg_enable_mr_int(&si->hw); + + return 0; + +err_init_mbx: + for (i--; i >= 0; i--) + enetc_msg_free_mbx(si, i); + + free_irq(vector, si); + + return err; +} + +void enetc_msg_psi_free(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int i; + + cancel_work_sync(&pf->msg_task); + + /* disable MR interrupts */ + enetc_msg_disable_mr_int(&si->hw); + + for (i = 0; i < pf->num_vfs; i++) + enetc_msg_free_mbx(si, i); + + /* de-register message passing interrupt handler */ + free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c new file mode 100644 index 0000000000..a1b595bd79 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ +#include <linux/fsl/enetc_mdio.h> +#include <linux/of_mdio.h> +#include "enetc_pf.h" + +#define ENETC_MDIO_DEV_ID 0xee01 +#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO" +#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus" +#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver" + +static int enetc_pci_mdio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_mdio_priv *mdio_priv; + struct device *dev = &pdev->dev; + void __iomem *port_regs; + struct enetc_hw *hw; + struct mii_bus *bus; + int err; + + port_regs = pci_iomap(pdev, 0, 0); + if (!port_regs) { + dev_err(dev, "iomap failed\n"); + err = -ENXIO; + goto err_ioremap; + } + + hw = enetc_hw_alloc(dev, port_regs); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + goto err_hw_alloc; + } + + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + if (!bus) { + err = -ENOMEM; + goto err_mdiobus_alloc; + } + + bus->name = ENETC_MDIO_BUS_NAME; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; + bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = hw; + mdio_priv->mdio_base = ENETC_EMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "device enable failed\n"); + goto err_pci_enable; + } + + err = pci_request_region(pdev, 0, KBUILD_MODNAME); + if (err) { + dev_err(dev, "pci_request_region failed\n"); + goto err_pci_mem_reg; + } + + err = of_mdiobus_register(bus, dev->of_node); + if (err) + goto err_mdiobus_reg; + + pci_set_drvdata(pdev, bus); + + return 0; + +err_mdiobus_reg: + pci_release_region(pdev, 0); +err_pci_mem_reg: + pci_disable_device(pdev); +err_pci_enable: +err_mdiobus_alloc: +err_hw_alloc: + iounmap(port_regs); +err_ioremap: + return err; +} + +static void enetc_pci_mdio_remove(struct pci_dev *pdev) +{ + struct mii_bus *bus = pci_get_drvdata(pdev); + struct enetc_mdio_priv *mdio_priv; + + 
mdiobus_unregister(bus); + mdio_priv = bus->priv; + iounmap(mdio_priv->hw->port); + pci_release_region(pdev, 0); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_pci_mdio_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table); + +static struct pci_driver enetc_pci_mdio_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pci_mdio_id_table, + .probe = enetc_pci_mdio_probe, + .remove = enetc_pci_mdio_remove, +}; +module_pci_driver(enetc_pci_mdio_driver); + +MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c new file mode 100644 index 0000000000..c153dc083a --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -0,0 +1,1429 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <asm/unaligned.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/fsl/enetc_mdio.h> +#include <linux/of_platform.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/pcs-lynx.h> +#include "enetc_ierb.h" +#include "enetc_pf.h" + +#define ENETC_DRV_NAME_STR "ENETC PF driver" + +static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr) +{ + u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si)); + u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si)); + + put_unaligned_le32(upper, addr); + put_unaligned_le16(lower, addr + 4); +} + +static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si, + const u8 *addr) +{ + u32 upper = get_unaligned_le32(addr); + u16 lower = get_unaligned_le16(addr + 4); + + __raw_writel(upper, hw->port + ENETC_PSIPMAR0(si)); + __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si)); +} + +static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(ndev, saddr->sa_data); + enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data); + + return 0; +} + +static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map) +{ + u32 val = enetc_port_rd(hw, ENETC_PSIPVMR); + + val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL); + enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val); +} + +static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + pf->vlan_promisc_simap |= BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + pf->vlan_promisc_simap &= ~BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos) +{ + u32 val = 0; + + if (vlan) + val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan; + + enetc_port_wr(hw, ENETC_PSIVLANR(si), val); +} + +static int enetc_mac_addr_hash_idx(const u8 *addr) +{ + u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; + u64 mask = 0; + int res = 0; + int i; + + for (i = 0; i < 8; i++) + mask |= BIT_ULL(i * 6); + + for (i = 0; i < 6; i++) + res |= (hweight64(fold & (mask << i)) & 0x1) << i; + + return res; +} + +static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) +{ + filter->mac_addr_cnt = 0; + + 
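/* Forget both the exact-match address and every hash table hit; the + * caller then rebuilds the filter from the current netdev address lists. + */ +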
bitmap_zero(filter->mac_hash_table, + ENETC_MADDR_HASH_TBL_SZ); +} + +static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + /* add exact match addr */ + ether_addr_copy(filter->mac_addr, addr); + filter->mac_addr_cnt++; +} + +static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + int idx = enetc_mac_addr_hash_idx(addr); + + /* add hash table entry */ + __set_bit(idx, filter->mac_hash_table); + filter->mac_addr_cnt++; +} + +static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0); + } +} + +static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type, + unsigned long hash) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), + lower_32_bits(hash)); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), + upper_32_bits(hash)); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), + lower_32_bits(hash)); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), + upper_32_bits(hash)); + } +} + +static void enetc_sync_mac_filters(struct enetc_pf *pf) +{ + struct enetc_mac_filter *f = pf->mac_filter; + struct enetc_si *si = pf->si; + int i, pos; + + pos = EMETC_MAC_ADDR_FILT_RES; + + for (i = 0; i < MADDR_TYPE; i++, f++) { + bool em = (f->mac_addr_cnt == 1) && (i == UC); + bool clear = !f->mac_addr_cnt; + + if (clear) { + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_clear_mac_ht_flt(si, 0, i); + continue; + } + + /* exact match filter */ + if (em) { + int err; + + enetc_clear_mac_ht_flt(si, 0, UC); + + err = enetc_set_mac_flt_entry(si, pos, f->mac_addr, + BIT(0)); + if (!err) + continue; + + /* fallback to HT filtering */ + dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n", + err); + } + + /* hash table filter, clear EM filter for UC entries */ + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_set_mac_ht_flt(si, 0, i, *f->mac_hash_table); + } +} + +static void enetc_pf_set_rx_mode(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct enetc_hw *hw = &priv->si->hw; + bool uprom = false, mprom = false; + struct enetc_mac_filter *filter; + struct netdev_hw_addr *ha; + u32 psipmr = 0; + bool em; + + if (ndev->flags & IFF_PROMISC) { + /* enable promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); + uprom = true; + mprom = true; + } else if (ndev->flags & IFF_ALLMULTI) { + /* enable multi cast promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_MP(0); + mprom = true; + } + + /* first 2 filter entries belong to PF */ + if (!uprom) { + /* Update unicast filters */ + filter = &pf->mac_filter[UC]; + enetc_reset_mac_addr_filter(filter); + + em = (netdev_uc_count(ndev) == 1); + netdev_for_each_uc_addr(ha, ndev) { + if (em) { + enetc_add_mac_addr_em_filter(filter, ha->addr); + break; + } + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!mprom) { + /* Update multicast filters */ + filter = &pf->mac_filter[MC]; + enetc_reset_mac_addr_filter(filter); + + netdev_for_each_mc_addr(ha, ndev) { + if 
(!is_multicast_ether_addr(ha->addr)) + continue; + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!uprom || !mprom) + /* update PF entries */ + enetc_sync_mac_filters(pf); + + psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) & + ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0)); + enetc_port_wr(hw, ENETC_PSIPMR, psipmr); +} + +static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, + unsigned long hash) +{ + enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash)); + enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash)); +} + +static int enetc_vid_hash_idx(unsigned int vid) +{ + int res = 0; + int i; + + for (i = 0; i < 6; i++) + res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; + + return res; +} + +static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) +{ + int i; + + if (rehash) { + bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); + + for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) { + int hidx = enetc_vid_hash_idx(i); + + __set_bit(hidx, pf->vlan_ht_filter); + } + } + + enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter); +} + +static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + int idx; + + __set_bit(vid, pf->active_vlans); + + idx = enetc_vid_hash_idx(vid); + if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) + enetc_sync_vlan_ht_filter(pf, false); + + return 0; +} + +static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + __clear_bit(vid, pf->active_vlans); + enetc_sync_vlan_ht_filter(pf, true); + + return 0; +} + +static void enetc_set_loopback(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_si *si = priv->si; + u32 reg; + + reg = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE); + if (reg & ENETC_PM0_IFM_RG) { + /* RGMII mode */ + reg = (reg & ~ENETC_PM0_IFM_RLP) | + (en ? ENETC_PM0_IFM_RLP : 0); + enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, reg); + } else { + /* assume SGMII mode */ + reg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG); + reg = (reg & ~ENETC_PM0_CMD_XGLP) | + (en ? ENETC_PM0_CMD_XGLP : 0); + reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) | + (en ? 
ENETC_PM0_CMD_PHY_TX_EN : 0); + enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, reg); + } +} + +static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct enetc_vf_state *vf_state; + + if (vf >= pf->total_vfs) + return -EINVAL; + + if (!is_valid_ether_addr(mac)) + return -EADDRNOTAVAIL; + + vf_state = &pf->vf_state[vf]; + vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC; + enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac); + return 0; +} + +static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, + u8 qos, __be16 proto) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + if (priv->si->errata & ENETC_ERR_VLAN_ISOL) + return -EOPNOTSUPP; + + if (vf >= pf->total_vfs) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + /* only C-tags supported for now */ + return -EPROTONOSUPPORT; + + enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos); + return 0; +} + +static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + u32 cfgr; + + if (vf >= pf->total_vfs) + return -EINVAL; + + cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1)); + cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0); + enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr); + + return 0; +} + +static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf, + int si) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_hw *hw = &pf->si->hw; + u8 mac_addr[ETH_ALEN] = { 0 }; + int err; + + /* (1) try to get the MAC address from the device tree */ + if (np) { + err = of_get_mac_address(np, mac_addr); + if (err == -EPROBE_DEFER) + return err; + } + + /* (2) bootloader supplied MAC address */ + if (is_zero_ether_addr(mac_addr)) + enetc_pf_get_primary_mac_addr(hw, si, mac_addr); + + /* (3) choose a random one */ + if (is_zero_ether_addr(mac_addr)) { + eth_random_addr(mac_addr); + dev_info(dev, "no MAC address specified for SI%d, using %pM\n", + si, mac_addr); + } + + enetc_pf_set_primary_mac_addr(hw, si, mac_addr); + + return 0; +} + +static int enetc_setup_mac_addresses(struct device_node *np, + struct enetc_pf *pf) +{ + int err, i; + + /* The PF might take its MAC from the device tree */ + err = enetc_setup_mac_address(np, pf, 0); + if (err) + return err; + + for (i = 0; i < pf->total_vfs; i++) { + err = enetc_setup_mac_address(NULL, pf, i + 1); + if (err) + return err; + } + + return 0; +} + +static void enetc_port_assign_rfs_entries(struct enetc_si *si) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int num_entries, vf_entries, i; + u32 val; + + /* split RFS entries between functions */ + val = enetc_port_rd(hw, ENETC_PRFSCAPR); + num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val); + vf_entries = num_entries / (pf->total_vfs + 1); + + for (i = 0; i < pf->total_vfs; i++) + enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries); + enetc_port_wr(hw, ENETC_PSIRFSCFGR(0), + num_entries - vf_entries * pf->total_vfs); + + /* enable RFS on port */ + enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE); +} + +static void enetc_port_si_configure(struct enetc_si *si) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int num_rings, i; + u32 val; + + val = enetc_port_rd(hw, ENETC_PCAPR0); + num_rings = 
min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val)); + + val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS); + val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS); + + if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) { + val = ENETC_PSICFGR0_SET_TXBDR(num_rings); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rings); + + dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n", + num_rings, ENETC_PF_NUM_RINGS); + + num_rings = 0; + } + + /* Add default one-time settings for SI0 (PF) */ + val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + + enetc_port_wr(hw, ENETC_PSICFGR0(0), val); + + if (num_rings) + num_rings -= ENETC_PF_NUM_RINGS; + + /* Configure the SIs for each available VF */ + val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE; + + if (num_rings) { + num_rings /= pf->total_vfs; + val |= ENETC_PSICFGR0_SET_TXBDR(num_rings); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rings); + } + + for (i = 0; i < pf->total_vfs; i++) + enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val); + + /* Port level VLAN settings */ + val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + enetc_port_wr(hw, ENETC_PVCLCTR, val); + /* use outer tag for VLAN filtering */ + enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS); +} + +void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu) +{ + int tc; + + for (tc = 0; tc < 8; tc++) { + u32 val = ENETC_MAC_MAXFRM_SIZE; + + if (max_sdu[tc]) + val = max_sdu[tc] + VLAN_ETH_HLEN; + + enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val); + } +} + +void enetc_reset_ptcmsdur(struct enetc_hw *hw) +{ + int tc; + + for (tc = 0; tc < 8; tc++) + enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE); +} + +static void enetc_configure_port_mac(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + + enetc_port_mac_wr(si, ENETC_PM0_MAXFRM, + ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); + + enetc_reset_ptcmsdur(hw); + + enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC); + + /* On LS1028A, the MAC RX FIFO defaults to 2, which is too high + * and may lead to RX lock-up under traffic. Set it to 1 instead, + * as recommended by the hardware team. + */ + enetc_port_mac_wr(si, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL); +} + +static void enetc_mac_config(struct enetc_si *si, phy_interface_t phy_mode) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(phy_mode)) { + val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE); + val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK); + val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG; + enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val); + } + + if (phy_mode == PHY_INTERFACE_MODE_USXGMII) { + val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII; + enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val); + } +} + +static void enetc_mac_enable(struct enetc_si *si, bool en) +{ + u32 val = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG); + + val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); + val |= en ? 
(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0; + + enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, val); +} + +static void enetc_configure_port(struct enetc_pf *pf) +{ + u8 hash_key[ENETC_RSSHASH_KEY_SIZE]; + struct enetc_hw *hw = &pf->si->hw; + + enetc_configure_port_mac(pf->si); + + enetc_port_si_configure(pf->si); + + /* set up hash key */ + get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); + enetc_set_rss_key(hw, hash_key); + + /* split up RFS entries */ + enetc_port_assign_rfs_entries(pf->si); + + /* enforce VLAN promisc mode for all SIs */ + pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL; + enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap); + + enetc_port_wr(hw, ENETC_PSIPMR, 0); + + /* enable port */ + enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN); +} + +/* Messaging */ +static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf, + int vf_id) +{ + struct enetc_vf_state *vf_state = &pf->vf_state[vf_id]; + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct enetc_msg_cmd_set_primary_mac *cmd; + struct device *dev = &pf->si->pdev->dev; + u16 cmd_id; + char *addr; + + cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr; + cmd_id = cmd->header.id; + if (cmd_id != ENETC_MSG_CMD_MNG_ADD) + return ENETC_MSG_CMD_STATUS_FAIL; + + addr = cmd->mac.sa_data; + if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC) + dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n", + vf_id); + else + enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr); + + return ENETC_MSG_CMD_STATUS_OK; +} + +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status) +{ + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct device *dev = &pf->si->pdev->dev; + struct enetc_msg_cmd_header *cmd_hdr; + u16 cmd_type; + + *status = ENETC_MSG_CMD_STATUS_OK; + cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr; + cmd_type = cmd_hdr->type; + + switch (cmd_type) { + case ENETC_MSG_CMD_MNG_MAC: + *status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id); + break; + default: + dev_err(dev, "command not supported (cmd_type: 0x%x)\n", + cmd_type); + } +} + +#ifdef CONFIG_PCI_IOV +static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + int err; + + if (!num_vfs) { + enetc_msg_psi_free(pf); + kfree(pf->vf_state); + pf->num_vfs = 0; + pci_disable_sriov(pdev); + } else { + pf->num_vfs = num_vfs; + + pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state), + GFP_KERNEL); + if (!pf->vf_state) { + pf->num_vfs = 0; + return -ENOMEM; + } + + err = enetc_msg_psi_init(pf); + if (err) { + dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err); + goto err_msg_psi; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err); + goto err_en_sriov; + } + } + + return num_vfs; + +err_en_sriov: + enetc_msg_psi_free(pf); +err_msg_psi: + kfree(pf->vf_state); + pf->num_vfs = 0; + + return err; +} +#else +#define enetc_sriov_configure(pdev, num_vfs) (void)0 +#endif + +static int enetc_pf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (changed & NETIF_F_HW_TC) { + err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); + if (err) + return err; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + struct enetc_pf *pf = enetc_si_priv(priv->si); + + if (!!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) + 
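/* VLAN filtering turned back on: take SI0 out of VLAN promiscuous mode */ +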
enetc_disable_si_vlan_promisc(pf, 0); + else + enetc_enable_si_vlan_promisc(pf, 0); + } + + if (changed & NETIF_F_LOOPBACK) + enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); + + enetc_set_features(ndev, features); + + return 0; +} + +static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_QUERY_CAPS: + return enetc_qos_query_caps(ndev, type_data); + case TC_SETUP_QDISC_MQPRIO: + return enetc_setup_tc_mqprio(ndev, type_data); + case TC_SETUP_QDISC_TAPRIO: + return enetc_setup_tc_taprio(ndev, type_data); + case TC_SETUP_QDISC_CBS: + return enetc_setup_tc_cbs(ndev, type_data); + case TC_SETUP_QDISC_ETF: + return enetc_setup_tc_txtime(ndev, type_data); + case TC_SETUP_BLOCK: + return enetc_setup_tc_psfp(ndev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static const struct net_device_ops enetc_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_pf_set_mac_addr, + .ndo_set_rx_mode = enetc_pf_set_rx_mode, + .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid, + .ndo_set_vf_mac = enetc_pf_set_vf_mac, + .ndo_set_vf_vlan = enetc_pf_set_vf_vlan, + .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, + .ndo_set_features = enetc_pf_set_features, + .ndo_eth_ioctl = enetc_ioctl, + .ndo_setup_tc = enetc_pf_setup_tc, + .ndo_bpf = enetc_setup_bpf, + .ndo_xdp_xmit = enetc_xdp_xmit, +}; + +static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_WOL << 1) - 1; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + if (si->num_rss) + ndev->hw_features |= NETIF_F_RXHASH; + + ndev->priv_flags |= IFF_UNICAST_FLT; + ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG | + NETDEV_XDP_ACT_NDO_XMIT_SG; + + if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) { + priv->active_offloads |= ENETC_F_QCI; + ndev->features |= NETIF_F_HW_TC; + ndev->hw_features |= NETIF_F_HW_TC; + } + + /* pick up primary MAC address from SI */ + enetc_load_primary_mac_addr(&si->hw, ndev); +} + +static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_priv *mdio_priv; + struct mii_bus *bus; + int err; + + bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC MDIO Bus"; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; + bus->parent = dev; + mdio_priv = bus->priv; + 
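/* bus->priv was allocated along with the mii_bus itself and carries the + * register context used by the enetc_mdio_* accessors + */ +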
mdio_priv->hw = &pf->si->hw; + mdio_priv->mdio_base = ENETC_EMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + err = of_mdiobus_register(bus, np); + if (err) + return dev_err_probe(dev, err, "cannot register MDIO bus\n"); + + pf->mdio = bus; + + return 0; +} + +static void enetc_mdio_remove(struct enetc_pf *pf) +{ + if (pf->mdio) + mdiobus_unregister(pf->mdio); +} + +static int enetc_imdio_create(struct enetc_pf *pf) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_priv *mdio_priv; + struct phylink_pcs *phylink_pcs; + struct mii_bus *bus; + int err; + + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC internal MDIO Bus"; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; + bus->parent = dev; + bus->phy_mask = ~0; + mdio_priv = bus->priv; + mdio_priv->hw = &pf->si->hw; + mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); + + err = mdiobus_register(bus); + if (err) { + dev_err(dev, "cannot register internal MDIO bus (%d)\n", err); + goto free_mdio_bus; + } + + phylink_pcs = lynx_pcs_create_mdiodev(bus, 0); + if (IS_ERR(phylink_pcs)) { + err = PTR_ERR(phylink_pcs); + dev_err(dev, "cannot create lynx pcs (%d)\n", err); + goto unregister_mdiobus; + } + + pf->imdio = bus; + pf->pcs = phylink_pcs; + + return 0; + +unregister_mdiobus: + mdiobus_unregister(bus); +free_mdio_bus: + mdiobus_free(bus); + return err; +} + +static void enetc_imdio_remove(struct enetc_pf *pf) +{ + if (pf->pcs) + lynx_pcs_destroy(pf->pcs); + if (pf->imdio) { + mdiobus_unregister(pf->imdio); + mdiobus_free(pf->imdio); + } +} + +static bool enetc_port_has_pcs(struct enetc_pf *pf) +{ + return (pf->if_mode == PHY_INTERFACE_MODE_SGMII || + pf->if_mode == PHY_INTERFACE_MODE_2500BASEX || + pf->if_mode == PHY_INTERFACE_MODE_USXGMII); +} + +static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node) +{ + struct device_node *mdio_np; + int err; + + mdio_np = of_get_child_by_name(node, "mdio"); + if (mdio_np) { + err = enetc_mdio_probe(pf, mdio_np); + + of_node_put(mdio_np); + if (err) + return err; + } + + if (enetc_port_has_pcs(pf)) { + err = enetc_imdio_create(pf); + if (err) { + enetc_mdio_remove(pf); + return err; + } + } + + return 0; +} + +static void enetc_mdiobus_destroy(struct enetc_pf *pf) +{ + enetc_mdio_remove(pf); + enetc_imdio_remove(pf); +} + +static struct phylink_pcs * +enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + + return pf->pcs; +} + +static void enetc_pl_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + + enetc_mac_config(pf->si, state->interface); +} + +static void enetc_force_rgmii_mac(struct enetc_si *si, int speed, int duplex) +{ + u32 old_val, val; + + old_val = val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE); + + if (speed == SPEED_1000) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_1000; + } else if (speed == SPEED_100) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_100; + } else if (speed == SPEED_10) { + val &= ~ENETC_PM0_IFM_SSP_MASK; + val |= ENETC_PM0_IFM_SSP_10; + } + + if (duplex == DUPLEX_FULL) + val |= ENETC_PM0_IFM_FULL_DPX; + else + val &= ~ENETC_PM0_IFM_FULL_DPX; + + if (val == old_val) + 
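/* no change: skip the needless MAC register write */ +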
return; + + enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val); +} + +static void enetc_pl_mac_link_up(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, + int duplex, bool tx_pause, bool rx_pause) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + u32 pause_off_thresh = 0, pause_on_thresh = 0; + u32 init_quanta = 0, refresh_quanta = 0; + struct enetc_hw *hw = &pf->si->hw; + struct enetc_si *si = pf->si; + struct enetc_ndev_priv *priv; + u32 rbmr, cmd_cfg; + int idx; + + priv = netdev_priv(pf->si->ndev); + + if (pf->si->hw_features & ENETC_SI_F_QBV) + enetc_sched_speed_set(priv, speed); + + if (!phylink_autoneg_inband(mode) && + phy_interface_mode_is_rgmii(interface)) + enetc_force_rgmii_mac(si, speed, duplex); + + /* Flow control */ + for (idx = 0; idx < priv->num_rx_rings; idx++) { + rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); + + if (tx_pause) + rbmr |= ENETC_RBMR_CM; + else + rbmr &= ~ENETC_RBMR_CM; + + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); + } + + if (tx_pause) { + /* When the port first enters congestion, send a PAUSE request + * with the maximum number of quanta. When the port exits + * congestion, it will automatically send a PAUSE frame with + * zero quanta. + */ + init_quanta = 0xffff; + + /* Also, set up the refresh timer to send follow-up PAUSE + * frames at half the quanta value, in case the congestion + * condition persists. + */ + refresh_quanta = 0xffff / 2; + + /* Start emitting PAUSE frames when 3 large frames (or more + * smaller frames) have accumulated in the FIFO waiting to be + * DMAed to the RX ring. + */ + pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE; + pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE; + } + + enetc_port_mac_wr(si, ENETC_PM0_PAUSE_QUANTA, init_quanta); + enetc_port_mac_wr(si, ENETC_PM0_PAUSE_THRESH, refresh_quanta); + enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh); + enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh); + + cmd_cfg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG); + + if (rx_pause) + cmd_cfg &= ~ENETC_PM0_PAUSE_IGN; + else + cmd_cfg |= ENETC_PM0_PAUSE_IGN; + + enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, cmd_cfg); + + enetc_mac_enable(si, true); + + if (si->hw_features & ENETC_SI_F_QBU) + enetc_mm_link_state_update(priv, true); +} + +static void enetc_pl_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct enetc_pf *pf = phylink_to_enetc_pf(config); + struct enetc_si *si = pf->si; + struct enetc_ndev_priv *priv; + + priv = netdev_priv(si->ndev); + + if (si->hw_features & ENETC_SI_F_QBU) + enetc_mm_link_state_update(priv, false); + + enetc_mac_enable(si, false); +} + +static const struct phylink_mac_ops enetc_mac_phylink_ops = { + .mac_select_pcs = enetc_pl_mac_select_pcs, + .mac_config = enetc_pl_mac_config, + .mac_link_up = enetc_pl_mac_link_up, + .mac_link_down = enetc_pl_mac_link_down, +}; + +static int enetc_phylink_create(struct enetc_ndev_priv *priv, + struct device_node *node) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct phylink *phylink; + int err; + + pf->phylink_config.dev = &priv->ndev->dev; + pf->phylink_config.type = PHYLINK_NETDEV; + pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; + + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + 
pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_USXGMII, + pf->phylink_config.supported_interfaces); + phy_interface_set_rgmii(pf->phylink_config.supported_interfaces); + + phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node), + pf->if_mode, &enetc_mac_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + return err; + } + + priv->phylink = phylink; + + return 0; +} + +static void enetc_phylink_destroy(struct enetc_ndev_priv *priv) +{ + phylink_destroy(priv->phylink); +} + +/* Initialize the entire shared memory for the flow steering entries + * of this port (PF + VFs) + */ +static int enetc_init_port_rfs_memory(struct enetc_si *si) +{ + struct enetc_cmd_rfse rfse = {0}; + struct enetc_hw *hw = &si->hw; + int num_rfs, i, err = 0; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRFSCAPR); + num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val); + + for (i = 0; i < num_rfs; i++) { + err = enetc_set_fs_entry(si, &rfse, i); + if (err) + break; + } + + return err; +} + +static int enetc_init_port_rss_memory(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + int num_rss, err; + int *rss_table; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRSSCAPR); + num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val); + if (!num_rss) + return 0; + + rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return -ENOMEM; + + err = enetc_set_rss_table(si, rss_table, num_rss); + + kfree(rss_table); + + return err; +} + +static int enetc_pf_register_with_ierb(struct pci_dev *pdev) +{ + struct platform_device *ierb_pdev; + struct device_node *ierb_node; + + ierb_node = of_find_compatible_node(NULL, NULL, + "fsl,ls1028a-enetc-ierb"); + if (!ierb_node || !of_device_is_available(ierb_node)) + return -ENODEV; + + ierb_pdev = of_find_device_by_node(ierb_node); + of_node_put(ierb_node); + + if (!ierb_pdev) + return -EPROBE_DEFER; + + return enetc_ierb_register_pf(ierb_pdev, pdev); +} + +static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) +{ + struct enetc_si *si; + int err; + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(struct enetc_pf)); + if (err) { + dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); + goto out; + } + + si = pci_get_drvdata(pdev); + if (!si->hw.port || !si->hw.global) { + err = -ENODEV; + dev_err(&pdev->dev, "could not map PF space, probing a VF?\n"); + goto out_pci_remove; + } + + err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, + &si->cbd_ring); + if (err) + goto out_pci_remove; + + err = enetc_init_port_rfs_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); + goto out_teardown_cbdr; + } + + err = enetc_init_port_rss_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); + goto out_teardown_cbdr; + } + + return si; + +out_teardown_cbdr: + enetc_teardown_cbdr(&si->cbd_ring); +out_pci_remove: + enetc_pci_remove(pdev); +out: + return ERR_PTR(err); +} + +static void enetc_psi_destroy(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + + enetc_teardown_cbdr(&si->cbd_ring); + enetc_pci_remove(pdev); +} + +static int enetc_pf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device_node *node = pdev->dev.of_node; + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + struct enetc_pf *pf; + int err; + + err = enetc_pf_register_with_ierb(pdev); + if (err == -EPROBE_DEFER) + return err; + if (err) + dev_warn(&pdev->dev, + "Could not register with IERB driver: %pe, 
please update the device tree\n", + ERR_PTR(err)); + + si = enetc_psi_create(pdev); + if (IS_ERR(si)) { + err = PTR_ERR(si); + goto err_psi_create; + } + + pf = enetc_si_priv(si); + pf->si = si; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + + err = enetc_setup_mac_addresses(node, pf); + if (err) + goto err_setup_mac_addresses; + + enetc_configure_port(pf); + + enetc_get_si_caps(si); + + ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS); + if (!ndev) { + err = -ENOMEM; + dev_err(&pdev->dev, "netdev creation failed\n"); + goto err_alloc_netdev; + } + + enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops); + + priv = netdev_priv(ndev); + + mutex_init(&priv->mm_lock); + + enetc_init_si_rings_params(priv); + + err = enetc_alloc_si_resources(priv); + if (err) { + dev_err(&pdev->dev, "SI resource alloc failed\n"); + goto err_alloc_si_res; + } + + err = enetc_configure_si(priv); + if (err) { + dev_err(&pdev->dev, "Failed to configure SI\n"); + goto err_config_si; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); + goto err_alloc_msix; + } + + err = of_get_phy_mode(node, &pf->if_mode); + if (err) { + dev_err(&pdev->dev, "Failed to read PHY mode\n"); + goto err_phy_mode; + } + + err = enetc_mdiobus_create(pf, node); + if (err) + goto err_mdiobus_create; + + err = enetc_phylink_create(priv, node); + if (err) + goto err_phylink_create; + + err = register_netdev(ndev); + if (err) + goto err_reg_netdev; + + return 0; + +err_reg_netdev: + enetc_phylink_destroy(priv); +err_phylink_create: + enetc_mdiobus_destroy(pf); +err_mdiobus_create: +err_phy_mode: + enetc_free_msix(priv); +err_config_si: +err_alloc_msix: + enetc_free_si_resources(priv); +err_alloc_si_res: + si->ndev = NULL; + free_netdev(ndev); +err_alloc_netdev: +err_setup_mac_addresses: + enetc_psi_destroy(pdev); +err_psi_create: + return err; +} + +static void enetc_pf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_ndev_priv *priv; + + priv = netdev_priv(si->ndev); + + if (pf->num_vfs) + enetc_sriov_configure(pdev, 0); + + unregister_netdev(si->ndev); + + enetc_phylink_destroy(priv); + enetc_mdiobus_destroy(pf); + + enetc_free_msix(priv); + + enetc_free_si_resources(priv); + + free_netdev(si->ndev); + + enetc_psi_destroy(pdev); +} + +static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct enetc_si *si; + + /* Only apply quirk for disabled functions. For the ones + * that are enabled, enetc_pf_probe() will apply it. + */ + if (node && of_device_is_available(node)) + return; + + si = enetc_psi_create(pdev); + if (!IS_ERR(si)) + enetc_psi_destroy(pdev); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF, + enetc_fixup_clear_rss_rfs); + +static const struct pci_device_id enetc_pf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc_pf_id_table); + +static struct pci_driver enetc_pf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pf_id_table, + .probe = enetc_pf_probe, + .remove = enetc_pf_remove, +#ifdef CONFIG_PCI_IOV + .sriov_configure = enetc_sriov_configure, +#endif +}; +module_pci_driver(enetc_pf_driver); + +MODULE_DESCRIPTION(ENETC_DRV_NAME_STR); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h new file mode 100644 index 0000000000..c26bd66e45 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" +#include <linux/phylink.h> + +#define ENETC_PF_NUM_RINGS 8 + +enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; +#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE) + +#define ENETC_MADDR_HASH_TBL_SZ 64 +struct enetc_mac_filter { + union { + char mac_addr[ETH_ALEN]; + DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); + }; + int mac_addr_cnt; +}; + +#define ENETC_VLAN_HT_SIZE 64 + +enum enetc_vf_flags { + ENETC_VF_FLAG_PF_SET_MAC = BIT(0), +}; + +struct enetc_vf_state { + enum enetc_vf_flags flags; +}; + +struct enetc_pf { + struct enetc_si *si; + int num_vfs; /* number of active VFs, after sriov_init */ + int total_vfs; /* max number of VFs, set for PF at probe */ + struct enetc_vf_state *vf_state; + + struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT]; + + struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS]; + struct work_struct msg_task; + char msg_int_name[ENETC_INT_NAME_MAX]; + + char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */ + DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE); + DECLARE_BITMAP(active_vlans, VLAN_N_VID); + + struct mii_bus *mdio; /* saved for cleanup */ + struct mii_bus *imdio; + struct phylink_pcs *pcs; + + phy_interface_t if_mode; + struct phylink_config phylink_config; +}; + +#define phylink_to_enetc_pf(config) \ + container_of((config), struct enetc_pf, phylink_config) + +int enetc_msg_psi_init(struct enetc_pf *pf); +void enetc_msg_psi_free(struct enetc_pf *pf); +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c new file mode 100644 index 0000000000..5243fc0310 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/fsl/ptp_qoriq.h> + +#include "enetc.h" + +int enetc_phc_index = -1; +EXPORT_SYMBOL_GPL(enetc_phc_index); + +static struct ptp_clock_info enetc_ptp_caps = { + .owner = THIS_MODULE, + .name = "ENETC PTP clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = 2, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfine = ptp_qoriq_adjfine, + .adjtime = ptp_qoriq_adjtime, + .gettime64 = ptp_qoriq_gettime, + .settime64 = ptp_qoriq_settime, + .enable = ptp_qoriq_enable, +}; + +static int enetc_ptp_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct ptp_qoriq *ptp_qoriq; + void __iomem *base; + int err, len, n; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = pci_enable_device_mem(pdev); + if (err) + return 
dev_err_probe(&pdev->dev, err, "device enable failed\n"); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL); + if (!ptp_qoriq) { + err = -ENOMEM; + goto err_alloc_ptp; + } + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + + base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!base) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + + /* Allocate 1 interrupt */ + n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (n != 1) { + err = -EPERM; + goto err_irq_vectors; + } + + ptp_qoriq->irq = pci_irq_vector(pdev, 0); + + err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq); + if (err) { + dev_err(&pdev->dev, "request_irq() failed!\n"); + goto err_irq; + } + + ptp_qoriq->dev = &pdev->dev; + + err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps); + if (err) + goto err_no_clock; + + enetc_phc_index = ptp_qoriq->phc_index; + pci_set_drvdata(pdev, ptp_qoriq); + + return 0; + +err_no_clock: + free_irq(ptp_qoriq->irq, ptp_qoriq); +err_irq: + pci_free_irq_vectors(pdev); +err_irq_vectors: + iounmap(base); +err_ioremap: + kfree(ptp_qoriq); +err_alloc_ptp: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +static void enetc_ptp_remove(struct pci_dev *pdev) +{ + struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev); + + enetc_phc_index = -1; + ptp_qoriq_free(ptp_qoriq); + pci_free_irq_vectors(pdev); + kfree(ptp_qoriq); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_ptp_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table); + +static struct pci_driver enetc_ptp_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_ptp_id_table, + .probe = enetc_ptp_probe, + .remove = enetc_ptp_remove, +}; +module_pci_driver(enetc_ptp_driver); + +MODULE_DESCRIPTION("ENETC PTP clock driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c new file mode 100644 index 0000000000..2513b44056 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -0,0 +1,1689 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include "enetc.h" + +#include <net/pkt_sched.h> +#include <linux/math64.h> +#include <linux/refcount.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gate.h> + +static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) +{ + return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK; +} + +void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 old_speed = priv->speed; + u32 pspeed, tmp; + + if (speed == old_speed) + return; + + switch (speed) { + case SPEED_1000: + pspeed = ENETC_PMR_PSPEED_1000M; + break; + case SPEED_2500: + pspeed = ENETC_PMR_PSPEED_2500M; + break; + case SPEED_100: + pspeed = ENETC_PMR_PSPEED_100M; + break; + case SPEED_10: + default: + pspeed = ENETC_PMR_PSPEED_10M; + } + + priv->speed = speed; + tmp = enetc_port_rd(hw, ENETC_PMR); + enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed); +} + +static int enetc_setup_taprio(struct enetc_ndev_priv *priv, + struct tc_taprio_qopt_offload *admin_conf) +{ + struct enetc_hw *hw = &priv->si->hw; + struct enetc_cbd cbd = {.cmd = 0}; + struct tgs_gcl_conf *gcl_config; + struct tgs_gcl_data *gcl_data; + dma_addr_t dma; + struct gce *gce; + u16 data_size; + u16 gcl_len; + void *tmp; + u32 tge; + int err; + int i; + + /* TSD and Qbv are mutually exclusive in hardware */ + for (i = 0; i < priv->num_tx_rings; i++) + if (priv->tx_ring[i]->tsd_enable) + return -EBUSY; + + if (admin_conf->num_entries > enetc_get_max_gcl_len(hw)) + return -EINVAL; + + if (admin_conf->cycle_time > U32_MAX || + admin_conf->cycle_time_extension > U32_MAX) + return -EINVAL; + + /* Configure the (administrative) gate control list using the + * control BD descriptor. 
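+ * The GCL does not fit in the BD itself, so it is passed to the + * hardware through a separate DMA data buffer attached to the command + * BD (see the enetc_cbd_alloc_data_mem() call below).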
+ */ + gcl_config = &cbd.gcl_conf; + gcl_len = admin_conf->num_entries; + + data_size = struct_size(gcl_data, entry, gcl_len); + tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, + &dma, (void *)&gcl_data); + if (!tmp) + return -ENOMEM; + + gce = (struct gce *)(gcl_data + 1); + + /* Set all gates open as default */ + gcl_config->atc = 0xff; + gcl_config->acl_len = cpu_to_le16(gcl_len); + + gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time)); + gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time)); + gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); + gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); + + for (i = 0; i < gcl_len; i++) { + struct tc_taprio_sched_entry *temp_entry; + struct gce *temp_gce = gce + i; + + temp_entry = &admin_conf->entries[i]; + + temp_gce->gate = (u8)temp_entry->gate_mask; + temp_gce->period = cpu_to_le32(temp_entry->interval); + } + + cbd.status_flags = 0; + + cbd.cls = BDCR_CMD_PORT_GCL; + cbd.status_flags = 0; + + tge = enetc_rd(hw, ENETC_PTGCR); + enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE); + + err = enetc_send_cmd(priv->si, &cbd); + if (err) + enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE); + + enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); + + if (err) + return err; + + enetc_set_ptcmsdur(hw, admin_conf->max_sdu); + priv->active_offloads |= ENETC_F_QBV; + + return 0; +} + +static void enetc_reset_taprio_stats(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + priv->tx_ring[i]->stats.win_drop = 0; +} + +static void enetc_reset_taprio(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + u32 val; + + val = enetc_rd(hw, ENETC_PTGCR); + enetc_wr(hw, ENETC_PTGCR, val & ~ENETC_PTGCR_TGE); + enetc_reset_ptcmsdur(hw); + + priv->active_offloads &= ~ENETC_F_QBV; +} + +static void enetc_taprio_destroy(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + enetc_reset_taprio(priv); + enetc_reset_tc_mqprio(ndev); + enetc_reset_taprio_stats(priv); +} + +static void enetc_taprio_stats(struct net_device *ndev, + struct tc_taprio_qopt_stats *stats) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u64 window_drops = 0; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + window_drops += priv->tx_ring[i]->stats.win_drop; + + stats->window_drops = window_drops; +} + +static void enetc_taprio_queue_stats(struct net_device *ndev, + struct tc_taprio_qopt_queue_stats *queue_stats) +{ + struct tc_taprio_qopt_stats *stats = &queue_stats->stats; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int queue = queue_stats->queue; + + stats->window_drops = priv->tx_ring[queue]->stats.win_drop; +} + +static int enetc_taprio_replace(struct net_device *ndev, + struct tc_taprio_qopt_offload *offload) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + err = enetc_setup_tc_mqprio(ndev, &offload->mqprio); + if (err) + return err; + + err = enetc_setup_taprio(priv, offload); + if (err) + enetc_reset_tc_mqprio(ndev); + + return err; +} + +int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) +{ + struct tc_taprio_qopt_offload *offload = type_data; + int err = 0; + + switch (offload->cmd) { + case TAPRIO_CMD_REPLACE: + err = enetc_taprio_replace(ndev, offload); + break; + case TAPRIO_CMD_DESTROY: + enetc_taprio_destroy(ndev); + break; + case TAPRIO_CMD_STATS: + enetc_taprio_stats(ndev, &offload->stats); + break; + case TAPRIO_CMD_QUEUE_STATS: + enetc_taprio_queue_stats(ndev, &offload->queue_stats); 
+ break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc) +{ + return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE; +} + +static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc) +{ + return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK; +} + +int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct tc_cbs_qopt_offload *cbs = type_data; + u32 port_transmit_rate = priv->speed; + u8 tc_nums = netdev_get_num_tc(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 hi_credit_bit, hi_credit_reg; + u32 max_interference_size; + u32 port_frame_max_size; + u8 tc = cbs->queue; + u8 prio_top, prio_next; + int bw_sum = 0; + u8 bw; + + prio_top = tc_nums - 1; + prio_next = tc_nums - 2; + + /* Only the highest prio and second highest prio TCs support CBS mode */ + if (tc != prio_top && tc != prio_next) + return -EOPNOTSUPP; + + if (!cbs->enable) { + /* Make sure the other TCs that are numerically + * lower than this TC have been disabled. + */ + if (tc == prio_top && + enetc_get_cbs_enable(hw, prio_next)) { + dev_err(&ndev->dev, + "Disable TC%d before disabling TC%d\n", + prio_next, tc); + return -EINVAL; + } + + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0); + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0); + + return 0; + } + + if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L || + cbs->idleslope < 0 || cbs->sendslope > 0) + return -EOPNOTSUPP; + + port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + + bw = cbs->idleslope / (port_transmit_rate * 10UL); + + /* Make sure the other TCs that are numerically + * higher than this TC have been enabled. + */ + if (tc == prio_next) { + if (!enetc_get_cbs_enable(hw, prio_top)) { + dev_err(&ndev->dev, + "Enable TC%d first, before enabling TC%d\n", + prio_top, prio_next); + return -EINVAL; + } + bw_sum += enetc_get_cbs_bw(hw, prio_top); + } + + if (bw_sum + bw >= 100) { + dev_err(&ndev->dev, + "The sum of all CBS bandwidths can't exceed 100\n"); + return -EINVAL; + } + + enetc_port_rd(hw, ENETC_PTCMSDUR(tc)); + + /* For the top prio TC, the max_interference_size is maxSizedFrame.
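+ * E.g. with a 1500 byte MTU, port_frame_max_size is 1522 bytes, so the + * top prio interference window is 1522 * 8 = 12176 bit times.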
+ * + * For the next prio TC, the max_interference_size is calculated as below: + * + * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra) + * + * - Ra: idleSlope for AVB Class A + * - R0: port transmit rate + * - M0: maximum sized frame for the port + * - Ma: maximum sized frame for AVB Class A + */ + + if (tc == prio_top) { + max_interference_size = port_frame_max_size * 8; + } else { + u32 m0, ma, r0, ra; + + m0 = port_frame_max_size * 8; + ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8; + ra = enetc_get_cbs_bw(hw, prio_top) * + port_transmit_rate * 10000ULL; + r0 = port_transmit_rate * 1000000ULL; + max_interference_size = m0 + ma + + (u32)div_u64((u64)ra * m0, r0 - ra); + } + + /* hiCredit bits are calculated as: + * + * max_interference_size * (idleSlope/portTxRate) + */ + hi_credit_bit = max_interference_size * bw / 100; + + /* hiCredit bits are converted to the hiCredit register value by + * multiplying with: + * + * (enetClockFrequency / portTransmitRate) * 100 + */ + hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, + port_transmit_rate * 1000000ULL); + + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg); + + /* Set bw register and enable this traffic class */ + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); + + return 0; +} + +int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct tc_etf_qopt_offload *qopt = type_data; + u8 tc_nums = netdev_get_num_tc(ndev); + struct enetc_hw *hw = &priv->si->hw; + int tc; + + if (!tc_nums) + return -EOPNOTSUPP; + + tc = qopt->queue; + + if (tc < 0 || tc >= priv->num_tx_rings) + return -EINVAL; + + /* TSD and Qbv are mutually exclusive in hardware */ + if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE) + return -EBUSY; + + priv->tx_ring[tc]->tsd_enable = qopt->enable; + enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ?
ENETC_TSDE : 0); + + return 0; +} + +enum streamid_type { + STREAMID_TYPE_RESERVED = 0, + STREAMID_TYPE_NULL, + STREAMID_TYPE_SMAC, +}; + +enum streamid_vlan_tagged { + STREAMID_VLAN_RESERVED = 0, + STREAMID_VLAN_TAGGED, + STREAMID_VLAN_UNTAGGED, + STREAMID_VLAN_ALL, +}; + +#define ENETC_PSFP_WILDCARD -1 +#define HANDLE_OFFSET 100 + +enum forward_type { + FILTER_ACTION_TYPE_PSFP = BIT(0), + FILTER_ACTION_TYPE_ACL = BIT(1), + FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0), +}; + +/* Limits the allowed output type for a given set of input actions */ +struct actions_fwd { + u64 actions; + u64 keys; /* keys that must be included */ + enum forward_type output; +}; + +struct psfp_streamfilter_counters { + u64 matching_frames_count; + u64 passing_frames_count; + u64 not_passing_frames_count; + u64 passing_sdu_count; + u64 not_passing_sdu_count; + u64 red_frames_count; +}; + +struct enetc_streamid { + u32 index; + union { + u8 src_mac[6]; + u8 dst_mac[6]; + }; + u8 filtertype; + u16 vid; + u8 tagged; + s32 handle; +}; + +struct enetc_psfp_filter { + u32 index; + s32 handle; + s8 prio; + u32 maxsdu; + u32 gate_id; + s32 meter_id; + refcount_t refcount; + struct hlist_node node; +}; + +struct enetc_psfp_gate { + u32 index; + s8 init_ipv; + u64 basetime; + u64 cycletime; + u64 cycletimext; + u32 num_entries; + refcount_t refcount; + struct hlist_node node; + struct action_gate_entry entries[]; +}; + +/* Only green color frames are enabled for now. + * eir/ebs, color blind and couple flag support will be added once the + * policing action offloads more parameters. + */ +struct enetc_psfp_meter { + u32 index; + u32 cir; + u32 cbs; + refcount_t refcount; + struct hlist_node node; +}; + +#define ENETC_PSFP_FLAGS_FMI BIT(0) + +struct enetc_stream_filter { + struct enetc_streamid sid; + u32 sfi_index; + u32 sgi_index; + u32 flags; + u32 fmi_index; + struct flow_stats stats; + struct hlist_node node; +}; + +struct enetc_psfp { + unsigned long dev_bitmap; + unsigned long *psfp_sfi_bitmap; + struct hlist_head stream_list; + struct hlist_head psfp_filter_list; + struct hlist_head psfp_gate_list; + struct hlist_head psfp_meter_list; + spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */ +}; + +static struct actions_fwd enetc_act_fwd[] = { + { + BIT(FLOW_ACTION_GATE), + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS), + FILTER_ACTION_TYPE_PSFP + }, + { + BIT(FLOW_ACTION_POLICE) | + BIT(FLOW_ACTION_GATE), + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS), + FILTER_ACTION_TYPE_PSFP + }, + /* example for ACL actions */ + { + BIT(FLOW_ACTION_DROP), + 0, + FILTER_ACTION_TYPE_ACL + } +}; + +static struct enetc_psfp epsfp = { + .dev_bitmap = 0, + .psfp_sfi_bitmap = NULL, +}; + +static LIST_HEAD(enetc_block_cb_list); + +/* Stream Identity Entry Set Descriptor */ +static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, + struct enetc_streamid *sid, + u8 enable) +{ + struct enetc_cbd cbd = {.cmd = 0}; + struct streamid_data *si_data; + struct streamid_conf *si_conf; + dma_addr_t dma; + u16 data_size; + void *tmp; + int port; + int err; + + port = enetc_pf_to_port(priv->si->pdev); + if (port < 0) + return -EINVAL; + + if (sid->index >= priv->psfp_cap.max_streamid) + return -EINVAL; + + if (sid->filtertype != STREAMID_TYPE_NULL && + sid->filtertype != STREAMID_TYPE_SMAC) + return -EOPNOTSUPP; + + /* Disable the entry before enabling it */ + cbd.index = cpu_to_le16((u16)sid->index); + cbd.cls = BDCR_CMD_STREAM_IDENTIFY; + cbd.status_flags = 0; + + data_size = sizeof(struct streamid_data); + tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, + &dma, (void
*)&si_data); + if (!tmp) + return -ENOMEM; + + eth_broadcast_addr(si_data->dmac); + si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK + + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); + + si_conf = &cbd.sid_set; + /* Only one port is supported per entry; set this port */ + si_conf->iports = cpu_to_le32(1 << port); + si_conf->id_type = 1; + si_conf->oui[2] = 0x0; + si_conf->oui[1] = 0x80; + si_conf->oui[0] = 0xC2; + + err = enetc_send_cmd(priv->si, &cbd); + if (err) + goto out; + + if (!enable) + goto out; + + /* Overwrite and enable the entry again, in case it was flushed by hardware */ + cbd.status_flags = 0; + + si_conf->en = 0x80; + si_conf->stream_handle = cpu_to_le32(sid->handle); + si_conf->iports = cpu_to_le32(1 << port); + si_conf->id_type = sid->filtertype; + si_conf->oui[2] = 0x0; + si_conf->oui[1] = 0x80; + si_conf->oui[0] = 0xC2; + + memset(si_data, 0, data_size); + + /* VIDM defaults to 1. + * VID Match. If set (b1) then the VID must match, otherwise + * any VID is considered a match. VIDM setting is only used + * when TG is set to b01. + */ + if (si_conf->id_type == STREAMID_TYPE_NULL) { + ether_addr_copy(si_data->dmac, sid->dst_mac); + si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) + + ((((u16)(sid->tagged) & 0x3) << 14) + | ENETC_CBDR_SID_VIDM); + } else if (si_conf->id_type == STREAMID_TYPE_SMAC) { + ether_addr_copy(si_data->smac, sid->src_mac); + si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) + + ((((u16)(sid->tagged) & 0x3) << 14) + | ENETC_CBDR_SID_VIDM); + } + + err = enetc_send_cmd(priv->si, &cbd); +out: + enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); + + return err; +} + +/* Stream Filter Instance Set Descriptor */ +static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, + struct enetc_psfp_filter *sfi, + u8 enable) +{ + struct enetc_cbd cbd = {.cmd = 0}; + struct sfi_conf *sfi_config; + int port; + + port = enetc_pf_to_port(priv->si->pdev); + if (port < 0) + return -EINVAL; + + cbd.index = cpu_to_le16(sfi->index); + cbd.cls = BDCR_CMD_STREAM_FILTER; + cbd.status_flags = 0x80; + cbd.length = cpu_to_le16(1); + + sfi_config = &cbd.sfi_conf; + if (!enable) + goto exit; + + sfi_config->en = 0x80; + + if (sfi->handle >= 0) { + sfi_config->stream_handle = + cpu_to_le32(sfi->handle); + sfi_config->sthm |= 0x80; + } + + sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); + sfi_config->input_ports = cpu_to_le32(1 << port); + + /* The priority value which may be matched against the + * frame's priority value to determine a match for this entry. + */ + if (sfi->prio >= 0) + sfi_config->multi |= (sfi->prio & 0x7) | 0x8; + + /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX + * field as being either an MSDU value or an index into the Flow + * Meter Instance table.
+ */ + if (sfi->maxsdu) { + sfi_config->msdu = + cpu_to_le16(sfi->maxsdu); + sfi_config->multi |= 0x40; + } + + if (sfi->meter_id >= 0) { + sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id); + sfi_config->multi |= 0x80; + } + +exit: + return enetc_send_cmd(priv->si, &cbd); +} + +static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, + u32 index, + struct psfp_streamfilter_counters *cnt) +{ + struct enetc_cbd cbd = { .cmd = 2 }; + struct sfi_counter_data *data_buf; + dma_addr_t dma; + u16 data_size; + void *tmp; + int err; + + cbd.index = cpu_to_le16((u16)index); + cbd.cmd = 2; + cbd.cls = BDCR_CMD_STREAM_FILTER; + cbd.status_flags = 0; + + data_size = sizeof(struct sfi_counter_data); + + tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, + &dma, (void *)&data_buf); + if (!tmp) + return -ENOMEM; + + err = enetc_send_cmd(priv->si, &cbd); + if (err) + goto exit; + + cnt->matching_frames_count = ((u64)data_buf->matchh << 32) + + data_buf->matchl; + + cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) + + data_buf->msdu_dropl; + + cnt->passing_sdu_count = cnt->matching_frames_count + - cnt->not_passing_sdu_count; + + cnt->not_passing_frames_count = + ((u64)data_buf->stream_gate_droph << 32) + + data_buf->stream_gate_dropl; + + cnt->passing_frames_count = cnt->matching_frames_count - + cnt->not_passing_sdu_count - + cnt->not_passing_frames_count; + + cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) + + data_buf->flow_meter_dropl; + +exit: + enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); + + return err; +} + +static u64 get_ptp_now(struct enetc_hw *hw) +{ + u64 now_lo, now_hi, now; + + now_lo = enetc_rd(hw, ENETC_SICTR0); + now_hi = enetc_rd(hw, ENETC_SICTR1); + now = now_lo | now_hi << 32; + + return now; +} + +static int get_start_ns(u64 now, u64 cycle, u64 *start) +{ + u64 n; + + if (!cycle) + return -EFAULT; + + n = div64_u64(now, cycle); + + *start = (n + 1) * cycle; + + return 0; +} + +/* Stream Gate Instance Set Descriptor */ +static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, + struct enetc_psfp_gate *sgi, + u8 enable) +{ + struct enetc_cbd cbd = { .cmd = 0 }; + struct sgi_table *sgi_config; + struct sgcl_conf *sgcl_config; + struct sgcl_data *sgcl_data; + struct sgce *sgce; + dma_addr_t dma; + u16 data_size; + int err, i; + void *tmp; + u64 now; + + cbd.index = cpu_to_le16(sgi->index); + cbd.cmd = 0; + cbd.cls = BDCR_CMD_STREAM_GCL; + cbd.status_flags = 0x80; + + /* disable */ + if (!enable) + return enetc_send_cmd(priv->si, &cbd); + + if (!sgi->num_entries) + return 0; + + if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist || + !sgi->cycletime) + return -EINVAL; + + /* enable */ + sgi_config = &cbd.sgi_table; + + /* Keep open before gate list start */ + sgi_config->ocgtst = 0x80; + + sgi_config->oipv = (sgi->init_ipv < 0) ? 
+ 0x0 : ((sgi->init_ipv & 0x7) | 0x8); + + sgi_config->en = 0x80; + + /* Basic config */ + err = enetc_send_cmd(priv->si, &cbd); + if (err) + return -EINVAL; + + memset(&cbd, 0, sizeof(cbd)); + + cbd.index = cpu_to_le16(sgi->index); + cbd.cmd = 1; + cbd.cls = BDCR_CMD_STREAM_GCL; + cbd.status_flags = 0; + + sgcl_config = &cbd.sgcl_conf; + + sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3; + + data_size = struct_size(sgcl_data, sgcl, sgi->num_entries); + tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, + &dma, (void *)&sgcl_data); + if (!tmp) + return -ENOMEM; + + sgce = &sgcl_data->sgcl[0]; + + sgcl_config->agtst = 0x80; + + sgcl_data->ct = sgi->cycletime; + sgcl_data->cte = sgi->cycletimext; + + if (sgi->init_ipv >= 0) + sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8; + + for (i = 0; i < sgi->num_entries; i++) { + struct action_gate_entry *from = &sgi->entries[i]; + struct sgce *to = &sgce[i]; + + if (from->gate_state) + to->multi |= 0x10; + + if (from->ipv >= 0) + to->multi |= ((from->ipv & 0x7) << 5) | 0x08; + + if (from->maxoctets >= 0) { + to->multi |= 0x01; + to->msdu[0] = from->maxoctets & 0xFF; + to->msdu[1] = (from->maxoctets >> 8) & 0xFF; + to->msdu[2] = (from->maxoctets >> 16) & 0xFF; + } + + to->interval = from->interval; + } + + /* If basetime is less than now, calculate start time */ + now = get_ptp_now(&priv->si->hw); + + if (sgi->basetime < now) { + u64 start; + + err = get_start_ns(now, sgi->cycletime, &start); + if (err) + goto exit; + sgcl_data->btl = lower_32_bits(start); + sgcl_data->bth = upper_32_bits(start); + } else { + u32 hi, lo; + + hi = upper_32_bits(sgi->basetime); + lo = lower_32_bits(sgi->basetime); + sgcl_data->bth = hi; + sgcl_data->btl = lo; + } + + err = enetc_send_cmd(priv->si, &cbd); + +exit: + enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); + return err; +} + +static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv, + struct enetc_psfp_meter *fmi, + u8 enable) +{ + struct enetc_cbd cbd = { .cmd = 0 }; + struct fmi_conf *fmi_config; + u64 temp = 0; + + cbd.index = cpu_to_le16((u16)fmi->index); + cbd.cls = BDCR_CMD_FLOW_METER; + cbd.status_flags = 0x80; + + if (!enable) + return enetc_send_cmd(priv->si, &cbd); + + fmi_config = &cbd.fmi_conf; + fmi_config->en = 0x80; + + if (fmi->cir) { + temp = (u64)8000 * fmi->cir; + temp = div_u64(temp, 3725); + } + + fmi_config->cir = cpu_to_le32((u32)temp); + fmi_config->cbs = cpu_to_le32(fmi->cbs); + + /* Default for eir ebs disable */ + fmi_config->eir = 0; + fmi_config->ebs = 0; + + /* Default: + * mark red disable + * drop on yellow disable + * color mode disable + * couple flag disable + */ + fmi_config->conf = 0; + + return enetc_send_cmd(priv->si, &cbd); +} + +static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index) +{ + struct enetc_stream_filter *f; + + hlist_for_each_entry(f, &epsfp.stream_list, node) + if (f->sid.index == index) + return f; + + return NULL; +} + +static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index) +{ + struct enetc_psfp_gate *g; + + hlist_for_each_entry(g, &epsfp.psfp_gate_list, node) + if (g->index == index) + return g; + + return NULL; +} + +static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index) +{ + struct enetc_psfp_filter *s; + + hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) + if (s->index == index) + return s; + + return NULL; +} + +static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index) +{ + struct enetc_psfp_meter *m; + + hlist_for_each_entry(m, &epsfp.psfp_meter_list, node) + if 
(m->index == index) + return m; + + return NULL; +} + +static struct enetc_psfp_filter + *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi) +{ + struct enetc_psfp_filter *s; + + hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) + if (s->gate_id == sfi->gate_id && + s->prio == sfi->prio && + s->maxsdu == sfi->maxsdu && + s->meter_id == sfi->meter_id) + return s; + + return NULL; +} + +static int enetc_get_free_index(struct enetc_ndev_priv *priv) +{ + u32 max_size = priv->psfp_cap.max_psfp_filter; + unsigned long index; + + index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size); + if (index == max_size) + return -1; + + return index; +} + +static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index) +{ + struct enetc_psfp_filter *sfi; + u8 z; + + sfi = enetc_get_filter_by_index(index); + WARN_ON(!sfi); + z = refcount_dec_and_test(&sfi->refcount); + + if (z) { + enetc_streamfilter_hw_set(priv, sfi, false); + hlist_del(&sfi->node); + kfree(sfi); + clear_bit(index, epsfp.psfp_sfi_bitmap); + } +} + +static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index) +{ + struct enetc_psfp_gate *sgi; + u8 z; + + sgi = enetc_get_gate_by_index(index); + WARN_ON(!sgi); + z = refcount_dec_and_test(&sgi->refcount); + if (z) { + enetc_streamgate_hw_set(priv, sgi, false); + hlist_del(&sgi->node); + kfree(sgi); + } +} + +static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index) +{ + struct enetc_psfp_meter *fmi; + u8 z; + + fmi = enetc_get_meter_by_index(index); + WARN_ON(!fmi); + z = refcount_dec_and_test(&fmi->refcount); + if (z) { + enetc_flowmeter_hw_set(priv, fmi, false); + hlist_del(&fmi->node); + kfree(fmi); + } +} + +static void remove_one_chain(struct enetc_ndev_priv *priv, + struct enetc_stream_filter *filter) +{ + if (filter->flags & ENETC_PSFP_FLAGS_FMI) + flow_meter_unref(priv, filter->fmi_index); + + stream_gate_unref(priv, filter->sgi_index); + stream_filter_unref(priv, filter->sfi_index); + + hlist_del(&filter->node); + kfree(filter); +} + +static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv, + struct enetc_streamid *sid, + struct enetc_psfp_filter *sfi, + struct enetc_psfp_gate *sgi, + struct enetc_psfp_meter *fmi) +{ + int err; + + err = enetc_streamid_hw_set(priv, sid, true); + if (err) + return err; + + if (sfi) { + err = enetc_streamfilter_hw_set(priv, sfi, true); + if (err) + goto revert_sid; + } + + err = enetc_streamgate_hw_set(priv, sgi, true); + if (err) + goto revert_sfi; + + if (fmi) { + err = enetc_flowmeter_hw_set(priv, fmi, true); + if (err) + goto revert_sgi; + } + + return 0; + +revert_sgi: + enetc_streamgate_hw_set(priv, sgi, false); +revert_sfi: + if (sfi) + enetc_streamfilter_hw_set(priv, sfi, false); +revert_sid: + enetc_streamid_hw_set(priv, sid, false); + return err; +} + +static struct actions_fwd * +enetc_check_flow_actions(u64 acts, unsigned long long inputkeys) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++) + if (acts == enetc_act_fwd[i].actions && + inputkeys & enetc_act_fwd[i].keys) + return &enetc_act_fwd[i]; + + return NULL; +} + +static int enetc_psfp_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload 
not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (act->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "QoS offload does not support packets per second"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, + struct flow_cls_offload *f) +{ + struct flow_action_entry *entryg = NULL, *entryp = NULL; + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct netlink_ext_ack *extack = f->common.extack; + struct enetc_stream_filter *filter, *old_filter; + struct enetc_psfp_meter *fmi = NULL, *old_fmi; + struct enetc_psfp_filter *sfi, *old_sfi; + struct enetc_psfp_gate *sgi, *old_sgi; + struct flow_action_entry *entry; + struct action_gate_entry *e; + u8 sfi_overwrite = 0; + int entries_size; + int i, err; + + if (f->common.chain_index >= priv->psfp_cap.max_streamid) { + NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); + return -ENOSPC; + } + + flow_action_for_each(i, entry, &rule->action) + if (entry->id == FLOW_ACTION_GATE) + entryg = entry; + else if (entry->id == FLOW_ACTION_POLICE) + entryp = entry; + + /* Not supported without a gate action */ + if (!entryg) + return -EINVAL; + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->sid.index = f->common.chain_index; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.mask->dst) && + !is_zero_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot match on both source and destination MAC"); + err = -EINVAL; + goto free_filter; + } + + if (!is_zero_ether_addr(match.mask->dst)) { + if (!is_broadcast_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, + "Masked matching on destination MAC not supported"); + err = -EINVAL; + goto free_filter; + } + ether_addr_copy(filter->sid.dst_mac, match.key->dst); + filter->sid.filtertype = STREAMID_TYPE_NULL; + } + + if (!is_zero_ether_addr(match.mask->src)) { + if (!is_broadcast_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, + "Masked matching on source MAC not supported"); + err = -EINVAL; + goto free_filter; + } + ether_addr_copy(filter->sid.src_mac, match.key->src); + filter->sid.filtertype = STREAMID_TYPE_SMAC; + } + } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); + err = -EINVAL; + goto free_filter; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) { + if (match.mask->vlan_priority != + (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); + err = -EINVAL; + goto free_filter; + } + } + + if (match.mask->vlan_id) { + if (match.mask->vlan_id != VLAN_VID_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id"); + err = -EINVAL; + goto free_filter; + } + + filter->sid.vid = match.key->vlan_id; + if
(!filter->sid.vid) + filter->sid.tagged = STREAMID_VLAN_UNTAGGED; + else + filter->sid.tagged = STREAMID_VLAN_TAGGED; + } + } else { + filter->sid.tagged = STREAMID_VLAN_ALL; + } + + /* parsing gate action */ + if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) { + NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); + err = -ENOSPC; + goto free_filter; + } + + if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { + NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); + err = -ENOSPC; + goto free_filter; + } + + entries_size = struct_size(sgi, entries, entryg->gate.num_entries); + sgi = kzalloc(entries_size, GFP_KERNEL); + if (!sgi) { + err = -ENOMEM; + goto free_filter; + } + + refcount_set(&sgi->refcount, 1); + sgi->index = entryg->hw_index; + sgi->init_ipv = entryg->gate.prio; + sgi->basetime = entryg->gate.basetime; + sgi->cycletime = entryg->gate.cycletime; + sgi->num_entries = entryg->gate.num_entries; + + e = sgi->entries; + for (i = 0; i < entryg->gate.num_entries; i++) { + e[i].gate_state = entryg->gate.entries[i].gate_state; + e[i].interval = entryg->gate.entries[i].interval; + e[i].ipv = entryg->gate.entries[i].ipv; + e[i].maxoctets = entryg->gate.entries[i].maxoctets; + } + + filter->sgi_index = sgi->index; + + sfi = kzalloc(sizeof(*sfi), GFP_KERNEL); + if (!sfi) { + err = -ENOMEM; + goto free_gate; + } + + refcount_set(&sfi->refcount, 1); + sfi->gate_id = sgi->index; + sfi->meter_id = ENETC_PSFP_WILDCARD; + + /* Flow meter and max frame size */ + if (entryp) { + err = enetc_psfp_policer_validate(&rule->action, entryp, extack); + if (err) + goto free_sfi; + + if (entryp->police.burst) { + fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); + if (!fmi) { + err = -ENOMEM; + goto free_sfi; + } + refcount_set(&fmi->refcount, 1); + fmi->cir = entryp->police.rate_bytes_ps; + fmi->cbs = entryp->police.burst; + fmi->index = entryp->hw_index; + filter->flags |= ENETC_PSFP_FLAGS_FMI; + filter->fmi_index = fmi->index; + sfi->meter_id = fmi->index; + } + + if (entryp->police.mtu) + sfi->maxsdu = entryp->police.mtu; + } + + /* prio ref the filter prio */ + if (f->common.prio && f->common.prio <= BIT(3)) + sfi->prio = f->common.prio - 1; + else + sfi->prio = ENETC_PSFP_WILDCARD; + + old_sfi = enetc_psfp_check_sfi(sfi); + if (!old_sfi) { + int index; + + index = enetc_get_free_index(priv); + if (index < 0) { + NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!"); + err = -ENOSPC; + goto free_fmi; + } + + sfi->index = index; + sfi->handle = index + HANDLE_OFFSET; + /* Update the stream filter handle also */ + filter->sid.handle = sfi->handle; + filter->sfi_index = sfi->index; + sfi_overwrite = 0; + } else { + filter->sfi_index = old_sfi->index; + filter->sid.handle = old_sfi->handle; + sfi_overwrite = 1; + } + + err = enetc_psfp_hw_set(priv, &filter->sid, + sfi_overwrite ? 
NULL : sfi, sgi, fmi); + if (err) + goto free_fmi; + + spin_lock(&epsfp.psfp_lock); + if (filter->flags & ENETC_PSFP_FLAGS_FMI) { + old_fmi = enetc_get_meter_by_index(filter->fmi_index); + if (old_fmi) { + refcount_set(&fmi->refcount, + refcount_read(&old_fmi->refcount) + 1); + hlist_del(&old_fmi->node); + kfree(old_fmi); + } + hlist_add_head(&fmi->node, &epsfp.psfp_meter_list); + } + + /* Remove the old node if it exists and replace it with the new one */ + old_sgi = enetc_get_gate_by_index(filter->sgi_index); + if (old_sgi) { + refcount_set(&sgi->refcount, + refcount_read(&old_sgi->refcount) + 1); + hlist_del(&old_sgi->node); + kfree(old_sgi); + } + + hlist_add_head(&sgi->node, &epsfp.psfp_gate_list); + + if (!old_sfi) { + hlist_add_head(&sfi->node, &epsfp.psfp_filter_list); + set_bit(sfi->index, epsfp.psfp_sfi_bitmap); + } else { + kfree(sfi); + refcount_inc(&old_sfi->refcount); + } + + old_filter = enetc_get_stream_by_index(filter->sid.index); + if (old_filter) + remove_one_chain(priv, old_filter); + + filter->stats.lastused = jiffies; + hlist_add_head(&filter->node, &epsfp.stream_list); + + spin_unlock(&epsfp.psfp_lock); + + return 0; + +free_fmi: + kfree(fmi); +free_sfi: + kfree(sfi); +free_gate: + kfree(sgi); +free_filter: + kfree(filter); + + return err; +} + +static int enetc_config_clsflower(struct enetc_ndev_priv *priv, + struct flow_cls_offload *cls_flower) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct flow_dissector *dissector = rule->match.dissector; + struct flow_action *action = &rule->action; + struct flow_action_entry *entry; + struct actions_fwd *fwd; + u64 actions = 0; + int i, err; + + if (!flow_action_has_entries(action)) { + NL_SET_ERR_MSG_MOD(extack, "At least one action is needed"); + return -EINVAL; + } + + flow_action_for_each(i, entry, action) + actions |= BIT(entry->id); + + fwd = enetc_check_flow_actions(actions, dissector->used_keys); + if (!fwd) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!"); + return -EOPNOTSUPP; + } + + if (fwd->output & FILTER_ACTION_TYPE_PSFP) { + err = enetc_psfp_parse_clsflower(priv, cls_flower); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs"); + return err; + } + } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported actions"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv, + struct flow_cls_offload *f) +{ + struct enetc_stream_filter *filter; + struct netlink_ext_ack *extack = f->common.extack; + int err; + + if (f->common.chain_index >= priv->psfp_cap.max_streamid) { + NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); + return -ENOSPC; + } + + filter = enetc_get_stream_by_index(f->common.chain_index); + if (!filter) + return -EINVAL; + + err = enetc_streamid_hw_set(priv, &filter->sid, false); + if (err) + return err; + + remove_one_chain(priv, filter); + + return 0; +} + +static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv, + struct flow_cls_offload *f) +{ + return enetc_psfp_destroy_clsflower(priv, f); +} + +static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, + struct flow_cls_offload *f) +{ + struct psfp_streamfilter_counters counters = {}; + struct enetc_stream_filter *filter; + struct flow_stats stats = {}; + int err; + + filter = enetc_get_stream_by_index(f->common.chain_index); + if (!filter) + return -EINVAL; + + err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
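+ /* The hardware counters are cumulative; they are converted below + * into deltas relative to the totals cached in filter->stats from + * the previous query. + */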
+ if (err) + return -EINVAL; + + spin_lock(&epsfp.psfp_lock); + stats.pkts = counters.matching_frames_count + + counters.not_passing_sdu_count - + filter->stats.pkts; + stats.drops = counters.not_passing_frames_count + + counters.not_passing_sdu_count + + counters.red_frames_count - + filter->stats.drops; + stats.lastused = filter->stats.lastused; + filter->stats.pkts += stats.pkts; + filter->stats.drops += stats.drops; + spin_unlock(&epsfp.psfp_lock); + + flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, + stats.lastused, FLOW_ACTION_HW_STATS_DELAYED); + + return 0; +} + +static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return enetc_config_clsflower(priv, cls_flower); + case FLOW_CLS_DESTROY: + return enetc_destroy_clsflower(priv, cls_flower); + case FLOW_CLS_STATS: + return enetc_psfp_get_stats(priv, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +static inline void clean_psfp_sfi_bitmap(void) +{ + bitmap_free(epsfp.psfp_sfi_bitmap); + epsfp.psfp_sfi_bitmap = NULL; +} + +static void clean_stream_list(void) +{ + struct enetc_stream_filter *s; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) { + hlist_del(&s->node); + kfree(s); + } +} + +static void clean_sfi_list(void) +{ + struct enetc_psfp_filter *sfi; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) { + hlist_del(&sfi->node); + kfree(sfi); + } +} + +static void clean_sgi_list(void) +{ + struct enetc_psfp_gate *sgi; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) { + hlist_del(&sgi->node); + kfree(sgi); + } +} + +static void clean_psfp_all(void) +{ + /* Disable all list nodes and free all memory */ + clean_sfi_list(); + clean_sgi_list(); + clean_stream_list(); + epsfp.dev_bitmap = 0; + clean_psfp_sfi_bitmap(); +} + +int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct net_device *ndev = cb_priv; + + if (!tc_can_offload(ndev)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data); + default: + return -EOPNOTSUPP; + } +} + +int enetc_set_psfp(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (en) { + err = enetc_psfp_enable(priv); + if (err) + return err; + + priv->active_offloads |= ENETC_F_QCI; + return 0; + } + + err = enetc_psfp_disable(priv); + if (err) + return err; + + priv->active_offloads &= ~ENETC_F_QCI; + + return 0; +} + +int enetc_psfp_init(struct enetc_ndev_priv *priv) +{ + if (epsfp.psfp_sfi_bitmap) + return 0; + + epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter, + GFP_KERNEL); + if (!epsfp.psfp_sfi_bitmap) + return -ENOMEM; + + spin_lock_init(&epsfp.psfp_lock); + + if (list_empty(&enetc_block_cb_list)) + epsfp.dev_bitmap = 0; + + return 0; +} + +int enetc_psfp_clean(struct enetc_ndev_priv *priv) +{ + if (!list_empty(&enetc_block_cb_list)) + return -EBUSY; + + clean_psfp_all(); + + return 0; +} + +int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct flow_block_offload *f = type_data; + int port, err; + + err = flow_block_cb_setup_simple(f, &enetc_block_cb_list, + enetc_setup_tc_block_cb, + ndev, ndev, true); + if (err) + return err; + + switch (f->command) { + case 
FLOW_BLOCK_BIND: + port = enetc_pf_to_port(priv->si->pdev); + if (port < 0) + return -EINVAL; + + set_bit(port, &epsfp.dev_bitmap); + break; + case FLOW_BLOCK_UNBIND: + port = enetc_pf_to_port(priv->si->pdev); + if (port < 0) + return -EINVAL; + + clear_bit(port, &epsfp.dev_bitmap); + if (!epsfp.dev_bitmap) + clean_psfp_all(); + break; + } + + return 0; +} + +int enetc_qos_query_caps(struct net_device *ndev, void *type_data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct tc_query_caps_base *base = type_data; + struct enetc_si *si = priv->si; + + switch (base->type) { + case TC_SETUP_QDISC_MQPRIO: { + struct tc_mqprio_caps *caps = base->caps; + + caps->validate_queue_counts = true; + + return 0; + } + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + if (si->hw_features & ENETC_SI_F_QBV) + caps->supports_queue_max_sdu = true; + + return 0; + } + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c new file mode 100644 index 0000000000..dfcaac302e --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <linux/module.h> +#include "enetc.h" + +#define ENETC_DRV_NAME_STR "ENETC VF driver" + +/* Messaging */ +static void enetc_msg_vsi_write_msg(struct enetc_hw *hw, + struct enetc_msg_swbd *msg) +{ + u32 val; + + val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma); + enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma)); + enetc_wr(hw, ENETC_VSIMSGSNDAR0, val); +} + +static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg) +{ + int timeout = 100; + u32 vsimsgsr; + + enetc_msg_vsi_write_msg(&si->hw, msg); + + do { + vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR); + if (!(vsimsgsr & ENETC_VSIMSGSR_MB)) + break; + + usleep_range(1000, 2000); + } while (--timeout); + + if (!timeout) + return -ETIMEDOUT; + + /* check for message delivery error */ + if (vsimsgsr & ENETC_VSIMSGSR_MS) { + dev_err(&si->pdev->dev, "VSI command execute error: %d\n", + ENETC_SIMSGSR_GET_MC(vsimsgsr)); + return -EIO; + } + + return 0; +} + +static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv, + struct sockaddr *saddr) +{ + struct enetc_msg_cmd_set_primary_mac *cmd; + struct enetc_msg_swbd msg; + int err; + + msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64); + msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma, + GFP_KERNEL); + if (!msg.vaddr) { + dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n", + msg.size); + return -ENOMEM; + } + + cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr; + cmd->header.type = ENETC_MSG_CMD_MNG_MAC; + cmd->header.id = ENETC_MSG_CMD_MNG_ADD; + memcpy(&cmd->mac, saddr, sizeof(struct sockaddr)); + + /* send the command and wait */ + err = enetc_msg_vsi_send(priv->si, &msg); + + dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma); + + return err; +} + +static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + return enetc_msg_vsi_set_primary_mac_addr(priv, saddr); +} + +static int enetc_vf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + enetc_set_features(ndev, features); + + return 0; +} + +static int enetc_vf_setup_tc(struct 
net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return enetc_setup_tc_mqprio(ndev, type_data); + default: + return -EOPNOTSUPP; + } +} + +/* Probing/ Init */ +static const struct net_device_ops enetc_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_vf_set_mac_addr, + .ndo_set_features = enetc_vf_set_features, + .ndo_eth_ioctl = enetc_ioctl, + .ndo_setup_tc = enetc_vf_setup_tc, +}; + +static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + if (si->num_rss) + ndev->hw_features |= NETIF_F_RXHASH; + + /* pick up primary MAC address from SI */ + enetc_load_primary_mac_addr(&si->hw, ndev); +} + +static int enetc_vf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + int err; + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0); + if (err) + return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); + + si = pci_get_drvdata(pdev); + + enetc_get_si_caps(si); + + ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS); + if (!ndev) { + err = -ENOMEM; + dev_err(&pdev->dev, "netdev creation failed\n"); + goto err_alloc_netdev; + } + + enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops); + + priv = netdev_priv(ndev); + + enetc_init_si_rings_params(priv); + + err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE, + &si->cbd_ring); + if (err) + goto err_setup_cbdr; + + err = enetc_alloc_si_resources(priv); + if (err) { + dev_err(&pdev->dev, "SI resource alloc failed\n"); + goto err_alloc_si_res; + } + + err = enetc_configure_si(priv); + if (err) { + dev_err(&pdev->dev, "Failed to configure SI\n"); + goto err_config_si; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); + goto err_alloc_msix; + } + + err = register_netdev(ndev); + if (err) + goto err_reg_netdev; + + netif_carrier_off(ndev); + + return 0; + +err_reg_netdev: + enetc_free_msix(priv); +err_config_si: +err_alloc_msix: + enetc_free_si_resources(priv); +err_alloc_si_res: + enetc_teardown_cbdr(&si->cbd_ring); +err_setup_cbdr: + si->ndev = NULL; + free_netdev(ndev); +err_alloc_netdev: + enetc_pci_remove(pdev); + + return err; +} + +static void enetc_vf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_ndev_priv *priv; + + priv = netdev_priv(si->ndev); + unregister_netdev(si->ndev); + + enetc_free_msix(priv); + + enetc_free_si_resources(priv); + enetc_teardown_cbdr(&si->cbd_ring); + + free_netdev(si->ndev); + + enetc_pci_remove(pdev); +} + 
+static const struct pci_device_id enetc_vf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_vf_id_table); + +static struct pci_driver enetc_vf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_vf_id_table, + .probe = enetc_vf_probe, + .remove = enetc_vf_remove, +}; +module_pci_driver(enetc_vf_driver); + +MODULE_DESCRIPTION(ENETC_DRV_NAME_STR); +MODULE_LICENSE("Dual BSD/GPL");
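For reference, the credit-based shaper arithmetic in enetc_setup_tc_cbs() above can be checked in isolation. The sketch below is a minimal userspace reproduction of the hiCredit calculation under hypothetical assumptions: a 1000 Mb/s link, the top prio TC, a 1500 byte MTU and a 50% idle slope; the 400 MHz reference clock is an illustrative stand-in for ENETC_CLK, whose real value is defined in enetc_hw.h and may differ.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t port_transmit_rate = 1000;	/* Mb/s, mirrors priv->speed */
	uint64_t enetc_clk = 400000000ULL;	/* illustrative clock, in Hz */
	uint32_t mtu = 1500;
	uint32_t port_frame_max_size = mtu + 18 + 4; /* + VLAN_ETH_HLEN + ETH_FCS_LEN */
	uint32_t bw = 50;			/* percent, derived from idleslope */

	/* top prio TC: interference window is one maximum-sized frame, in bits */
	uint32_t max_interference_size = port_frame_max_size * 8;

	uint32_t hi_credit_bit = max_interference_size * bw / 100;
	uint32_t hi_credit_reg = (uint32_t)(enetc_clk * 100 * hi_credit_bit /
					    (port_transmit_rate * 1000000ULL));

	/* prints: hiCredit: 6088 bits -> register value 243520 */
	printf("hiCredit: %u bits -> register value %u\n",
	       hi_credit_bit, hi_credit_reg);
	return 0;
}

Under these assumptions the computation yields hi_credit_bit = 6088 and a register value of 243520. The driver performs the same arithmetic with div_u64(), since plain 64-bit division is not available to kernel code on 32-bit architectures.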