Diffstat (limited to 'debian/patches/features/all/ethernet-microsoft/0015-net-mana-Add-new-MANA-VF-performance-counters-for-ea.patch')
-rw-r--r--  debian/patches/features/all/ethernet-microsoft/0015-net-mana-Add-new-MANA-VF-performance-counters-for-ea.patch  334
1 file changed, 334 insertions, 0 deletions
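
The patch below wires new per-queue and per-port counters into the MANA driver's 'ethtool -S <interface>' output (per-queue names such as tx_<q>_tso_packets and tx_<q>_csum_partial, plus port-wide entries such as tx_cqe_err). The per-queue counters follow the kernel's u64_stats_sync discipline: the transmit path bumps them between u64_stats_update_begin() and u64_stats_update_end(), and the ethtool callback snapshots them in a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop. A minimal sketch of that pattern follows, using a hypothetical demo_stats structure rather than code from the patch:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 tso_packets;
	u64 tso_bytes;
	struct u64_stats_sync syncp;	/* guards the counters above */
};

/* Writer side (e.g. the transmit path): update counters inside a
 * begin/end pair so readers never observe a torn 64-bit value.
 */
static void demo_count_tso(struct demo_stats *st, unsigned int bytes)
{
	u64_stats_update_begin(&st->syncp);
	st->tso_packets++;
	st->tso_bytes += bytes;
	u64_stats_update_end(&st->syncp);
}

/* Reader side (e.g. a .get_ethtool_stats callback): retry the
 * snapshot if a writer raced with the read.
 */
static void demo_read_stats(struct demo_stats *st, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&st->syncp);
		*pkts = st->tso_packets;
		*bytes = st->tso_bytes;
	} while (u64_stats_fetch_retry(&st->syncp, start));
}

On 64-bit kernels the begin/end calls compile away, since 64-bit loads and stores are already atomic there; the seqcount only does work on 32-bit configurations.
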
diff --git a/debian/patches/features/all/ethernet-microsoft/0015-net-mana-Add-new-MANA-VF-performance-counters-for-ea.patch b/debian/patches/features/all/ethernet-microsoft/0015-net-mana-Add-new-MANA-VF-performance-counters-for-ea.patch
new file mode 100644
index 000000000..0ada4dca6
--- /dev/null
+++ b/debian/patches/features/all/ethernet-microsoft/0015-net-mana-Add-new-MANA-VF-performance-counters-for-ea.patch
@@ -0,0 +1,334 @@
+From fbefc731a4cc001fe7ce5786e08053d707dde85f Mon Sep 17 00:00:00 2001
+From: Shradha Gupta <shradhagupta@linux.microsoft.com>
+Date: Wed, 15 Mar 2023 04:55:13 -0700
+Subject: [PATCH 15/23] net: mana: Add new MANA VF performance counters for
+ easier troubleshooting
+
+Extended performance counter stats in 'ethtool -S <interface>' output
+for MANA VF to facilitate troubleshooting.
+
+Tested-on: Ubuntu22
+Signed-off-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit bd7fc6e1957c2102866f9e464c1f2302e891b7e9)
+Signed-off-by: Bastian Blank <waldi@debian.org>
+---
+ drivers/net/ethernet/microsoft/mana/mana_en.c | 62 ++++++++++++++++++-
+ .../ethernet/microsoft/mana/mana_ethtool.c | 52 +++++++++++++++-
+ include/net/mana/mana.h | 18 ++++++
+ 3 files changed, 128 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -157,6 +157,7 @@ netdev_tx_t mana_start_xmit(struct sk_bu
+ struct mana_txq *txq;
+ struct mana_cq *cq;
+ int err, len;
++ u16 ihs;
+
+ if (unlikely(!apc->port_is_up))
+ goto tx_drop;
+@@ -167,6 +168,7 @@ netdev_tx_t mana_start_xmit(struct sk_bu
+ txq = &apc->tx_qp[txq_idx].txq;
+ gdma_sq = txq->gdma_sq;
+ cq = &apc->tx_qp[txq_idx].tx_cq;
++ tx_stats = &txq->stats;
+
+ pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
+ pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
+@@ -180,10 +182,17 @@ netdev_tx_t mana_start_xmit(struct sk_bu
+
+ pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
+
+- if (pkt_fmt == MANA_SHORT_PKT_FMT)
++ if (pkt_fmt == MANA_SHORT_PKT_FMT) {
+ pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
+- else
++ u64_stats_update_begin(&tx_stats->syncp);
++ tx_stats->short_pkt_fmt++;
++ u64_stats_update_end(&tx_stats->syncp);
++ } else {
+ pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
++ u64_stats_update_begin(&tx_stats->syncp);
++ tx_stats->long_pkt_fmt++;
++ u64_stats_update_end(&tx_stats->syncp);
++ }
+
+ pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
+ pkg.wqe_req.flags = 0;
+@@ -233,9 +242,35 @@ netdev_tx_t mana_start_xmit(struct sk_bu
+ &ipv6_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
++
++ if (skb->encapsulation) {
++ ihs = skb_inner_tcp_all_headers(skb);
++ u64_stats_update_begin(&tx_stats->syncp);
++ tx_stats->tso_inner_packets++;
++ tx_stats->tso_inner_bytes += skb->len - ihs;
++ u64_stats_update_end(&tx_stats->syncp);
++ } else {
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
++ ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
++ } else {
++ ihs = skb_tcp_all_headers(skb);
++ if (ipv6_has_hopopt_jumbo(skb))
++ ihs -= sizeof(struct hop_jumbo_hdr);
++ }
++
++ u64_stats_update_begin(&tx_stats->syncp);
++ tx_stats->tso_packets++;
++ tx_stats->tso_bytes += skb->len - ihs;
++ u64_stats_update_end(&tx_stats->syncp);
++ }
++
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ csum_type = mana_checksum_info(skb);
+
++ u64_stats_update_begin(&tx_stats->syncp);
++ tx_stats->csum_partial++;
++ u64_stats_update_end(&tx_stats->syncp);
++
+ if (csum_type == IPPROTO_TCP) {
+ pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
+ pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
+@@ -255,8 +290,12 @@ netdev_tx_t mana_start_xmit(struct sk_bu
+ }
+ }
+
+- if (mana_map_skb(skb, apc, &pkg))
++ if (mana_map_skb(skb, apc, &pkg)) {
++ u64_stats_update_begin(&tx_stats->syncp);
++ tx_stats->mana_map_err++;
++ u64_stats_update_end(&tx_stats->syncp);
+ goto free_sgl_ptr;
++ }
+
+ skb_queue_tail(&txq->pending_skbs, skb);
+
+@@ -1039,6 +1078,8 @@ static void mana_poll_tx_cq(struct mana_
+ if (comp_read < 1)
+ return;
+
++ apc->eth_stats.tx_cqes = comp_read;
++
+ for (i = 0; i < comp_read; i++) {
+ struct mana_tx_comp_oob *cqe_oob;
+
+@@ -1067,6 +1108,7 @@ static void mana_poll_tx_cq(struct mana_
+ netdev_err(ndev, "TX: CQE error %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
++ apc->eth_stats.tx_cqe_err++;
+ break;
+
+ default:
+@@ -1077,6 +1119,7 @@ static void mana_poll_tx_cq(struct mana_
+ netdev_err(ndev, "TX: unknown CQE type %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
++ apc->eth_stats.tx_cqe_unknown_type++;
+ break;
+ }
+
+@@ -1123,6 +1166,8 @@ static void mana_poll_tx_cq(struct mana_
+ WARN_ON_ONCE(1);
+
+ cq->work_done = pkt_transmitted;
++
++ apc->eth_stats.tx_cqes -= pkt_transmitted;
+ }
+
+ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
+@@ -1257,12 +1302,15 @@ static void mana_process_rx_cqe(struct m
+ struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
+ struct net_device *ndev = rxq->ndev;
+ struct mana_recv_buf_oob *rxbuf_oob;
++ struct mana_port_context *apc;
+ struct device *dev = gc->dev;
+ void *new_buf, *old_buf;
+ struct page *new_page;
+ u32 curr, pktlen;
+ dma_addr_t da;
+
++ apc = netdev_priv(ndev);
++
+ switch (oob->cqe_hdr.cqe_type) {
+ case CQE_RX_OKAY:
+ break;
+@@ -1275,6 +1323,7 @@ static void mana_process_rx_cqe(struct m
+
+ case CQE_RX_COALESCED_4:
+ netdev_err(ndev, "RX coalescing is unsupported\n");
++ apc->eth_stats.rx_coalesced_err++;
+ return;
+
+ case CQE_RX_OBJECT_FENCE:
+@@ -1284,6 +1333,7 @@ static void mana_process_rx_cqe(struct m
+ default:
+ netdev_err(ndev, "Unknown RX CQE type = %d\n",
+ oob->cqe_hdr.cqe_type);
++ apc->eth_stats.rx_cqe_unknown_type++;
+ return;
+ }
+
+@@ -1346,11 +1396,15 @@ static void mana_poll_rx_cq(struct mana_
+ {
+ struct gdma_comp *comp = cq->gdma_comp_buf;
+ struct mana_rxq *rxq = cq->rxq;
++ struct mana_port_context *apc;
+ int comp_read, i;
+
++ apc = netdev_priv(rxq->ndev);
++
+ comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
+ WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+
++ apc->eth_stats.rx_cqes = comp_read;
+ rxq->xdp_flush = false;
+
+ for (i = 0; i < comp_read; i++) {
+@@ -1362,6 +1416,8 @@ static void mana_poll_rx_cq(struct mana_
+ return;
+
+ mana_process_rx_cqe(rxq, cq, &comp[i]);
++
++ apc->eth_stats.rx_cqes--;
+ }
+
+ if (rxq->xdp_flush)
+--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+@@ -13,6 +13,15 @@ static const struct {
+ } mana_eth_stats[] = {
+ {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
+ {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
++ {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
++ {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
++ {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
++ tx_cqe_unknown_type)},
++ {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
++ {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
++ rx_coalesced_err)},
++ {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
++ rx_cqe_unknown_type)},
+ };
+
+ static int mana_get_sset_count(struct net_device *ndev, int stringset)
+@@ -23,7 +32,8 @@ static int mana_get_sset_count(struct ne
+ if (stringset != ETH_SS_STATS)
+ return -EINVAL;
+
+- return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
++ return ARRAY_SIZE(mana_eth_stats) + num_queues *
++ (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
+ }
+
+ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+@@ -61,6 +71,22 @@ static void mana_get_strings(struct net_
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_xdp_xmit", i);
+ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_tso_packets", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_tso_bytes", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_tso_inner_packets", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_tso_inner_bytes", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_long_pkt_fmt", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_short_pkt_fmt", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_csum_partial", i);
++ p += ETH_GSTRING_LEN;
++ sprintf(p, "tx_%d_mana_map_err", i);
++ p += ETH_GSTRING_LEN;
+ }
+ }
+
+@@ -78,6 +104,14 @@ static void mana_get_ethtool_stats(struc
+ u64 xdp_xmit;
+ u64 xdp_drop;
+ u64 xdp_tx;
++ u64 tso_packets;
++ u64 tso_bytes;
++ u64 tso_inner_packets;
++ u64 tso_inner_bytes;
++ u64 long_pkt_fmt;
++ u64 short_pkt_fmt;
++ u64 csum_partial;
++ u64 mana_map_err;
+ int q, i = 0;
+
+ if (!apc->port_is_up)
+@@ -113,11 +147,27 @@ static void mana_get_ethtool_stats(struc
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
++ tso_packets = tx_stats->tso_packets;
++ tso_bytes = tx_stats->tso_bytes;
++ tso_inner_packets = tx_stats->tso_inner_packets;
++ tso_inner_bytes = tx_stats->tso_inner_bytes;
++ long_pkt_fmt = tx_stats->long_pkt_fmt;
++ short_pkt_fmt = tx_stats->short_pkt_fmt;
++ csum_partial = tx_stats->csum_partial;
++ mana_map_err = tx_stats->mana_map_err;
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
+
+ data[i++] = packets;
+ data[i++] = bytes;
+ data[i++] = xdp_xmit;
++ data[i++] = tso_packets;
++ data[i++] = tso_bytes;
++ data[i++] = tso_inner_packets;
++ data[i++] = tso_inner_bytes;
++ data[i++] = long_pkt_fmt;
++ data[i++] = short_pkt_fmt;
++ data[i++] = csum_partial;
++ data[i++] = mana_map_err;
+ }
+ }
+
+--- a/include/net/mana/mana.h
++++ b/include/net/mana/mana.h
+@@ -48,6 +48,10 @@ enum TRI_STATE {
+
+ #define MAX_PORTS_IN_MANA_DEV 256
+
++/* Update this count whenever the respective structures are changed */
++#define MANA_STATS_RX_COUNT 5
++#define MANA_STATS_TX_COUNT 11
++
+ struct mana_stats_rx {
+ u64 packets;
+ u64 bytes;
+@@ -61,6 +65,14 @@ struct mana_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
++ u64 tso_packets;
++ u64 tso_bytes;
++ u64 tso_inner_packets;
++ u64 tso_inner_bytes;
++ u64 short_pkt_fmt;
++ u64 long_pkt_fmt;
++ u64 csum_partial;
++ u64 mana_map_err;
+ struct u64_stats_sync syncp;
+ };
+
+@@ -331,6 +343,12 @@ struct mana_tx_qp {
+ struct mana_ethtool_stats {
+ u64 stop_queue;
+ u64 wake_queue;
++ u64 tx_cqes;
++ u64 tx_cqe_err;
++ u64 tx_cqe_unknown_type;
++ u64 rx_cqes;
++ u64 rx_coalesced_err;
++ u64 rx_cqe_unknown_type;
+ };
+
+ struct mana_context {