author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 18:50:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 18:50:03 +0000
commit    01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree      b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/net/ethernet/microsoft/mana
parent    Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/microsoft/mana')
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c    | 81
-rw-r--r-- drivers/net/ethernet/microsoft/mana/hw_channel.c   |  1
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c      | 53
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 73
4 files changed, 149 insertions(+), 59 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 6367de0c2c..d33b272145 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -158,6 +158,9 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
if (dev_type == GDMA_DEVICE_MANA) {
gc->mana.gdma_context = gc;
gc->mana.dev_id = dev;
+ } else if (dev_type == GDMA_DEVICE_MANA_IB) {
+ gc->mana_ib.dev_id = dev;
+ gc->mana_ib.gdma_context = gc;
}
}
@@ -414,8 +417,12 @@ static void mana_gd_process_eq_events(void *arg)
old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
/* No more entries */
- if (owner_bits == old_bits)
+ if (owner_bits == old_bits) {
+ /* return here without ringing the doorbell */
+ if (i == 0)
+ return;
break;
+ }
new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
if (owner_bits != new_bits) {
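
[Note: the early return added above avoids ringing the EQ doorbell when nothing was consumed. If the very first entry (i == 0) still carries the previous lap's owner bits, the head has not moved, so a doorbell write would be redundant. A minimal userspace sketch of the owner-bit lap check, with the owner-field width assumed for illustration:

#include <stdio.h>
#include <stdint.h>

#define GDMA_EQE_OWNER_MASK 0x3 /* assumed 2-bit owner field, demo only */

/* An EQE written during the previous lap carries old_bits; a fresh one
 * does not, so equality means "no more entries". */
static int eq_entry_is_fresh(uint32_t head, uint32_t num_eqe,
                             uint32_t owner_bits)
{
	uint32_t old_bits = (head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;

	return owner_bits != old_bits;
}

int main(void)
{
	uint32_t num_eqe = 256, head = 256; /* head at the start of lap 2 */

	printf("stale EQE seen: %d\n", eq_entry_is_fresh(head, num_eqe, 0));
	printf("fresh EQE seen: %d\n", eq_entry_is_fresh(head, num_eqe, 1));
	return 0;
}
]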
@@ -445,42 +452,29 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
- struct gdma_resource *r;
unsigned int msi_index;
unsigned long flags;
struct device *dev;
int err = 0;
gc = gd->gdma_context;
- r = &gc->msix_resource;
dev = gc->dev;
+ msi_index = spec->eq.msix_index;
- spin_lock_irqsave(&r->lock, flags);
-
- msi_index = find_first_zero_bit(r->map, r->size);
- if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
+ if (msi_index >= gc->num_msix_usable) {
err = -ENOSPC;
- } else {
- bitmap_set(r->map, msi_index, 1);
- queue->eq.msix_index = msi_index;
- }
-
- spin_unlock_irqrestore(&r->lock, flags);
-
- if (err) {
- dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
- err, msi_index, r->size, gc->num_msix_usable);
+ dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
+ err, msi_index, gc->num_msix_usable);
return err;
}
+ queue->eq.msix_index = msi_index;
gic = &gc->irq_contexts[msi_index];
- WARN_ON(gic->handler || gic->arg);
-
- gic->arg = queue;
-
- gic->handler = mana_gd_process_eq_events;
+ spin_lock_irqsave(&gic->lock, flags);
+ list_add_rcu(&queue->entry, &gic->eq_list);
+ spin_unlock_irqrestore(&gic->lock, flags);
return 0;
}
@@ -490,12 +484,11 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
- struct gdma_resource *r;
unsigned int msix_index;
unsigned long flags;
+ struct gdma_queue *eq;
gc = gd->gdma_context;
- r = &gc->msix_resource;
/* At most num_online_cpus() + 1 interrupts are used. */
msix_index = queue->eq.msix_index;
@@ -503,14 +496,17 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
return;
gic = &gc->irq_contexts[msix_index];
- gic->handler = NULL;
- gic->arg = NULL;
-
- spin_lock_irqsave(&r->lock, flags);
- bitmap_clear(r->map, msix_index, 1);
- spin_unlock_irqrestore(&r->lock, flags);
+ spin_lock_irqsave(&gic->lock, flags);
+ list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
+ if (queue == eq) {
+ list_del_rcu(&eq->entry);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gic->lock, flags);
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+ synchronize_rcu();
}
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
@@ -588,6 +584,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
int err;
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+ queue->id = INVALID_QUEUE_ID;
log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
@@ -819,6 +816,7 @@ free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
@@ -895,6 +893,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
+EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
@@ -971,6 +970,7 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA);
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1001,6 +1001,7 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
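
[Note: the four EXPORT_SYMBOL_NS() additions place the GDMA queue and device helpers in the NET_MANA symbol namespace, pairing with the new GDMA_DEVICE_MANA_IB registration in the first hunk: an RDMA driver can now drive MANA queues, but only after explicitly importing the namespace. A minimal, hypothetical consumer-side sketch:

#include <linux/module.h>

/* Without this import, modpost rejects the build with a
 * "module uses symbol ... from namespace NET_MANA" error. */
MODULE_IMPORT_NS(NET_MANA);

MODULE_DESCRIPTION("hypothetical NET_MANA consumer module");
MODULE_LICENSE("GPL");
]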
@@ -1217,9 +1218,14 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
struct gdma_irq_context *gic = arg;
+ struct list_head *eq_list = &gic->eq_list;
+ struct gdma_queue *eq;
- if (gic->handler)
- gic->handler(gic->arg);
+ rcu_read_lock();
+ list_for_each_entry_rcu(eq, eq_list, entry) {
+ gic->handler(eq);
+ }
+ rcu_read_unlock();
return IRQ_HANDLED;
}
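
[Note: taken together, the register/deregister/interrupt hunks replace the one-EQ-per-vector handler/arg pair with a per-vector list of EQs. Writers serialize on gic->lock, the interrupt path walks the list under RCU, and the synchronize_rcu() in the deregister path guarantees no in-flight handler still sees a queue being torn down. A userspace model of the dispatch scheme, with a single mutex standing in for the driver's spinlock-plus-RCU pair (a sketch, not the kernel code):

#include <stdio.h>
#include <pthread.h>

struct eq {
	int id;
	struct eq *next;
};

struct irq_context {
	pthread_mutex_t lock; /* driver: spinlock for writers, RCU for readers */
	struct eq *eq_list;
	void (*handler)(struct eq *eq);
};

static void process_eq_events(struct eq *eq)
{
	printf("draining EQ %d\n", eq->id);
}

static void register_eq(struct irq_context *gic, struct eq *eq)
{
	pthread_mutex_lock(&gic->lock);
	eq->next = gic->eq_list; /* driver: list_add_rcu() */
	gic->eq_list = eq;
	pthread_mutex_unlock(&gic->lock);
}

static void irq_fire(struct irq_context *gic)
{
	/* driver: rcu_read_lock() + list_for_each_entry_rcu() */
	pthread_mutex_lock(&gic->lock);
	for (struct eq *eq = gic->eq_list; eq; eq = eq->next)
		gic->handler(eq);
	pthread_mutex_unlock(&gic->lock);
}

int main(void)
{
	struct irq_context gic;
	struct eq a = { .id = 1 }, b = { .id = 2 };

	pthread_mutex_init(&gic.lock, NULL);
	gic.eq_list = NULL;
	gic.handler = process_eq_events;

	register_eq(&gic, &a);
	register_eq(&gic, &b);
	irq_fire(&gic); /* one vector now services every EQ on its list */
	return 0;
}
]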
@@ -1271,8 +1277,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
for (i = 0; i < nvec; i++) {
gic = &gc->irq_contexts[i];
- gic->handler = NULL;
- gic->arg = NULL;
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
if (!i)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
@@ -1295,10 +1302,6 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
- err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
- if (err)
- goto free_irq;
-
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
@@ -1329,8 +1332,6 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (gc->max_num_msix < 1)
return;
- mana_gd_free_res_map(&gc->msix_resource);
-
for (i = 0; i < gc->max_num_msix; i++) {
irq = pci_irq_vector(pdev, i);
if (irq < 0)
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 9d1cd3bfcf..2729a2c5ac 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -300,6 +300,7 @@ static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
spec.eq.context = ctx;
spec.eq.callback = cb;
spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
+ spec.eq.msix_index = 0;
return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index fc3d2903a8..d8af5e7e15 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -601,7 +601,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
- *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+ *datasize = mtu + ETH_HLEN;
}
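
[Note: dropping the ALIGN() is a correctness fix. datasize is posted as the receive DMA length, and rounding it up lets the device write more bytes than mtu + ETH_HLEN, which the surrounding buffer math does not account for. A small demo of the inflation, with the alignment value assumed:

#include <stdio.h>

#define ETH_HLEN 14
#define MANA_RX_DATA_ALIGN 64 /* value assumed for this demo */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int mtu = 1500;

	printf("aligned datasize: %d\n", ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN));
	printf("exact datasize:   %d\n", mtu + ETH_HLEN);
	return 0;
}
]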
static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
@@ -1244,6 +1244,7 @@ static int mana_create_eq(struct mana_context *ac)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
for (i = 0; i < gc->max_num_queues; i++) {
+ spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
if (err)
goto out;
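
[Note: with the bitmap allocator gone, the MSI-X index is now chosen by the caller through spec.eq.msix_index. The hardware channel pins its EQ to vector 0 (hw_channel.c hunk above) and the data-path EQs spread round-robin across the usable vectors, which also lets EQs outnumber vectors. A quick demo of the mapping, with both counts assumed:

#include <stdio.h>

int main(void)
{
	unsigned int num_msix_usable = 8; /* assumed vector count */
	unsigned int max_num_queues = 16; /* assumed queue count */

	/* (i + 1) % num_msix_usable starts the data EQs at vector 1
	 * (vector 0 is the hardware channel's) and wraps across all
	 * usable vectors when queues outnumber them. */
	for (unsigned int i = 0; i < max_num_queues; i++)
		printf("EQ %2u -> MSI-X vector %u\n",
		       i, (i + 1) % num_msix_usable);
	return 0;
}
]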
@@ -2137,6 +2138,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
pprm.pool_size = RX_BUFFERS_PER_QUEUE;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
+ pprm.netdev = rxq->ndev;
rxq->page_pool = page_pool_create(&pprm);
@@ -2385,13 +2387,33 @@ void mana_query_gf_stats(struct mana_port_context *apc)
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
sizeof(req), sizeof(resp));
- req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
+ req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
+ STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
+ STATISTICS_FLAGS_HC_RX_BYTES |
+ STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
+ STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
+ STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
+ STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
+ STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
+ STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
+ STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
+ STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
+ STATISTICS_FLAGS_HC_TX_BYTES |
STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
- STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
+ STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
+ STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
@@ -2407,6 +2429,30 @@ void mana_query_gf_stats(struct mana_port_context *apc)
return;
}
+ apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+ apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+ apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
+ apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+ apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+ apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+ apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+ apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+ apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+ apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+ apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+ apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+ resp.tx_err_inval_vport_offset_pkt;
+ apc->eth_stats.hc_tx_err_vlan_enforcement =
+ resp.tx_err_vlan_enforcement;
+ apc->eth_stats.hc_tx_err_eth_type_enforcement =
+ resp.tx_err_ethtype_enforcement;
+ apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+ apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+ resp.tx_err_SQPDID_enforcement;
+ apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+ resp.tx_err_CQPDID_enforcement;
+ apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+ apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
@@ -2414,6 +2460,7 @@ void mana_query_gf_stats(struct mana_port_context *apc)
apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+ apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}
static int mana_init_port(struct net_device *ndev)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 607150165a..ab2413d71f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,46 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+ {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+ hc_rx_discards_no_wqe)},
+ {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ hc_rx_err_vport_disabled)},
+ {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
+ {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_ucast_pkts)},
+ {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_ucast_bytes)},
+ {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_bcast_pkts)},
+ {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_bcast_bytes)},
+ {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_mcast_pkts)},
+ {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_mcast_bytes)},
+ {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_gf_disabled)},
+ {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_vport_disabled)},
+ {"hc_tx_err_inval_vportoffset_pkt",
+ offsetof(struct mana_ethtool_stats,
+ hc_tx_err_inval_vportoffset_pkt)},
+ {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_vlan_enforcement)},
+ {"hc_tx_err_eth_type_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
+ {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_sa_enforcement)},
+ {"hc_tx_err_sqpdid_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+ {"hc_tx_err_cqpdid_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
+ {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_mtu_violation)},
+ {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_inval_oob)},
+ {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_gdma)},
{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
hc_tx_ucast_pkts)},
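
[Note: all of the new counters follow the table's existing name/offset pattern: ethtool only needs a label plus a byte offset into struct mana_ethtool_stats to dump each u64. A standalone userspace illustration of that pattern, with invented stat names:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint64_t stop_queue;
	uint64_t wake_queue;
};

static const struct {
	char name[32];
	size_t offset;
} demo_tbl[] = {
	{ "stop_queue", offsetof(struct demo_stats, stop_queue) },
	{ "wake_queue", offsetof(struct demo_stats, wake_queue) },
};

int main(void)
{
	struct demo_stats s = { .stop_queue = 3, .wake_queue = 5 };

	/* ethtool -S walks such a table: the name becomes the label,
	 * the offset pulls the u64 out of the stats struct. */
	for (size_t i = 0; i < sizeof(demo_tbl) / sizeof(demo_tbl[0]); i++)
		printf("%s: %llu\n", demo_tbl[i].name,
		       (unsigned long long)*(uint64_t *)((char *)&s + demo_tbl[i].offset));
	return 0;
}
]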
@@ -208,28 +248,28 @@ static u32 mana_rss_indir_size(struct net_device *ndev)
return MANA_INDIRECT_TABLE_SIZE;
}
-static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int mana_get_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mana_port_context *apc = netdev_priv(ndev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- indir[i] = apc->indir_table[i];
+ rxfh->indir[i] = apc->indir_table[i];
}
- if (key)
- memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);
+ if (rxfh->key)
+ memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);
return 0;
}
-static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int mana_set_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
@@ -240,25 +280,26 @@ static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
if (!apc->port_is_up)
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- if (indir[i] >= apc->num_queues)
+ if (rxfh->indir[i] >= apc->num_queues)
return -EINVAL;
update_table = true;
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
save_table[i] = apc->indir_table[i];
- apc->indir_table[i] = indir[i];
+ apc->indir_table[i] = rxfh->indir[i];
}
}
- if (key) {
+ if (rxfh->key) {
update_hash = true;
memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
- memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
+ memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
}
err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
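
[Note: these last two hunks adapt the driver to the 6.8 ethtool core, which consolidated the separate indir/key/hfunc arguments into struct ethtool_rxfh_param and gave set_rxfh an extack parameter. Note also the save_table/save_key copies: the new settings are applied tentatively and, just past the shown context, restored if mana_config_rss() fails. A userspace sketch of that save-apply-rollback pattern, with the key size assumed:

#include <stdio.h>
#include <string.h>

#define HASH_KEY_SIZE 40 /* stands in for MANA_HASH_KEY_SIZE; assumed here */

static int apply_to_device(const unsigned char *key)
{
	(void)key;
	return -1; /* pretend the device rejected the update */
}

int main(void)
{
	unsigned char hashkey[HASH_KEY_SIZE] = { 0xaa };
	unsigned char save_key[HASH_KEY_SIZE];
	const unsigned char new_key[HASH_KEY_SIZE] = { 0xbb };

	memcpy(save_key, hashkey, sizeof(save_key)); /* snapshot old state */
	memcpy(hashkey, new_key, sizeof(hashkey));   /* tentatively apply */

	if (apply_to_device(hashkey) != 0) {
		/* roll back so the soft state matches the device again */
		memcpy(hashkey, save_key, sizeof(hashkey));
		puts("update failed, previous key restored");
	}
	return 0;
}
]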