From 01997497f915e8f79871f3f2acb55ac465051d24 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:59 +0200
Subject: Adding debian version 6.1.76-1.

Signed-off-by: Daniel Baumann
---
 ...sign-interrupts-to-CPUs-based-on-NUMA-nod.patch | 114 +++++++++++++++++++++
 1 file changed, 114 insertions(+)
 create mode 100644 debian/patches/features/all/ethernet-microsoft/0002-net-mana-Assign-interrupts-to-CPUs-based-on-NUMA-nod.patch

diff --git a/debian/patches/features/all/ethernet-microsoft/0002-net-mana-Assign-interrupts-to-CPUs-based-on-NUMA-nod.patch b/debian/patches/features/all/ethernet-microsoft/0002-net-mana-Assign-interrupts-to-CPUs-based-on-NUMA-nod.patch
new file mode 100644
index 000000000..6fc9d9cc7
--- /dev/null
+++ b/debian/patches/features/all/ethernet-microsoft/0002-net-mana-Assign-interrupts-to-CPUs-based-on-NUMA-nod.patch
@@ -0,0 +1,114 @@
+From 8bdc2e0b4963e7cfd41b43ed117e8f0dec13fe29 Mon Sep 17 00:00:00 2001
+From: Saurabh Sengar
+Date: Mon, 31 Oct 2022 23:06:01 -0700
+Subject: [PATCH 02/23] net: mana: Assign interrupts to CPUs based on NUMA
+ nodes
+
+In large VMs with multiple NUMA nodes, network performance is usually
+best if network interrupts are all assigned to the same virtual NUMA
+node. This patch assigns online CPUs according to a NUMA-aware policy:
+local CPUs are returned first, followed by non-local ones, and the
+selection wraps around once all online CPUs have been used.
+
+Signed-off-by: Saurabh Sengar
+Reviewed-by: Haiyang Zhang
+Link: https://lore.kernel.org/r/1667282761-11547-1-git-send-email-ssengar@linux.microsoft.com
+Signed-off-by: Paolo Abeni
+(cherry picked from commit 71fa6887eeca7b631528f9c7a39815498de8028c)
+Signed-off-by: Bastian Blank
+---
+ drivers/net/ethernet/microsoft/mana/gdma.h      |  1 +
+ .../net/ethernet/microsoft/mana/gdma_main.c     | 30 +++++++++++++++++--
+ 2 files changed, 28 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
+index 48b0ab56bdb0..68684a8bcead 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma.h
++++ b/drivers/net/ethernet/microsoft/mana/gdma.h
+@@ -356,6 +356,7 @@ struct gdma_context {
+ 	void __iomem *shm_base;
+ 	void __iomem *db_page_base;
+ 	u32 db_page_size;
++	int numa_node;
+ 
+ 	/* Shared memory chanenl (used to bootstrap HWC) */
+ 	struct shm_channel shm_channel;
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index d674ebda2053..abe9888a40aa 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -1208,8 +1208,10 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 	struct gdma_context *gc = pci_get_drvdata(pdev);
+ 	struct gdma_irq_context *gic;
+ 	unsigned int max_irqs;
++	u16 *cpus;
++	cpumask_var_t req_mask;
+ 	int nvec, irq;
+-	int err, i, j;
++	int err, i = 0, j;
+ 
+ 	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
+ 		max_queues_per_port = MANA_MAX_NUM_QUEUES;
+@@ -1228,7 +1230,21 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 		goto free_irq_vector;
+ 	}
+ 
++	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
++		err = -ENOMEM;
++		goto free_irq;
++	}
++
++	cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
++	if (!cpus) {
++		err = -ENOMEM;
++		goto free_mask;
++	}
++	for (i = 0; i < nvec; i++)
++		cpus[i] = cpumask_local_spread(i, gc->numa_node);
++
+ 	for (i = 0; i < nvec; i++) {
++		cpumask_set_cpu(cpus[i], req_mask);
+ 		gic = &gc->irq_contexts[i];
+ 		gic->handler = NULL;
+ 		gic->arg = NULL;
+@@ -1243,13 +1259,17 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 		irq = pci_irq_vector(pdev, i);
+ 		if (irq < 0) {
+ 			err = irq;
+-			goto free_irq;
++			goto free_mask;
+ 		}
+ 
+ 		err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ 		if (err)
+-			goto free_irq;
++			goto free_mask;
++		irq_set_affinity_and_hint(irq, req_mask);
++		cpumask_clear(req_mask);
+ 	}
++	free_cpumask_var(req_mask);
++	kfree(cpus);
+ 
+ 	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
+ 	if (err)
+@@ -1260,6 +1280,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 
+ 	return 0;
+ 
++free_mask:
++	free_cpumask_var(req_mask);
++	kfree(cpus);
+ free_irq:
+ 	for (j = i - 1; j >= 0; j--) {
+ 		irq = pci_irq_vector(pdev, j);
+@@ -1389,6 +1412,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (!bar0_va)
+ 		goto free_gc;
+ 
++	gc->numa_node = dev_to_node(&pdev->dev);
+ 	gc->is_pf = mana_is_pf(pdev->device);
+ 	gc->bar0_va = bar0_va;
+ 	gc->dev = &pdev->dev;
+-- 
+2.40.1
+
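For readers tracing the logic: the heart of the change is the per-vector lookup
cpus[i] = cpumask_local_spread(i, gc->numa_node). The standalone C program below
is a minimal userspace sketch of that selection policy under an assumed, made-up
two-node topology; cpu_node[] and local_spread() are illustrative stand-ins for
the kernel's internal state and cpumask_local_spread(), not real kernel API.

/* Minimal model of the NUMA-aware spread the patch relies on: CPUs
 * local to the requested node are enumerated first, then the remaining
 * (remote) CPUs, and the index wraps around modulo the online count.
 *
 * Illustrative only: cpu_node[] is an assumed topology and
 * local_spread() stands in for the kernel's cpumask_local_spread().
 */
#include <stdio.h>

#define NR_CPUS 8

/* Assumed topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int local_spread(int i, int node)
{
	int order[NR_CPUS];
	int n = 0, cpu;

	/* Local CPUs first... */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_node[cpu] == node)
			order[n++] = cpu;
	/* ...then every non-local CPU. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_node[cpu] != node)
			order[n++] = cpu;

	return order[i % n];	/* wrap around, like the real helper */
}

int main(void)
{
	int nvec = 12;	/* e.g. more MSI-X vectors than online CPUs */
	int i;

	/* Mirrors the patch's loop:
	 *   for (i = 0; i < nvec; i++)
	 *           cpus[i] = cpumask_local_spread(i, gc->numa_node);
	 */
	for (i = 0; i < nvec; i++)
		printf("IRQ vector %2d -> CPU %d\n", i, local_spread(i, 1));

	return 0;
}

With node 1 requested, vectors 0-3 land on the node-local CPUs 4-7, vectors 4-7
spill over to the remote CPUs 0-3, and vectors 8-11 wrap back to CPUs 4-7,
matching the policy described in the commit message.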