Diffstat (limited to 'debian/patches-rt/0276-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch')
-rw-r--r--  debian/patches-rt/0276-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch  117
1 file changed, 117 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0276-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch b/debian/patches-rt/0276-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
new file mode 100644
index 000000000..fbd35f4c7
--- /dev/null
+++ b/debian/patches-rt/0276-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
@@ -0,0 +1,117 @@
+From afebcd1022d509a6887f066b6601caf561ee1ee5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 26 Mar 2019 18:31:54 +0100
+Subject: [PATCH 276/323] powerpc/pseries/iommu: Use a locallock instead
+ local_irq_save()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+The per-CPU variable tce_page is currently protected by disabling
+interrupts, and the function attempts to allocate memory while that
+protection is held.
+
+Use a local_lock instead of local_irq_save()/local_irq_disable().
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/platforms/pseries/iommu.c | 31 +++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 245f1f8df656..f05555dde8e7 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -24,6 +24,7 @@
+ #include <linux/of.h>
+ #include <linux/iommu.h>
+ #include <linux/rculist.h>
++#include <linux/local_lock.h>
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/rtas.h>
+@@ -190,7 +191,13 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
+ return ret;
+ }
+
+-static DEFINE_PER_CPU(__be64 *, tce_page);
++struct tce_page {
++ __be64 * page;
++ local_lock_t lock;
++};
++static DEFINE_PER_CPU(struct tce_page, tce_page) = {
++ .lock = INIT_LOCAL_LOCK(lock),
++};
+
+ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ long npages, unsigned long uaddr,
+@@ -212,9 +219,10 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ direction, attrs);
+ }
+
+- local_irq_save(flags); /* to protect tcep and the page behind it */
++ /* to protect tcep and the page behind it */
++ local_lock_irqsave(&tce_page.lock, flags);
+
+- tcep = __this_cpu_read(tce_page);
++ tcep = __this_cpu_read(tce_page.page);
+
+ /* This is safe to do since interrupts are off when we're called
+ * from iommu_alloc{,_sg}()
+@@ -223,12 +231,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+ /* If allocation fails, fall back to the loop implementation */
+ if (!tcep) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
+ return tce_build_pSeriesLP(tbl->it_index, tcenum,
+ tbl->it_page_shift,
+ npages, uaddr, direction, attrs);
+ }
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ rpn = __pa(uaddr) >> TCE_SHIFT;
+@@ -258,7 +266,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ tcenum += limit;
+ } while (npages > 0 && !rc);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
+
+ if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
+ ret = (int)rc;
+@@ -429,16 +437,17 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+ DMA_BIDIRECTIONAL, 0);
+ }
+
+- local_irq_disable(); /* to protect tcep and the page behind it */
+- tcep = __this_cpu_read(tce_page);
++ /* to protect tcep and the page behind it */
++ local_lock_irq(&tce_page.lock);
++ tcep = __this_cpu_read(tce_page.page);
+
+ if (!tcep) {
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+ if (!tcep) {
+- local_irq_enable();
++ local_unlock_irq(&tce_page.lock);
+ return -ENOMEM;
+ }
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
+@@ -481,7 +490,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+
+ /* error cleanup: caller will clear whole range */
+
+- local_irq_enable();
++ local_unlock_irq(&tce_page.lock);
+ return rc;
+ }
+
+--
+2.43.0
+
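For readers unfamiliar with the local_lock API used above, here is a minimal, self-contained sketch (kernel-style C) of the pattern the patch introduces: a per-CPU pointer is bundled with a local_lock_t so the critical section is visible to lockdep and stays correct on PREEMPT_RT, instead of relying on a bare local_irq_save()/local_irq_disable(). The names demo_buf and demo_fill() are illustrative only and do not appear in the kernel tree.

    #include <linux/local_lock.h>
    #include <linux/percpu.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    struct demo_buf {
            unsigned long *page;    /* lazily allocated per-CPU scratch page */
            local_lock_t lock;      /* protects ->page on this CPU */
    };

    static DEFINE_PER_CPU(struct demo_buf, demo_buf) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static int demo_fill(unsigned long val)
    {
            unsigned long *p;
            unsigned long flags;

            /* Replaces local_irq_save(): IRQs off on !RT, per-CPU lock on RT. */
            local_lock_irqsave(&demo_buf.lock, flags);

            p = __this_cpu_read(demo_buf.page);
            if (!p) {
                    /* Atomic allocation, mirroring tce_buildmulti_pSeriesLP(). */
                    p = (unsigned long *)__get_free_page(GFP_ATOMIC);
                    if (!p) {
                            /* Drop the lock before bailing out / falling back. */
                            local_unlock_irqrestore(&demo_buf.lock, flags);
                            return -ENOMEM;
                    }
                    __this_cpu_write(demo_buf.page, p);
            }

            p[0] = val;             /* work on the per-CPU page while protected */

            local_unlock_irqrestore(&demo_buf.lock, flags);
            return 0;
    }

On a non-RT kernel local_lock_irqsave() still disables interrupts, so the behaviour of tce_buildmulti_pSeriesLP() and tce_setrange_multi_pSeriesLP() is unchanged; on PREEMPT_RT it maps to a per-CPU spinlock, so the section is no longer entered with interrupts hard-disabled while memory may be allocated.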