Diffstat (limited to 'debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch')
-rw-r--r--	debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch	103
1 file changed, 103 insertions, 0 deletions
diff --git a/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch
new file mode 100644
index 000000000..9b53b3722
--- /dev/null
+++ b/debian/patches-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch
@@ -0,0 +1,103 @@
+From aafa075abaa80a17d9ac105e72ab235ddb573aac Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 31 Aug 2018 14:16:30 +0200
+Subject: [PATCH 021/347] of: allocate / free phandle cache outside of the
+ devtree_lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+The phandle cache code allocates memory while holding devtree_lock, which
+is a raw_spinlock_t. Memory allocation (and freeing) is not possible on
+RT while a raw_spinlock_t is held.
+Invoke kfree() and kcalloc() while the lock is dropped.
+
+Cc: Rob Herring <robh+dt@kernel.org>
+Cc: Frank Rowand <frowand.list@gmail.com>
+Cc: devicetree@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/of/base.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index f0dbb7ad88cf..c59b30bab0e0 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -130,31 +130,34 @@ static u32 phandle_cache_mask;
+ /*
+ * Caller must hold devtree_lock.
+ */
+-static void __of_free_phandle_cache(void)
++static struct device_node **__of_free_phandle_cache(void)
+ {
+ u32 cache_entries = phandle_cache_mask + 1;
+ u32 k;
++ struct device_node **shadow;
+
+ if (!phandle_cache)
+- return;
++ return NULL;
+
+ for (k = 0; k < cache_entries; k++)
+ of_node_put(phandle_cache[k]);
+
+- kfree(phandle_cache);
++ shadow = phandle_cache;
+ phandle_cache = NULL;
++ return shadow;
+ }
+
+ int of_free_phandle_cache(void)
+ {
+ unsigned long flags;
++ struct device_node **shadow;
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+
+- __of_free_phandle_cache();
++ shadow = __of_free_phandle_cache();
+
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+-
++ kfree(shadow);
+ return 0;
+ }
+ #if !defined(CONFIG_MODULES)
+@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void)
+ u32 cache_entries;
+ struct device_node *np;
+ u32 phandles = 0;
++ struct device_node **shadow;
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+
+- __of_free_phandle_cache();
++ shadow = __of_free_phandle_cache();
+
+ for_each_of_allnodes(np)
+ if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
+@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void)
+
+ if (!phandles)
+ goto out;
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ cache_entries = roundup_pow_of_two(phandles);
+ phandle_cache_mask = cache_entries - 1;
+
+ phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
+ GFP_ATOMIC);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ if (!phandle_cache)
+ goto out;
+
+@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void)
+
+ out:
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
++ kfree(shadow);
+ }
+
+ void __init of_core_init(void)
+--
+2.36.1
+
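
The pattern the patch applies generalizes beyond the phandle cache: the helper
that must run under a raw_spinlock_t only detaches the to-be-freed pointer and
returns it, and the caller invokes the allocator only after the lock has been
dropped. Below is a minimal userspace sketch of that shape, not the kernel code
itself: pthread_spinlock_t stands in for devtree_lock, a plain int array stands
in for phandle_cache, and the names detach_cache_locked(), free_cache() and
populate_cache() are illustrative. Build with `cc -pthread`.

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_spinlock_t lock;	/* stands in for devtree_lock */
	static int *cache;		/* stands in for phandle_cache */

	/*
	 * Caller must hold the lock. Detach the cache and hand it back;
	 * do NOT call free() here, since the lock is still held.
	 */
	static int *detach_cache_locked(void)
	{
		int *shadow = cache;

		cache = NULL;
		return shadow;
	}

	/* Mirrors of_free_phandle_cache(): free only after unlocking. */
	static void free_cache(void)
	{
		int *shadow;

		pthread_spin_lock(&lock);
		shadow = detach_cache_locked();
		pthread_spin_unlock(&lock);

		free(shadow);	/* allocator is only called unlocked */
	}

	/*
	 * Mirrors of_populate_phandle_cache(): drop the lock around the
	 * allocation, re-take it to publish the new buffer, and free the
	 * old one after the final unlock.
	 */
	static void populate_cache(size_t entries)
	{
		int *shadow, *new_cache;

		pthread_spin_lock(&lock);
		shadow = detach_cache_locked();
		pthread_spin_unlock(&lock);

		new_cache = calloc(entries, sizeof(*new_cache));

		pthread_spin_lock(&lock);
		if (new_cache)
			cache = new_cache;
		pthread_spin_unlock(&lock);

		free(shadow);
	}

	int main(void)
	{
		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		populate_cache(8);
		free_cache();
		pthread_spin_destroy(&lock);
		return 0;
	}

The design choice carried over from the patch is that ownership of the old
buffer transfers atomically inside the critical section (the detach), while
every call into the allocator happens outside it, which is exactly what an
RT kernel requires under a raw_spinlock_t.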