From: Sebastian Andrzej Siewior
Date: Fri, 31 Aug 2018 14:16:30 +0200
Subject: [PATCH 021/353] of: allocate / free phandle cache outside of the devtree_lock
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=bb46fdc2bedd1a8eaa5e58329d772011b6026d47

The phandle cache code allocates memory while holding devtree_lock which
is a raw_spinlock_t. Memory allocation (and free()) is not possible on
RT while a raw_spinlock_t is held.
Invoke the kfree() and kcalloc() while the lock is dropped.

Cc: Rob Herring
Cc: Frank Rowand
Cc: devicetree@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior
---
 drivers/of/base.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/drivers/of/base.c b/drivers/of/base.c
index f0dbb7ad88cf..c59b30bab0e0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -130,31 +130,34 @@ static u32 phandle_cache_mask;
 /*
  * Caller must hold devtree_lock.
  */
-static void __of_free_phandle_cache(void)
+static struct device_node** __of_free_phandle_cache(void)
 {
 	u32 cache_entries = phandle_cache_mask + 1;
 	u32 k;
+	struct device_node **shadow;
 
 	if (!phandle_cache)
-		return;
+		return NULL;
 
 	for (k = 0; k < cache_entries; k++)
 		of_node_put(phandle_cache[k]);
 
-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
+	return shadow;
 }
 
 int of_free_phandle_cache(void)
 {
 	unsigned long flags;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
+	kfree(shadow);
 	return 0;
 }
 #if !defined(CONFIG_MODULES)
@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void)
 
 	if (!phandles)
 		goto out;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
 
 	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
 				GFP_ATOMIC);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	if (!phandle_cache)
 		goto out;
 
@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void)
 
 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 }
 
 void __init of_core_init(void)
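
Illustrative sketch (not part of the patch): the change above follows a
generic "detach under the lock, free after unlock" pattern, because the
memory allocator takes sleeping locks on PREEMPT_RT and therefore must
not be called while a raw_spinlock_t is held. The sketch below shows the
pattern in isolation; demo_lock, demo_cache and the demo_* functions are
invented for the example and do not exist in drivers/of/base.c.

/*
 * Sketch of the "free outside the raw spinlock" pattern, with invented
 * names; the real code is in drivers/of/base.c as patched above.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static void **demo_cache;

/* Caller must hold demo_lock; detaches the buffer but never frees it. */
static void **__demo_detach_cache(void)
{
	void **shadow = demo_cache;

	demo_cache = NULL;
	return shadow;
}

void demo_free_cache(void)
{
	unsigned long flags;
	void **shadow;

	raw_spin_lock_irqsave(&demo_lock, flags);
	shadow = __demo_detach_cache();
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	/* The allocator may sleep on RT, so free only after unlocking. */
	kfree(shadow);
}

void demo_repopulate_cache(unsigned int entries)
{
	unsigned long flags;
	void **shadow;

	raw_spin_lock_irqsave(&demo_lock, flags);
	shadow = __demo_detach_cache();

	/* Drop the raw lock around the allocation, as the patch does. */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	demo_cache = kcalloc(entries, sizeof(*demo_cache), GFP_ATOMIC);
	raw_spin_lock_irqsave(&demo_lock, flags);

	/* ... refill demo_cache under the lock here ... */

	raw_spin_unlock_irqrestore(&demo_lock, flags);
	kfree(shadow);
}

Returning the old buffer ("shadow") out of the locked section keeps the
point at which the cache becomes invisible to readers unchanged, while
moving the actual kfree()/kcalloc() calls outside the raw_spinlock_t
critical section.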