aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2018-08-31 14:16:30 +0200
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2019-07-11 17:28:04 +0200
commit6a30bfd44010bd533e52d86a636e6bf418065b53 (patch)
tree68da117d69f06fc4f9e8a9158ea7df8c43a9c7b5
parent279affa4a0609c915fdc57fe784a55d6ac3d1ef7 (diff)
downloadlinux-rt-devel-6a30bfd44010bd533e52d86a636e6bf418065b53.tar.gz
of: allocate / free phandle cache outside of the devtree_lock
The phandle cache code allocates memory while holding devtree_lock, which is a raw_spinlock_t. Memory allocation (and free()) is not possible on RT while a raw_spinlock_t is held. Invoke kfree() and kcalloc() while the lock is dropped.

Cc: Rob Herring <robh+dt@kernel.org>
Cc: Frank Rowand <frowand.list@gmail.com>
Cc: devicetree@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--drivers/of/base.c19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 20e0e7ee4edf95..a44c889a6e6ccf 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -138,31 +138,34 @@ static u32 phandle_cache_mask;
/*
* Caller must hold devtree_lock.
*/
-static void __of_free_phandle_cache(void)
+static struct device_node** __of_free_phandle_cache(void)
{
u32 cache_entries = phandle_cache_mask + 1;
u32 k;
+ struct device_node **shadow;
if (!phandle_cache)
- return;
+ return NULL;
for (k = 0; k < cache_entries; k++)
of_node_put(phandle_cache[k]);
- kfree(phandle_cache);
+ shadow = phandle_cache;
phandle_cache = NULL;
+ return shadow;
}
int of_free_phandle_cache(void)
{
unsigned long flags;
+ struct device_node **shadow;
raw_spin_lock_irqsave(&devtree_lock, flags);
- __of_free_phandle_cache();
+ shadow = __of_free_phandle_cache();
raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
+ kfree(shadow);
return 0;
}
#if !defined(CONFIG_MODULES)
@@ -197,10 +200,11 @@ void of_populate_phandle_cache(void)
u32 cache_entries;
struct device_node *np;
u32 phandles = 0;
+ struct device_node **shadow;
raw_spin_lock_irqsave(&devtree_lock, flags);
- __of_free_phandle_cache();
+ shadow = __of_free_phandle_cache();
for_each_of_allnodes(np)
if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -208,12 +212,14 @@ void of_populate_phandle_cache(void)
if (!phandles)
goto out;
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
cache_entries = roundup_pow_of_two(phandles);
phandle_cache_mask = cache_entries - 1;
phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
GFP_ATOMIC);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
if (!phandle_cache)
goto out;
@@ -225,6 +231,7 @@ void of_populate_phandle_cache(void)
out:
raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ kfree(shadow);
}
void __init of_core_init(void)