From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 31 Aug 2018 14:16:30 +0200
Subject: [PATCH] of: allocate / free phandle cache outside of the devtree_lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.1-rt3.tar.xz

The phandle cache code allocates memory while holding devtree_lock which
is a raw_spinlock_t. Memory allocation (and free()) is not possible on
RT while a raw_spinlock_t is held.
Invoke the kfree() and kcalloc() while the lock is dropped.

Cc: Rob Herring <robh+dt@kernel.org>
Cc: Frank Rowand <frowand.list@gmail.com>
Cc: devicetree@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/of/base.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -130,46 +130,52 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 			phandles++;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 
 	if (!phandles)
-		goto out;
+		return;
 
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
 
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	if (!phandle_cache)
-		goto out;
+	shadow = kcalloc(cache_entries, sizeof(*phandle_cache), GFP_KERNEL);
+	if (!shadow)
+		return;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	phandle_cache = shadow;
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 			phandle_cache[np->phandle & phandle_cache_mask] = np;
 
-out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
 int of_free_phandle_cache(void)
 {
 	unsigned long flags;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
 
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
+	kfree(shadow);
 	return 0;
 }
 
 #if !defined(CONFIG_MODULES)