Subject: mm: Enable SLUB for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Oct 2012 10:32:35 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.0/patches-4.0.5-rt3.tar.xz

Make SLUB RT aware and remove the restriction in Kconfig.
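
In short, the conversion below does three things: kmem_cache_node::list_lock
becomes a raw_spinlock_t so it can still be taken with interrupts disabled on
RT; slab pages that would otherwise be freed with interrupts off are queued on
a per-CPU slub_free_list and released later via free_delayed() once interrupts
are enabled again; and allocate_slab() keys its irq toggling off system_state
on RT instead of __GFP_WAIT.

For illustration only (not part of the patch): a minimal userspace sketch of
the defer-and-drain idea behind slub_free_list. The names and the pthread
mutex are stand-ins for the kernel's per-CPU list and its raw spinlock; the
kernel variant links pages through page->lru and allocates nothing here.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct deferred {
		struct deferred *next;
		void *obj;
	};

	static struct {
		pthread_mutex_t lock;	/* stand-in for the raw spinlock */
		struct deferred *head;
	} free_list = { PTHREAD_MUTEX_INITIALIZER, NULL };

	/* Called where freeing right now is not allowed: just queue the object. */
	static void free_deferred(void *obj)
	{
		struct deferred *d = malloc(sizeof(*d));

		if (!d) {
			free(obj);	/* sketch only; the kernel version needs no allocation */
			return;
		}
		d->obj = obj;
		pthread_mutex_lock(&free_list.lock);
		d->next = free_list.head;
		free_list.head = d;
		pthread_mutex_unlock(&free_list.lock);
	}

	/* Called later, from a context where freeing is safe: drain the queue. */
	static void drain_deferred(void)
	{
		struct deferred *d;

		pthread_mutex_lock(&free_list.lock);
		d = free_list.head;
		free_list.head = NULL;
		pthread_mutex_unlock(&free_list.lock);

		while (d) {
			struct deferred *next = d->next;

			free(d->obj);	/* the real free happens here */
			free(d);
			d = next;
		}
	}

	int main(void)
	{
		free_deferred(malloc(32));
		free_deferred(malloc(64));
		drain_deferred();
		printf("deferred frees drained\n");
		return 0;
	}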

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/slab.h |    4 ++
 mm/slub.c |  118 +++++++++++++++++++++++++++++++++++++++++++++++---------------
 2 files changed, 95 insertions(+), 27 deletions(-)

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -330,7 +330,11 @@ static inline struct kmem_cache *cache_f
  * The slab lists for all objects.
  */
 struct kmem_cache_node {
+#ifdef CONFIG_SLUB
+	raw_spinlock_t list_lock;
+#else
 	spinlock_t list_lock;
+#endif
 
 #ifdef CONFIG_SLAB
 	struct list_head slabs_partial;	/* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1069,7 +1069,7 @@ static noinline struct kmem_cache_node *
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	spin_lock_irqsave(&n->list_lock, *flags);
+	raw_spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
@@ -1116,7 +1116,7 @@ static noinline struct kmem_cache_node *
 
 fail:
 	slab_unlock(page);
-	spin_unlock_irqrestore(&n->list_lock, *flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
 	return NULL;
 }
@@ -1244,6 +1244,12 @@ static inline void dec_slabs_node(struct
 
 #endif /* CONFIG_SLUB_DEBUG */
 
+struct slub_free_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
@@ -1341,7 +1347,11 @@ static struct page *allocate_slab(struct
 
 	flags &= gfp_allowed_mask;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state == SYSTEM_RUNNING)
+#else
 	if (flags & __GFP_WAIT)
+#endif
 		local_irq_enable();
 
 	flags |= s->allocflags;
@@ -1382,7 +1392,11 @@ static struct page *allocate_slab(struct
 		kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state == SYSTEM_RUNNING)
+#else
 	if (flags & __GFP_WAIT)
+#endif
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1485,6 +1499,16 @@ static void __free_slab(struct kmem_cach
 	memcg_uncharge_slab(s, order);
 }
 
+static void free_delayed(struct list_head *h)
+{
+	while (!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_slab(page->slab_cache, page);
+	}
+}
+
 #define need_reserve_slab_rcu						\
 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
 
@@ -1519,6 +1543,12 @@ static void free_slab(struct kmem_cache
 		}
 
 		call_rcu(head, rcu_free_slab);
+	} else if (irqs_disabled()) {
+		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
+
+		raw_spin_lock(&f->lock);
+		list_add(&page->lru, &f->list);
+		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
@@ -1632,7 +1662,7 @@ static void *get_partial_node(struct kme
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
 
@@ -1657,7 +1687,7 @@ static void *get_partial_node(struct kme
 			break;
 
 	}
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	return object;
 }
 
@@ -1903,7 +1933,7 @@ static void deactivate_slab(struct kmem_
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
@@ -1914,7 +1944,7 @@ static void deactivate_slab(struct kmem_
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	}
 
@@ -1949,7 +1979,7 @@ static void deactivate_slab(struct kmem_
 		goto redo;
 
 	if (lock)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	if (m == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
@@ -1981,10 +2011,10 @@ static void unfreeze_partials(struct kme
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				spin_unlock(&n->list_lock);
+				raw_spin_unlock(&n->list_lock);
 
 			n = n2;
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 
 		do {
@@ -2013,7 +2043,7 @@ static void unfreeze_partials(struct kme
 	}
 
 	if (n)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	while (discard_page) {
 		page = discard_page;
@@ -2052,14 +2082,21 @@ static void put_cpu_partial(struct kmem_
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
+				struct slub_free_list *f;
 				unsigned long flags;
+				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+				f = this_cpu_ptr(&slub_free_list);
+				raw_spin_lock(&f->lock);
+				list_splice_init(&f->list, &tofree);
+				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
+				free_delayed(&tofree);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
@@ -2131,7 +2168,22 @@ static bool has_cpu_slab(int cpu, void *
 
 static void flush_all(struct kmem_cache *s)
 {
+	LIST_HEAD(tofree);
+	int cpu;
+
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	for_each_online_cpu(cpu) {
+		struct slub_free_list *f;
+
+		if (!has_cpu_slab(cpu, s))
+			continue;
+
+		f = &per_cpu(slub_free_list, cpu);
+		raw_spin_lock_irq(&f->lock);
+		list_splice_init(&f->list, &tofree);
+		raw_spin_unlock_irq(&f->lock);
+		free_delayed(&tofree);
+	}
 }
 
 /*
@@ -2167,10 +2219,10 @@ static unsigned long count_partial(struc
 	unsigned long x = 0;
 	struct page *page;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
 		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
@@ -2307,9 +2359,11 @@ static inline void *get_freelist(struct
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
+	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
+	LIST_HEAD(tofree);
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2377,7 +2431,13 @@ static void *__slab_alloc(struct kmem_ca
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
+out:
+	f = this_cpu_ptr(&slub_free_list);
+	raw_spin_lock(&f->lock);
+	list_splice_init(&f->list, &tofree);
+	raw_spin_unlock(&f->lock);
 	local_irq_restore(flags);
+	free_delayed(&tofree);
 	return freelist;
 
 new_slab:
@@ -2394,8 +2454,7 @@ static void *__slab_alloc(struct kmem_ca
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		local_irq_restore(flags);
-		return NULL;
+		goto out;
 	}
 
 	page = c->page;
@@ -2410,8 +2469,7 @@ static void *__slab_alloc(struct kmem_ca
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
-	return freelist;
+	goto out;
 }
 
 /*
@@ -2595,7 +2653,7 @@ static void __slab_free(struct kmem_cach
 
 	do {
 		if (unlikely(n)) {
-			spin_unlock_irqrestore(&n->list_lock, flags);
+			raw_spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
 		prior = page->freelist;
@@ -2627,7 +2685,7 @@ static void __slab_free(struct kmem_cach
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
-				spin_lock_irqsave(&n->list_lock, flags);
+				raw_spin_lock_irqsave(&n->list_lock, flags);
 
 			}
 		}
@@ -2669,7 +2727,7 @@ static void __slab_free(struct kmem_cach
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2684,7 +2742,7 @@ static void __slab_free(struct kmem_cach
 		remove_full(s, n, page);
 	}
 
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2883,7 +2941,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	spin_lock_init(&n->list_lock);
+	raw_spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
@@ -3465,7 +3523,7 @@ int __kmem_cache_shrink(struct kmem_cach
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
 			INIT_LIST_HEAD(promote + i);
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 
 		/*
 		 * Build lists of slabs to discard or promote.
@@ -3496,7 +3554,7 @@ int __kmem_cache_shrink(struct kmem_cach
 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
 			list_splice(promote + i, &n->partial);
 
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, &discard, lru)
@@ -3672,6 +3730,12 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+	}
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -3914,7 +3978,7 @@ static int validate_slab_node(struct kme
 	struct page *page;
 	unsigned long flags;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page, map);
@@ -3936,7 +4000,7 @@ static int validate_slab_node(struct kme
 	       s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
 
@@ -4124,12 +4188,12 @@ static int list_locations(struct kmem_ca
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
 			process_slab(&t, s, page, alloc, map);
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
 	for (i = 0; i < t.count; i++) {