Subject: mm: Enable SLUB for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Oct 2012 10:32:35 +0100

Make SLUB RT aware and remove the restriction in Kconfig.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/slub_def.h |    2 
 init/Kconfig             |    1 
 mm/slub.c                |  116 ++++++++++++++++++++++++++++++++++++-----------
 3 files changed, 90 insertions(+), 29 deletions(-)
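
In short: the per-node list_lock becomes a raw_spinlock_t so it stays a real
spinning lock under PREEMPT_RT_FULL, allocate_slab() decides whether to enable
interrupts based on system_state rather than __GFP_WAIT on RT, and free_slab()
gains an irqs_disabled() path that parks pages on a per-CPU slub_free_list
instead of calling __free_slab() directly. The parked pages are freed later
via free_delayed() from __slab_alloc(), put_cpu_partial() and flush_all(),
where interrupts may be enabled again. A minimal sketch of that deferral
pattern follows; defer_free() and drain_deferred() are hypothetical helper
names used only for illustration, the real hunks are below:

	/* Caller runs with interrupts disabled: queue instead of freeing. */
	static void defer_free(struct page *page)
	{
		struct slub_free_list *f = &__get_cpu_var(slub_free_list);

		raw_spin_lock(&f->lock);
		list_add(&page->lru, &f->list);
		raw_spin_unlock(&f->lock);
	}

	/* Later, from a context that may free pages, drain the list. */
	static void drain_deferred(struct kmem_cache *s)
	{
		LIST_HEAD(tofree);
		struct slub_free_list *f;
		unsigned long flags;

		local_irq_save(flags);
		f = &__get_cpu_var(slub_free_list);
		raw_spin_lock(&f->lock);
		list_splice_init(&f->list, &tofree);
		raw_spin_unlock(&f->lock);
		local_irq_restore(flags);
		free_delayed(s, &tofree);	/* __free_slab() for each page */
	}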
Index: linux-stable/include/linux/slub_def.h
===================================================================
--- linux-stable.orig/include/linux/slub_def.h
+++ linux-stable/include/linux/slub_def.h
@@ -54,7 +54,7 @@ struct kmem_cache_cpu {
 };
 
 struct kmem_cache_node {
-	spinlock_t list_lock;	/* Protect partial list and nr_partial */
+	raw_spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
Index: linux-stable/init/Kconfig
===================================================================
--- linux-stable.orig/init/Kconfig
+++ linux-stable/init/Kconfig
@@ -1442,7 +1442,6 @@ config SLAB
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
-	depends on !PREEMPT_RT_FULL
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
 	   instead of managing queues of cached objects (SLAB approach).
Index: linux-stable/mm/slub.c
===================================================================
--- linux-stable.orig/mm/slub.c
+++ linux-stable/mm/slub.c
@@ -1253,6 +1253,12 @@ static inline void slab_free_hook(struct
 
 #endif /* CONFIG_SLUB_DEBUG */
 
+struct slub_free_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
 /*
  * Slab allocation and freeing
  */
@@ -1277,7 +1283,11 @@ static struct page *allocate_slab(struct
 
 	flags &= gfp_allowed_mask;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state == SYSTEM_RUNNING)
+#else
 	if (flags & __GFP_WAIT)
+#endif
 		local_irq_enable();
 
 	flags |= s->allocflags;
@@ -1317,7 +1327,11 @@ static struct page *allocate_slab(struct
 		kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state == SYSTEM_RUNNING)
+#else
 	if (flags & __GFP_WAIT)
+#endif
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1409,6 +1423,16 @@ static void __free_slab(struct kmem_cach
 	__free_pages(page, order);
 }
 
+static void free_delayed(struct kmem_cache *s, struct list_head *h)
+{
+	while(!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_slab(s, page);
+	}
+}
+
 #define need_reserve_slab_rcu						\
 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
 
@@ -1443,6 +1467,12 @@ static void free_slab(struct kmem_cache
 		}
 
 		call_rcu(head, rcu_free_slab);
+	} else if (irqs_disabled()) {
+		struct slub_free_list *f = &__get_cpu_var(slub_free_list);
+
+		raw_spin_lock(&f->lock);
+		list_add(&page->lru, &f->list);
+		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
@@ -1544,7 +1574,7 @@ static void *get_partial_node(struct kme
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
 		int available;
@@ -1569,7 +1599,7 @@ static void *get_partial_node(struct kme
 			break;
 
 	}
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	return object;
 }
 
@@ -1811,7 +1841,7 @@ redo:
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
@@ -1822,7 +1852,7 @@ redo:
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	}
 
@@ -1857,7 +1887,7 @@ redo:
 		goto redo;
 
 	if (lock)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	if (m == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
@@ -1886,10 +1916,10 @@ static void unfreeze_partials(struct kme
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				spin_unlock(&n->list_lock);
+				raw_spin_unlock(&n->list_lock);
 
 			n = n2;
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 
 		do {
@@ -1918,7 +1948,7 @@ static void unfreeze_partials(struct kme
 	}
 
 	if (n)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	while (discard_page) {
 		page = discard_page;
@@ -1939,7 +1969,7 @@ static void unfreeze_partials(struct kme
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1954,14 +1984,21 @@ int put_cpu_partial(struct kmem_cache *s
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
+				struct slub_free_list *f;
 				unsigned long flags;
+				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+				f = &__get_cpu_var(slub_free_list);
+				raw_spin_lock(&f->lock);
+				list_splice_init(&f->list, &tofree);
+				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
+				free_delayed(s, &tofree);
 				pobjects = 0;
 				pages = 0;
 				stat(s, CPU_PARTIAL_DRAIN);
@@ -2023,7 +2060,22 @@ static bool has_cpu_slab(int cpu, void *
 
 static void flush_all(struct kmem_cache *s)
 {
+	LIST_HEAD(tofree);
+	int cpu;
+
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	for_each_online_cpu(cpu) {
+		struct slub_free_list *f;
+
+		if (!has_cpu_slab(cpu, s))
+			continue;
+
+		f = &per_cpu(slub_free_list, cpu);
+		raw_spin_lock_irq(&f->lock);
+		list_splice_init(&f->list, &tofree);
+		raw_spin_unlock_irq(&f->lock);
+		free_delayed(s, &tofree);
+	}
 }
 
 /*
@@ -2051,10 +2103,10 @@ static unsigned long count_partial(struc
 	unsigned long x = 0;
 	struct page *page;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
 		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 
@@ -2197,9 +2249,11 @@ static inline void *get_freelist(struct
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
+	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
+	LIST_HEAD(tofree);
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2262,7 +2316,13 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
+out:
+	f = &__get_cpu_var(slub_free_list);
+	raw_spin_lock(&f->lock);
+	list_splice_init(&f->list, &tofree);
+	raw_spin_unlock(&f->lock);
 	local_irq_restore(flags);
+	free_delayed(s, &tofree);
 	return freelist;
 
 new_slab:
@@ -2280,9 +2340,7 @@ new_slab:
 	if (unlikely(!freelist)) {
 		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 			slab_out_of_memory(s, gfpflags, node);
-
-		local_irq_restore(flags);
-		return NULL;
+		goto out;
 	}
 
 	page = c->page;
@@ -2296,8 +2354,7 @@ new_slab:
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
-	return freelist;
+	goto out;
 }
 
 /*
@@ -2488,7 +2545,7 @@ static void __slab_free(struct kmem_cach
 			 * Otherwise the list_lock will synchronize with
 			 * other processors updating the list of slabs.
 			 */
-			spin_lock_irqsave(&n->list_lock, flags);
+			raw_spin_lock_irqsave(&n->list_lock, flags);
 
 		}
 	}
@@ -2538,7 +2595,7 @@ static void __slab_free(struct kmem_cach
 			stat(s, FREE_ADD_PARTIAL);
 		}
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2552,7 +2609,7 @@ slab_empty:
 	/* Slab must be on the full list */
 	remove_full(s, page);
 
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2781,7 +2838,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	spin_lock_init(&n->list_lock);
+	raw_spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
@@ -3524,7 +3581,7 @@ int kmem_cache_shrink(struct kmem_cache
 		for (i = 0; i < objects; i++)
 			INIT_LIST_HEAD(slabs_by_inuse + i);
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 
 		/*
 		 * Build lists indexed by the items in use in each slab.
@@ -3545,7 +3602,7 @@ int kmem_cache_shrink(struct kmem_cache
 		for (i = objects - 1; i > 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
@@ -3711,10 +3768,15 @@ void __init kmem_cache_init(void)
 	int i;
 	int caches = 0;
 	struct kmem_cache *temp_kmem_cache;
-	int order;
+	int order, cpu;
 	struct kmem_cache *temp_kmem_cache_node;
 	unsigned long kmalloc_size;
 
+	for_each_possible_cpu(cpu) {
+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+	}
+
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
 
@@ -4127,7 +4189,7 @@ static int validate_slab_node(struct kme
 	struct page *page;
 	unsigned long flags;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page, map);
@@ -4150,7 +4212,7 @@ static int validate_slab_node(struct kme
 			atomic_long_read(&n->nr_slabs));
 
 out:
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
 
@@ -4340,12 +4402,12 @@ static int list_locations(struct kmem_ca
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
 			process_slab(&t, s, page, alloc, map);
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
 	for (i = 0; i < t.count; i++) {
|