linux/debian/patches/features/all/rt/0090-mm-slab-wrap-functions...


From 96d14b2f907b78c9e5c6ee1e46c843b09dcf6533 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 18 Jun 2011 19:44:43 +0200
Subject: [090/254] mm-slab-wrap-functions.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
mm/slab.c | 152 ++++++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 104 insertions(+), 48 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 80c57f9..dc84364 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,6 +116,7 @@
#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
+#include <linux/locallock.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -726,12 +727,40 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
#endif
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
+static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1)
+#else
+/*
+ * execute func() for all CPUs. On PREEMPT_RT we don't actually have
+ * to run on the remote CPUs - we only have to take their CPU-locks.
+ * (This is a rare operation, so cacheline bouncing is not an issue.)
+ */
+static void
+slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
+{
+ unsigned int i;
+
+ for_each_online_cpu(i) {
+ spin_lock_irq(&per_cpu(slab_lock, i).lock);
+ func(arg, i);
+ spin_unlock_irq(&per_cpu(slab_lock, i).lock);
+ }
+}
+#endif
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
}
+static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep,
+ int cpu)
+{
+ return cachep->array[cpu];
+}
+
static inline struct kmem_cache *__find_general_cachep(size_t size,
gfp_t gfpflags)
{
@@ -1080,9 +1109,10 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+ if (ac && ac->avail &&
+ local_spin_trylock_irq(slab_lock, &ac->lock)) {
__drain_alien_cache(cachep, ac, node);
- spin_unlock_irq(&ac->lock);
+ local_spin_unlock_irq(slab_lock, &ac->lock);
}
}
}
@@ -1097,9 +1127,9 @@ static void drain_alien_cache(struct kmem_cache *cachep,
for_each_online_node(i) {
ac = alien[i];
if (ac) {
- spin_lock_irqsave(&ac->lock, flags);
+ local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
__drain_alien_cache(cachep, ac, i);
- spin_unlock_irqrestore(&ac->lock, flags);
+ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
}
}
}
@@ -1178,11 +1208,11 @@ static int init_cache_nodelists_node(int node)
cachep->nodelists[node] = l3;
}
- spin_lock_irq(&cachep->nodelists[node]->list_lock);
+ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
cachep->nodelists[node]->free_limit =
(1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
- spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
}
return 0;
}
@@ -1207,7 +1237,7 @@ static void __cpuinit cpuup_canceled(long cpu)
if (!l3)
goto free_array_cache;
- spin_lock_irq(&l3->list_lock);
+ local_spin_lock_irq(slab_lock, &l3->list_lock);
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
@@ -1215,7 +1245,7 @@ static void __cpuinit cpuup_canceled(long cpu)
free_block(cachep, nc->entry, nc->avail, node);
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
goto free_array_cache;
}
@@ -1229,7 +1259,7 @@ static void __cpuinit cpuup_canceled(long cpu)
alien = l3->alien;
l3->alien = NULL;
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
kfree(shared);
if (alien) {
@@ -1303,7 +1333,7 @@ static int __cpuinit cpuup_prepare(long cpu)
l3 = cachep->nodelists[node];
BUG_ON(!l3);
- spin_lock_irq(&l3->list_lock);
+ local_spin_lock_irq(slab_lock, &l3->list_lock);
if (!l3->shared) {
/*
* We are serialised from CPU_DEAD or
@@ -1318,7 +1348,7 @@ static int __cpuinit cpuup_prepare(long cpu)
alien = NULL;
}
#endif
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
kfree(shared);
free_alien_cache(alien);
if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1509,6 +1539,8 @@ void __init kmem_cache_init(void)
if (num_possible_nodes() == 1)
use_alien_caches = 0;
+ local_irq_lock_init(slab_lock);
+
for (i = 0; i < NUM_INIT_LISTS; i++) {
kmem_list3_init(&initkmem_list3[i]);
if (i < MAX_NUMNODES)
@@ -2565,7 +2597,7 @@ EXPORT_SYMBOL(kmem_cache_create);
#if DEBUG
static void check_irq_off(void)
{
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
}
static void check_irq_on(void)
@@ -2600,13 +2632,12 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
struct array_cache *ac,
int force, int node);
-static void do_drain(void *arg)
+static void __do_drain(void *arg, unsigned int cpu)
{
struct kmem_cache *cachep = arg;
struct array_cache *ac;
- int node = numa_mem_id();
+ int node = cpu_to_mem(cpu);
- check_irq_off();
ac = cpu_cache_get(cachep);
spin_lock(&cachep->nodelists[node]->list_lock);
free_block(cachep, ac->entry, ac->avail, node);
@@ -2614,12 +2645,24 @@ static void do_drain(void *arg)
ac->avail = 0;
}
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_drain(void *arg)
+{
+ __do_drain(arg, smp_processor_id());
+}
+#else
+static void do_drain(void *arg, int this_cpu)
+{
+ __do_drain(arg, this_cpu);
+}
+#endif
+
static void drain_cpu_caches(struct kmem_cache *cachep)
{
struct kmem_list3 *l3;
int node;
- on_each_cpu(do_drain, cachep, 1);
+ slab_on_each_cpu(do_drain, cachep);
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
@@ -2650,10 +2693,10 @@ static int drain_freelist(struct kmem_cache *cache,
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
- spin_lock_irq(&l3->list_lock);
+ local_spin_lock_irq(slab_lock, &l3->list_lock);
p = l3->slabs_free.prev;
if (p == &l3->slabs_free) {
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
goto out;
}
@@ -2667,7 +2710,7 @@ static int drain_freelist(struct kmem_cache *cache,
* to the cache.
*/
l3->free_objects -= cache->num;
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
slab_destroy(cache, slabp);
nr_freed++;
}
@@ -2962,7 +3005,7 @@ static int cache_grow(struct kmem_cache *cachep,
offset *= cachep->colour_off;
if (local_flags & __GFP_WAIT)
- local_irq_enable();
+ local_unlock_irq(slab_lock);
/*
* The test for missing atomic flag is performed here, rather than
@@ -2992,7 +3035,7 @@ static int cache_grow(struct kmem_cache *cachep,
cache_init_objs(cachep, slabp);
if (local_flags & __GFP_WAIT)
- local_irq_disable();
+ local_lock_irq(slab_lock);
check_irq_off();
spin_lock(&l3->list_lock);
@@ -3006,7 +3049,7 @@ opps1:
kmem_freepages(cachep, objp);
failed:
if (local_flags & __GFP_WAIT)
- local_irq_disable();
+ local_lock_irq(slab_lock);
return 0;
}
@@ -3400,11 +3443,11 @@ retry:
* set and go into memory reserves if necessary.
*/
if (local_flags & __GFP_WAIT)
- local_irq_enable();
+ local_unlock_irq(slab_lock);
kmem_flagcheck(cache, flags);
obj = kmem_getpages(cache, local_flags, numa_mem_id());
if (local_flags & __GFP_WAIT)
- local_irq_disable();
+ local_lock_irq(slab_lock);
if (obj) {
/*
* Insert into the appropriate per node queues
@@ -3522,7 +3565,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
+ local_lock_irqsave(slab_lock, save_flags);
if (nodeid == NUMA_NO_NODE)
nodeid = slab_node;
@@ -3547,7 +3590,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
- local_irq_restore(save_flags);
+ local_unlock_irqrestore(slab_lock, save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
flags);
@@ -3607,9 +3650,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
+ local_lock_irqsave(slab_lock, save_flags);
objp = __do_cache_alloc(cachep, flags);
- local_irq_restore(save_flags);
+ local_unlock_irqrestore(slab_lock, save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
flags);
@@ -3922,9 +3965,9 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
debug_check_no_locks_freed(objp, obj_size(cachep));
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, obj_size(cachep));
- local_irq_save(flags);
+ local_lock_irqsave(slab_lock, flags);
__cache_free(cachep, objp, __builtin_return_address(0));
- local_irq_restore(flags);
+ local_unlock_irqrestore(slab_lock, flags);
trace_kmem_cache_free(_RET_IP_, objp);
}
@@ -3952,9 +3995,9 @@ void kfree(const void *objp)
c = virt_to_cache(objp);
debug_check_no_locks_freed(objp, obj_size(c));
debug_check_no_obj_freed(objp, obj_size(c));
- local_irq_save(flags);
+ local_lock_irqsave(slab_lock, flags);
__cache_free(c, (void *)objp, __builtin_return_address(0));
- local_irq_restore(flags);
+ local_unlock_irqrestore(slab_lock, flags);
}
EXPORT_SYMBOL(kfree);
@@ -3997,7 +4040,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
if (l3) {
struct array_cache *shared = l3->shared;
- spin_lock_irq(&l3->list_lock);
+ local_spin_lock_irq(slab_lock, &l3->list_lock);
if (shared)
free_block(cachep, shared->entry,
@@ -4010,7 +4053,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
}
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
kfree(shared);
free_alien_cache(new_alien);
continue;
@@ -4057,17 +4100,28 @@ struct ccupdate_struct {
struct array_cache *new[0];
};
-static void do_ccupdate_local(void *info)
+static void __do_ccupdate_local(void *info, int cpu)
{
struct ccupdate_struct *new = info;
struct array_cache *old;
- check_irq_off();
- old = cpu_cache_get(new->cachep);
+ old = cpu_cache_get_on_cpu(new->cachep, cpu);
+
+ new->cachep->array[cpu] = new->new[cpu];
+ new->new[cpu] = old;
+}
- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
- new->new[smp_processor_id()] = old;
+#ifndef CONFIG_PREEMPT_RT_BASE
+static void do_ccupdate_local(void *info)
+{
+ __do_ccupdate_local(info, smp_processor_id());
}
+#else
+static void do_ccupdate_local(void *info, int cpu)
+{
+ __do_ccupdate_local(info, cpu);
+}
+#endif
/* Always called with the cache_chain_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -4093,7 +4147,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
}
new->cachep = cachep;
- on_each_cpu(do_ccupdate_local, (void *)new, 1);
+ slab_on_each_cpu(do_ccupdate_local, (void *)new);
check_irq_on();
cachep->batchcount = batchcount;
@@ -4104,9 +4158,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
struct array_cache *ccold = new->new[i];
if (!ccold)
continue;
- spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ local_spin_lock_irq(slab_lock,
+ &cachep->nodelists[cpu_to_mem(i)]->list_lock);
free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
- spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ local_spin_unlock_irq(slab_lock,
+ &cachep->nodelists[cpu_to_mem(i)]->list_lock);
kfree(ccold);
}
kfree(new);
@@ -4182,7 +4238,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
if (ac->touched && !force) {
ac->touched = 0;
} else {
- spin_lock_irq(&l3->list_lock);
+ local_spin_lock_irq(slab_lock, &l3->list_lock);
if (ac->avail) {
tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail)
@@ -4192,7 +4248,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void *) * ac->avail);
}
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
}
}
@@ -4331,7 +4387,7 @@ static int s_show(struct seq_file *m, void *p)
continue;
check_irq_on();
- spin_lock_irq(&l3->list_lock);
+ local_spin_lock_irq(slab_lock, &l3->list_lock);
list_for_each_entry(slabp, &l3->slabs_full, list) {
if (slabp->inuse != cachep->num && !error)
@@ -4356,7 +4412,7 @@ static int s_show(struct seq_file *m, void *p)
if (l3->shared)
shared_avail += l3->shared->avail;
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
@@ -4585,13 +4641,13 @@ static int leaks_show(struct seq_file *m, void *p)
continue;
check_irq_on();
- spin_lock_irq(&l3->list_lock);
+ local_spin_lock_irq(slab_lock, &l3->list_lock);
list_for_each_entry(slabp, &l3->slabs_full, list)
handle_slab(n, cachep, slabp);
list_for_each_entry(slabp, &l3->slabs_partial, list)
handle_slab(n, cachep, slabp);
- spin_unlock_irq(&l3->list_lock);
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
}
name = cachep->name;
if (n[0] == n[1]) {
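
The comment in the slab_on_each_cpu() hunk above summarises the core idea of the patch: on PREEMPT_RT the per-CPU slab state is protected by a local lock (slab_lock) rather than by disabling interrupts, so remote CPUs are serialised by taking their lock instead of sending IPIs. The snippet below is a minimal sketch of that locallock pattern, not part of the patch; it only uses the <linux/locallock.h> primitives that appear in the hunks above, and the demo_* identifiers are hypothetical.

/*
 * Illustrative sketch, not part of the patch.  It assumes the
 * <linux/locallock.h> interface provided by the RT series and used in
 * the hunks above; the demo_* identifiers are made up for illustration.
 */
#include <linux/locallock.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_LOCAL_IRQ_LOCK(demo_lock);
static DEFINE_PER_CPU(unsigned long, demo_count);
static DEFINE_SPINLOCK(demo_list_lock);

/* Per-CPU fast path, analogous to __cache_alloc()/kmem_cache_free(). */
static void demo_fast_path(void)
{
	unsigned long flags;

	/*
	 * On !PREEMPT_RT_BASE this is plain local_irq_save(): interrupts
	 * are off, so the per-CPU data is safe.  On PREEMPT_RT_BASE it
	 * acquires a per-CPU lock instead, keeping the section
	 * preemptible while still serialising against every other
	 * demo_lock user on this CPU.
	 */
	local_lock_irqsave(demo_lock, flags);
	__this_cpu_inc(demo_count);
	local_unlock_irqrestore(demo_lock, flags);
}

/* Shared-list path, analogous to the l3->list_lock sections above. */
static void demo_shared_path(void)
{
	/*
	 * local_spin_lock_irq() pairs the local lock with an ordinary
	 * spinlock: the local lock replaces the "interrupts off" part,
	 * while the spinlock still provides cross-CPU exclusion.  This
	 * mirrors how the patch wraps l3->list_lock.
	 */
	local_spin_lock_irq(demo_lock, &demo_list_lock);
	/* ... manipulate state shared between CPUs ... */
	local_spin_unlock_irq(demo_lock, &demo_list_lock);
}

slab_on_each_cpu() in the patch builds on the same idea: instead of on_each_cpu() IPIs, the caller walks the online CPUs, takes per_cpu(slab_lock, cpu) for each one, and runs the callback locally. On a !PREEMPT_RT_BASE kernel the local lock reduces to interrupt disabling, which is why the non-RT paths are functionally unchanged.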