From 989ca95e14eeb781f5bb57334ab626cfe13d9ec7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 28 Nov 2011 19:51:51 +0100
Subject: [002/254] slab, lockdep: Annotate all slab caches

Currently we only annotate the kmalloc caches, annotate all of them.

Signed-off-by: Peter Zijlstra
Cc: Hans Schillstrom
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: Matt Mackall
Cc: Sitsofe Wheeler
Cc: linux-mm@kvack.org
Cc: David Rientjes
Signed-off-by: Thomas Gleixner
Link: http://lkml.kernel.org/n/tip-10bey2cgpcvtbdkgigaoab8w@git.kernel.org
---
 mm/slab.c | 52 ++++++++++++++++++++++++++++------------------------
 1 file changed, 28 insertions(+), 24 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index e901a36..1fd4b4d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -611,6 +611,12 @@ int slab_is_available(void)
 	return g_cpucache_up >= EARLY;
 }
 
+/*
+ * Guard access to the cache-chain.
+ */
+static DEFINE_MUTEX(cache_chain_mutex);
+static struct list_head cache_chain;
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -672,38 +678,41 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 		slab_set_debugobj_lock_classes_node(cachep, node);
 }
 
-static void init_node_lock_keys(int q)
+static void init_lock_keys(struct kmem_cache *cachep, int node)
 {
-	struct cache_sizes *s = malloc_sizes;
+	struct kmem_list3 *l3;
 
 	if (g_cpucache_up < LATE)
 		return;
 
-	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct kmem_list3 *l3;
+	l3 = cachep->nodelists[node];
+	if (!l3 || OFF_SLAB(cachep))
+		return;
 
-		l3 = s->cs_cachep->nodelists[q];
-		if (!l3 || OFF_SLAB(s->cs_cachep))
-			continue;
+	slab_set_lock_classes(cachep, &on_slab_l3_key, &on_slab_alc_key, node);
+}
 
-		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
-				&on_slab_alc_key, q);
-	}
+static void init_node_lock_keys(int node)
+{
+	struct kmem_cache *cachep;
+
+	list_for_each_entry(cachep, &cache_chain, next)
+		init_lock_keys(cachep, node);
 }
 
-static inline void init_lock_keys(void)
+static inline void init_cachep_lock_keys(struct kmem_cache *cachep)
 {
 	int node;
 
 	for_each_node(node)
-		init_node_lock_keys(node);
+		init_lock_keys(cachep, node);
 }
 #else
-static void init_node_lock_keys(int q)
+static void init_node_lock_keys(int node)
 {
 }
 
-static inline void init_lock_keys(void)
+static void init_cachep_lock_keys(struct kmem_cache *cachep)
 {
 }
 
@@ -716,12 +725,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif
 
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1685,14 +1688,13 @@ void __init kmem_cache_init_late(void)
 
 	g_cpucache_up = LATE;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
-	list_for_each_entry(cachep, &cache_chain, next)
+	list_for_each_entry(cachep, &cache_chain, next) {
+		init_cachep_lock_keys(cachep);
 		if (enable_cpucache(cachep, GFP_NOWAIT))
 			BUG();
+	}
 	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
@@ -2544,6 +2546,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
+	init_cachep_lock_keys(cachep);
+
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
 oops: