From 6b1bf65d021e071dccf657fbae57841973070648 Mon Sep 17 00:00:00 2001
Message-Id: <6b1bf65d021e071dccf657fbae57841973070648.1594742966.git.zanussi@kernel.org>
In-Reply-To: <832f7d97d6b989a5b4860dd2dbec58ad6ad5ab81.1594742966.git.zanussi@kernel.org>
References: <832f7d97d6b989a5b4860dd2dbec58ad6ad5ab81.1594742966.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 May 2018 15:24:22 +0200
Subject: [PATCH 074/329] mm/SLxB: change list_lock to raw_spinlock_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.132-rt59.tar.xz

The list_lock is used with IRQs off on RT. Make it a raw_spinlock_t,
otherwise interrupts won't be disabled on -RT. The locking rules remain
the same on !RT.

This patch changes it for SLAB and SLUB since both share the same header
file for the struct kmem_cache_node definition.
 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
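Note: a minimal sketch of the semantic difference this change relies on
(illustrative only, not part of the patch; the lock and function names
below are made up for the example):

        #include <linux/spinlock.h>

        static DEFINE_RAW_SPINLOCK(example_list_lock);

        static void example_irqs_off_path(void)
        {
                unsigned long flags;

                local_irq_save(flags);
                /*
                 * On PREEMPT_RT a spinlock_t is backed by an rtmutex and
                 * may sleep, so it must not be acquired once interrupts
                 * are disabled. A raw_spinlock_t really spins with IRQs
                 * off on both RT and !RT, which is what the list_lock
                 * call sites expect.
                 */
                raw_spin_lock(&example_list_lock);
                /* ... walk or modify the per-node slab lists ... */
                raw_spin_unlock(&example_list_lock);
                local_irq_restore(flags);
        }

On !RT kernels spin_lock() is already implemented on top of a raw
spinlock, so the generated code does not change there.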
mm/slab.c | 94 +++++++++++++++++++++++++++----------------------------
mm/slab.h | 2 +-
mm/slub.c | 50 ++++++++++++++---------------
3 files changed, 73 insertions(+), 73 deletions(-)
 
diff --git a/mm/slab.c b/mm/slab.c
index 46f21e73db2f..38f6609343b3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
parent->shared = NULL;
parent->alien = NULL;
parent->colour_next = 0;
- spin_lock_init(&parent->list_lock);
+ raw_spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
}
@@ -600,9 +600,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
page_node = page_to_nid(page);
n = get_node(cachep, page_node);
 
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, page_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 
slabs_destroy(cachep, &list);
}
@@ -731,7 +731,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
struct kmem_cache_node *n = get_node(cachep, node);
 
if (ac->avail) {
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
@@ -742,7 +742,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
}
}
 
@@ -815,9 +815,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, page_node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, &objp, 1, page_node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
return 1;
@@ -858,10 +858,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
*/
n = get_node(cachep, node);
if (n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
cachep->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
 
return 0;
}
@@ -940,7 +940,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
goto fail;
 
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
@@ -958,7 +958,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
new_alien = NULL;
}
 
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
 
/*
@@ -997,7 +997,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
 
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
 
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
@@ -1010,7 +1010,7 @@ static void cpuup_canceled(long cpu)
}
 
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto free_slab;
}
 
@@ -1024,7 +1024,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
 
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
 
kfree(shared);
if (alien) {
@@ -1208,7 +1208,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
- spin_lock_init(&ptr->list_lock);
+ raw_spin_lock_init(&ptr->list_lock);
 
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
@@ -1379,11 +1379,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
 
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
total_slabs = n->total_slabs;
free_slabs = n->free_slabs;
free_objs = n->free_objects;
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
@@ -2178,7 +2178,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
#endif
}
 
@@ -2186,7 +2186,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&get_node(cachep, node)->list_lock);
+ assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}
 
@@ -2226,9 +2226,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
ac->avail = 0;
}
@@ -2246,9 +2246,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
drain_alien_cache(cachep, n->alien);
 
for_each_kmem_cache_node(cachep, node, n) {
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, n->shared, node, true, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
 
slabs_destroy(cachep, &list);
}
@@ -2270,10 +2270,10 @@ static int drain_freelist(struct kmem_cache *cache,
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
 
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
p = n->slabs_free.prev;
if (p == &n->slabs_free) {
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
goto out;
}
 
@@ -2286,7 +2286,7 @@ static int drain_freelist(struct kmem_cache *cache,
* to the cache.
*/
n->free_objects -= cache->num;
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slab_destroy(cache, page);
nr_freed++;
}
@@ -2734,7 +2734,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
INIT_LIST_HEAD(&page->lru);
n = get_node(cachep, page_to_nid(page));
 
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
n->total_slabs++;
if (!page->active) {
list_add_tail(&page->lru, &(n->slabs_free));
@@ -2744,7 +2744,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 
fixup_objfreelist_debug(cachep, &list);
}
@@ -2912,7 +2912,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
struct page *page;
 
- assert_spin_locked(&n->list_lock);
+ assert_raw_spin_locked(&n->list_lock);
page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
if (!page) {
n->free_touched = 1;
@@ -2938,10 +2938,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
 
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
page = get_first_slab(n, true);
if (!page) {
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
return NULL;
}
 
@@ -2950,7 +2950,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
 
fixup_slab_list(cachep, n, page, &list);
 
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
 
return obj;
@@ -3009,7 +3009,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
 
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
shared = READ_ONCE(n->shared);
 
/* See if we can refill from the shared array */
@@ -3033,7 +3033,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
must_grow:
n->free_objects -= ac->avail;
alloc_done:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
 
direct_grow:
@@ -3258,7 +3258,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
BUG_ON(!n);
 
check_irq_off();
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
page = get_first_slab(n, false);
if (!page)
goto must_grow;
@@ -3276,12 +3276,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 
fixup_slab_list(cachep, n, page, &list);
 
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
fixup_objfreelist_debug(cachep, &list);
return obj;
 
must_grow:
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (page) {
/* This slab isn't counted yet so don't update free_objects */
@@ -3457,7 +3457,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 
check_irq_off();
n = get_node(cachep, node);
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
@@ -3486,7 +3486,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
STATS_SET_FREEABLE(cachep, i);
}
#endif
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
@@ -3896,9 +3896,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
free_block(cachep, ac->entry, ac->avail, node, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
slabs_destroy(cachep, &list);
}
free_percpu(prev);
@@ -4023,9 +4023,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
return;
}
 
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
drain_array_locked(cachep, ac, node, false, &list);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
 
slabs_destroy(cachep, &list);
}
@@ -4109,7 +4109,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
 
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
@@ -4118,7 +4118,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
if (n->shared)
shared_avail += n->shared->avail;
 
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
@@ -4338,13 +4338,13 @@ static int leaks_show(struct seq_file *m, void *p)
for_each_kmem_cache_node(cachep, node, n) {
 
check_irq_on();
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
 
list_for_each_entry(page, &n->slabs_full, lru)
handle_slab(x, cachep, page);
list_for_each_entry(page, &n->slabs_partial, lru)
handle_slab(x, cachep, page);
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
}
} while (!is_store_user_clean(cachep));
 
diff --git a/mm/slab.h b/mm/slab.h
index 9632772e14be..d6b01d61f768 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -454,7 +454,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
* The slab lists for all objects.
*/
struct kmem_cache_node {
- spinlock_t list_lock;
+ raw_spinlock_t list_lock;
 
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
diff --git a/mm/slub.c b/mm/slub.c
index 882a1e0ae89c..66416eff4818 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1181,7 +1181,7 @@ static noinline int free_debug_processing(
unsigned long uninitialized_var(flags);
int ret = 0;
 
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
slab_lock(page);
 
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1216,7 +1216,7 @@ static noinline int free_debug_processing(
bulk_cnt, cnt);
 
slab_unlock(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
@@ -1821,7 +1821,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
 
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
 
@@ -1846,7 +1846,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
 
}
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
return object;
}
 
@@ -2098,7 +2098,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* that acquire_slab() will see a slab page that
* is frozen
*/
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
} else {
m = M_FULL;
@@ -2109,7 +2109,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
}
 
@@ -2144,7 +2144,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
goto redo;
 
if (lock)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
@@ -2179,10 +2179,10 @@ static void unfreeze_partials(struct kmem_cache *s,
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 
n = n2;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
 
do {
@@ -2211,7 +2211,7 @@ static void unfreeze_partials(struct kmem_cache *s,
}
 
if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
 
while (discard_page) {
page = discard_page;
@@ -2380,10 +2380,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
unsigned long x = 0;
struct page *page;
 
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += get_count(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
@@ -2828,7 +2828,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
do {
if (unlikely(n)) {
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
n = NULL;
}
prior = page->freelist;
@@ -2860,7 +2860,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
 
}
}
@@ -2902,7 +2902,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return;
 
slab_empty:
@@ -2917,7 +2917,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
remove_full(s, n, page);
}
 
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
}
@@ -3315,7 +3315,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
- spin_lock_init(&n->list_lock);
+ raw_spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
@@ -3702,7 +3702,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
struct page *page, *h;
 
BUG_ON(irqs_disabled());
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
@@ -3712,7 +3712,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
 
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
@@ -3985,7 +3985,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
 
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
 
/*
* Build lists of slabs to discard or promote.
@@ -4016,7 +4016,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
 
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
@@ -4430,7 +4430,7 @@ static int validate_slab_node(struct kmem_cache *s,
struct page *page;
unsigned long flags;
 
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
 
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
@@ -4452,7 +4452,7 @@ static int validate_slab_node(struct kmem_cache *s,
s->name, count, atomic_long_read(&n->nr_slabs));
 
out:
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}
 
@@ -4642,12 +4642,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
if (!atomic_long_read(&n->nr_slabs))
continue;
 
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc, map);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc, map);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
|
for (i = 0; i < t.count; i++) {
--
2.17.1