From 32dcb68778d490393e793053f19a2bf407762aec Mon Sep 17 00:00:00 2001
Message-Id: <32dcb68778d490393e793053f19a2bf407762aec.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 12 Jul 2011 11:39:36 +0200
Subject: [PATCH 177/333] mm/vmalloc: Another preempt disable region which
 sucks
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

Avoid the preempt disable version of get_cpu_var(). The inner-lock should
provide enough serialisation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/vmalloc.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1817871b0239..aa06badc76f4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -853,7 +853,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	struct vmap_block *vb;
 	struct vmap_area *va;
 	unsigned long vb_idx;
-	int node, err;
+	int node, err, cpu;
 	void *vaddr;
 
 	node = numa_node_id();
@@ -896,11 +896,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	BUG_ON(err);
 	radix_tree_preload_end();
 
-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = this_cpu_ptr(&vmap_block_queue);
 	spin_lock(&vbq->lock);
 	list_add_tail_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();
 
 	return vaddr;
 }
@@ -969,6 +970,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	struct vmap_block *vb;
 	void *vaddr = NULL;
 	unsigned int order;
+	int cpu;
 
 	BUG_ON(offset_in_page(size));
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -983,7 +985,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	order = get_order(size);
 
 	rcu_read_lock();
-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = this_cpu_ptr(&vmap_block_queue);
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 		unsigned long pages_off;
 
@@ -1006,7 +1009,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 		break;
 	}
 
-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();
 	rcu_read_unlock();
 
 	/* Allocate new block if nothing was found */
-- 
2.17.1