From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: mm: convert swap to percpu locked
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 mm/swap.c |   30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

Index: linux-stable/mm/swap.c
===================================================================
--- linux-stable.orig/mm/swap.c
+++ linux-stable/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 
 #include "internal.h"
 
@@ -41,6 +42,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
@@ -355,11 +359,11 @@ void rotate_reclaimable_page(struct page
 		unsigned long flags;
 
 		page_cache_get(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = &__get_cpu_var(lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
@@ -404,12 +408,13 @@ static void activate_page_drain(int cpu)
 void activate_page(struct page *page)
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       activate_page_pvecs);
 
 		page_cache_get(page);
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
 	}
 }
 
@@ -457,13 +462,13 @@ EXPORT_SYMBOL(mark_page_accessed);
  */
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru];
 
 	page_cache_get(page);
 	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec, lru);
 	pagevec_add(pvec, page);
-	put_cpu_var(lru_add_pvecs);
+	put_locked_var(swapvec_lock, lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
 
@@ -598,9 +603,9 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -628,18 +633,19 @@ void deactivate_page(struct page *page)
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_deactivate_pvecs);
 
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)