linux/debian/patches/features/all/rt/0114-mm-vmstat-fix-the-irq-...

From cdd283b7922e89fbd22fa1087d94fcc6c79befd1 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Jun 2011 20:47:08 +0200
Subject: [PATCH 114/290] mm-vmstat-fix-the-irq-lock-asymetry.patch

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/vmscan.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1e4ee1a..00daa2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1381,8 +1381,8 @@ static int too_many_isolated(struct zone *zone, int file,
  */
 static noinline_for_stack void
 putback_lru_pages(struct zone *zone, struct scan_control *sc,
-		  unsigned long nr_anon, unsigned long nr_file,
-		  struct list_head *page_list)
+		  unsigned long nr_anon, unsigned long nr_file,
+		  struct list_head *page_list, unsigned long nr_reclaimed)
 {
 	struct page *page;
 	struct pagevec pvec;
@@ -1393,7 +1393,12 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 	/*
 	 * Put back any unfreeable pages.
 	 */
-	spin_lock(&zone->lru_lock);
+	spin_lock_irq(&zone->lru_lock);
+
+	if (current_is_kswapd())
+		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
+	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+
 	while (!list_empty(page_list)) {
 		int lru;
 		page = lru_to_page(page_list);
@@ -1576,12 +1581,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 					priority, &nr_dirty, &nr_writeback);
 	}
 
-	local_irq_disable();
-	if (current_is_kswapd())
-		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
-	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
-
-	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
+	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list, nr_reclaimed);
 
 	/*
 	 * If reclaim is isolating dirty pages under writeback, it implies