From bba668c3f3b781ed69a6664b3f53634c778d4fc0 Mon Sep 17 00:00:00 2001
Message-Id: <bba668c3f3b781ed69a6664b3f53634c778d4fc0.1601675151.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
Subject: [PATCH 076/333] mm: page_alloc: rt-friendly per-cpu pages
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.
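
As an aside for readers new to local locks, the pattern can be
sketched in plain userspace C. This is an illustration only, not the
kernel's <linux/locallock.h> implementation; pcpu_free_page(),
pcpu_drain(), NR_CPUS_MAX and the pthread mutex are stand-ins
invented for the sketch:

  /*
   * Sketch: guard per-CPU data with an explicit per-CPU lock instead
   * of disabling interrupts.  The critical section stays preemptible,
   * and a remote CPU can take the lock and drain another CPU's data
   * directly, which is why the RT branch of drain_all_pages() below
   * no longer needs to schedule a work item on every CPU.
   */
  #define _GNU_SOURCE
  #include <pthread.h>
  #include <sched.h>
  #include <stdio.h>

  #define NR_CPUS_MAX 256   /* assumes fewer than 256 CPUs */

  struct pcpu_pages {
          pthread_mutex_t lock;   /* plays the role of pa_lock */
          long count;             /* stand-in for the pcp free list */
  };

  static struct pcpu_pages pcpu[NR_CPUS_MAX];

  static void pcpu_free_page(void)
  {
          int cpu = sched_getcpu();       /* whose data we touch */

          pthread_mutex_lock(&pcpu[cpu].lock);   /* no irqs-off section */
          pcpu[cpu].count++;                     /* "free to the pcp list" */
          pthread_mutex_unlock(&pcpu[cpu].lock);
  }

  /* Remote drain, as drain_pages_zone(cpu, ...) does on RT: legal
   * because the data is protected by a real lock, not by "we are on
   * that CPU with interrupts off". */
  static long pcpu_drain(int cpu)
  {
          long n;

          pthread_mutex_lock(&pcpu[cpu].lock);
          n = pcpu[cpu].count;
          pcpu[cpu].count = 0;
          pthread_mutex_unlock(&pcpu[cpu].lock);
          return n;
  }

  int main(void)
  {
          int cpu;

          for (cpu = 0; cpu < NR_CPUS_MAX; cpu++)
                  pthread_mutex_init(&pcpu[cpu].lock, NULL);

          cpu = sched_getcpu();
          pcpu_free_page();
          printf("drained %ld page(s) from cpu %d\n", pcpu_drain(cpu), cpu);
          return 0;
  }

In the patch itself, local_lock_irqsave(pa_lock, ...) plays this role:
on !PREEMPT_RT_BASE it falls back to plain local_irq_save(), while on
RT it takes a per-CPU lock, so drain_all_pages() can drain remote CPUs
directly instead of bouncing work items through mm_percpu_wq.
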
Contains fixes from:
Peter Zijlstra <a.p.zijlstra@chello.nl>
Thomas Gleixner <tglx@linutronix.de>

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
mm/page_alloc.c | 63 +++++++++++++++++++++++++++++++++----------------
1 file changed, 43 insertions(+), 20 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e43e374a9415..f6f11e22970e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -60,6 +60,7 @@
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
+#include <linux/locallock.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
@@ -291,6 +292,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags) \
+ local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags) \
+ local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
+#endif
+
int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1301,10 +1314,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
return;

migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, pfn, order, migratetype);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}

static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2569,13 +2582,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
int to_drain, batch;
LIST_HEAD(dst);

- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0)
isolate_pcp_pages(to_drain, pcp, &dst);

- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);

if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
@@ -2597,7 +2610,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
LIST_HEAD(dst);
int count;

- local_irq_save(flags);
+ cpu_lock_irqsave(cpu, flags);
pset = per_cpu_ptr(zone->pageset, cpu);

pcp = &pset->pcp;
@@ -2605,7 +2618,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
if (count)
isolate_pcp_pages(count, pcp, &dst);

- local_irq_restore(flags);
+ cpu_unlock_irqrestore(cpu, flags);

if (count)
free_pcppages_bulk(zone, &dst, false);
@@ -2643,6 +2656,7 @@ void drain_local_pages(struct zone *zone)
drain_pages(cpu);
}

+#ifndef CONFIG_PREEMPT_RT_BASE
static void drain_local_pages_wq(struct work_struct *work)
{
/*
@@ -2656,6 +2670,7 @@ static void drain_local_pages_wq(struct work_struct *work)
drain_local_pages(NULL);
preempt_enable();
}
+#endif

/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
@@ -2722,7 +2737,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
-
+#ifdef CONFIG_PREEMPT_RT_BASE
+ for_each_cpu(cpu, &cpus_with_pcps) {
+ if (zone)
+ drain_pages_zone(cpu, zone);
+ else
+ drain_pages(cpu);
+ }
+#else
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
@@ -2730,6 +2752,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+#endif

mutex_unlock(&pcpu_drain_mutex);
}
@@ -2849,9 +2872,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;

- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
free_unref_page_commit(page, pfn, &dst);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
@@ -2878,7 +2901,7 @@ void free_unref_page_list(struct list_head *list)
set_page_private(page, pfn);
}

- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
@@ -2893,12 +2916,12 @@ void free_unref_page_list(struct list_head *list)
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
batch_count = 0;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
}
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);

for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
@@ -3047,7 +3070,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
struct page *page;
unsigned long flags;

- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, pcp, list);
@@ -3055,7 +3078,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
return page;
}
@@ -3082,7 +3105,7 @@ struct page *rmqueue(struct zone *preferred_zone,
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
- spin_lock_irqsave(&zone->lock, flags);
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);

do {
page = NULL;
@@ -3102,14 +3125,14 @@ struct page *rmqueue(struct zone *preferred_zone,

__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);

out:
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;

failed:
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
return NULL;
}
@@ -8130,7 +8153,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;

/* avoid races with drain_pages() */
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
@@ -8139,7 +8162,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
--
2.17.1