From c48feb8fe1bad2aed0a15440a28da0bca8b5292a Mon Sep 17 00:00:00 2001
From: Luiz Capitulino <lcapitulino@redhat.com>
Date: Fri, 27 May 2016 15:03:28 +0200
Subject: [PATCH 078/328] mm: perform lru_add_drain_all() remotely
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.106-rt46.tar.xz

lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
on all CPUs that have non-empty LRU pagevecs and then waiting for
the scheduled work to complete. However, workqueue threads may never
have the chance to run on a CPU that's running a SCHED_FIFO task.
This causes lru_add_drain_all() to block forever.

This commit solves this problem by changing lru_add_drain_all()
to drain the LRU pagevecs of remote CPUs. This is done by grabbing
swapvec_lock and calling lru_add_drain_cpu().

PS: This is based on an idea and initial implementation by
Rik van Riel.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 mm/swap.c | 36 ++++++++++++++++++++++++++++++------
 1 file changed, 30 insertions(+), 6 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 92f994b962f0..3885645a45ce 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -585,9 +585,15 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
 		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
 		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -657,6 +663,16 @@ void lru_add_drain(void)
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
+
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
@@ -664,6 +680,16 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 	lru_add_drain();
 }
 
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	queue_work_on(cpu, mm_percpu_wq, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
+
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -688,21 +714,19 @@ void lru_add_drain_all(void)
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, mm_percpu_wq, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
 	mutex_unlock(&lock);
 }
-- 
2.25.1
