From 54b1967d10355a74b7cdfa78a26e9d4a9ae36187 Mon Sep 17 00:00:00 2001
Message-Id: <54b1967d10355a74b7cdfa78a26e9d4a9ae36187.1599166691.git.zanussi@kernel.org>
In-Reply-To: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
References: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
From: Scott Wood <swood@redhat.com>
Date: Fri, 24 Jan 2020 06:11:46 -0500
Subject: [PATCH 316/333] sched: migrate_enable: Use per-cpu cpu_stop_work
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.142-rt63.tar.xz

[ Upstream commit 2dcd94b443c5dcbc20281666321b7f025f9cc85c ]

Commit e6c287b1512d ("sched: migrate_enable: Use stop_one_cpu_nowait()")
adds a busy wait to deal with an edge case where the migrated thread
can resume running on another CPU before the stopper has consumed
cpu_stop_work. However, this is done with preemption disabled and can
potentially lead to deadlock.

While it is not guaranteed that the cpu_stop_work will be consumed before
the migrating thread resumes and exits the stack frame, it is guaranteed
that nothing other than the stopper can run on the old cpu between the
migrating thread scheduling out and the cpu_stop_work being consumed.
Thus, we can store cpu_stop_work in per-cpu data without it being
reused too early.

Fixes: e6c287b1512d ("sched: migrate_enable: Use stop_one_cpu_nowait()")
Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Scott Wood <swood@redhat.com>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 kernel/sched/core.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ab34c573b79c..098e88f7a216 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7286,6 +7286,9 @@ static void migrate_disabled_sched(struct task_struct *p)
 	p->migrate_disable_scheduled = 1;
 }
 
+static DEFINE_PER_CPU(struct cpu_stop_work, migrate_work);
+static DEFINE_PER_CPU(struct migration_arg, migrate_arg);
+
 void migrate_enable(void)
 {
 	struct task_struct *p = current;
@@ -7324,23 +7327,26 @@ void migrate_enable(void)
 
 	WARN_ON(smp_processor_id() != cpu);
 	if (!is_cpu_allowed(p, cpu)) {
-		struct migration_arg arg = { .task = p };
-		struct cpu_stop_work work;
+		struct migration_arg __percpu *arg;
+		struct cpu_stop_work __percpu *work;
 		struct rq_flags rf;
 
+		work = this_cpu_ptr(&migrate_work);
+		arg = this_cpu_ptr(&migrate_arg);
+		WARN_ON_ONCE(!arg->done && !work->disabled && work->arg);
+
+		arg->task = p;
+		arg->done = false;
+
 		rq = task_rq_lock(p, &rf);
 		update_rq_clock(rq);
-		arg.dest_cpu = select_fallback_rq(cpu, p);
+		arg->dest_cpu = select_fallback_rq(cpu, p);
 		task_rq_unlock(rq, p, &rf);
 
 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
-				    &arg, &work);
+				    arg, work);
 		tlb_migrate_finish(p->mm);
 		__schedule(true);
-		if (!work.disabled) {
-			while (!arg.done)
-				cpu_relax();
-		}
 	}
 
 out:
-- 
2.17.1