Subject: sched: Have migrate_disable ignore bounded threads
Date: Tue, 27 Sep 2011 08:40:25 -0400
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.12/patches-3.12.0-rt2.tar.xz

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <williams@redhat.com>
Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 kernel/sched/core.c |   23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2391,7 +2391,7 @@ void migrate_disable(void)
 {
 	struct task_struct *p = current;
 
-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic++;
 #endif
@@ -2422,7 +2422,7 @@ void migrate_enable(void)
 	unsigned long flags;
 	struct rq *rq;
 
-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic--;
 #endif
@@ -2443,26 +2443,21 @@ void migrate_enable(void)
 
 	if (unlikely(migrate_disabled_updated(p))) {
 		/*
-		 * See comment in update_migrate_disable() about locking.
+		 * Undo whatever update_migrate_disable() did, also see there
+		 * about locking.
 		 */
 		rq = this_rq();
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		mask = tsk_cpus_allowed(p);
+
 		/*
 		 * Clearing migrate_disable causes tsk_cpus_allowed to
 		 * show the tasks original cpu affinity.
 		 */
 		p->migrate_disable = 0;
-
-		WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-
-		if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
-			/* Get the mask now that migration is enabled */
-			mask = tsk_cpus_allowed(p);
-			if (p->sched_class->set_cpus_allowed)
-				p->sched_class->set_cpus_allowed(p, mask);
-			p->nr_cpus_allowed = cpumask_weight(mask);
-		}
+		mask = tsk_cpus_allowed(p);
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->nr_cpus_allowed = cpumask_weight(mask);
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	} else
 		p->migrate_disable = 0;