From 99413c8e97ac85b7f0d29603625302a76aa06e1e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 16 Mar 2010 14:31:44 -0700
Subject: [PATCH 147/267] sched: Break out from load_balancing on rq_lock
 contention

Also limit NEW_IDLE pull

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/sched_fair.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8a39fa3..3747e53 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2899,6 +2899,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 */
 		if (idle == CPU_NEWLY_IDLE)
 			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
 #endif
 
 		/*
@@ -3039,6 +3043,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
 			break;
+
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
+#endif
 	}
 	rcu_read_unlock();
 
-- 
1.7.10
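
The two hunks above apply the same back-off idea at two points in the
fair-class balancer: stop a batch of task pulls as soon as another CPU is
spinning on either runqueue lock, and, on preemptible kernels, stop a
newly-idle pull after the first task so the locked critical section stays
short.  Below is a rough sketch of that pattern, not code from the patch:
struct rq as shown, pull_one_task() and pull_some() are hypothetical
stand-ins for the scheduler internals; only raw_spin_is_contended() and
CONFIG_PREEMPT are the real kernel interfaces the patch relies on.

#include <linux/spinlock.h>

/* Hypothetical minimal runqueue shape, just for this sketch. */
struct rq {
	raw_spinlock_t lock;
	unsigned long nr_running;
};

/* Hypothetical: move one task src -> dst; returns 0 when none is left. */
static int pull_one_task(struct rq *src, struct rq *dst);

/* Pull up to max tasks, backing off early the way the patch does. */
static unsigned long pull_some(struct rq *dst, struct rq *src,
			       unsigned long max, int newly_idle)
{
	unsigned long pulled = 0;

	while (pulled < max && pull_one_task(src, dst)) {
		pulled++;
#ifdef CONFIG_PREEMPT
		/*
		 * Second hunk: a newly-idle CPU only needs one runnable
		 * task to stop being idle, so don't stretch the critical
		 * section any further.
		 */
		if (newly_idle && dst->nr_running)
			break;

		/*
		 * Both hunks: if another CPU is already spinning on
		 * either runqueue lock, stop the batch and let it in
		 * rather than holding it off for the whole pull.
		 */
		if (raw_spin_is_contended(&dst->lock) ||
		    raw_spin_is_contended(&src->lock))
			break;
#endif
	}
	return pulled;
}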