From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jan 2016 17:21:59 +0100
Subject: sched: provide a tsk_nr_cpus_allowed() helper
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.6/older/patches-4.6.2-rt5.tar.xz

tsk_nr_cpus_allowed() is an accessor for task->nr_cpus_allowed which allows
us to change the representation of ->nr_cpus_allowed if required.

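To illustrate why the indirection is useful (this sketch is not part of this
patch): once every caller goes through the helper, the returned value can be
changed in a single place. A minimal sketch, assuming a hypothetical per-task
migrate-disable counter like the one carried in the -rt series:

	static inline int tsk_nr_cpus_allowed(struct task_struct *p)
	{
		/* A migrate-disabled task is effectively pinned to one CPU. */
		if (p->migrate_disable)		/* hypothetical -rt field */
			return 1;
		return p->nr_cpus_allowed;
	}

Callers such as select_task_rq() would then treat a migrate-disabled task as
unmigratable without any further changes.
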
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/sched.h   |    5 +++++
 kernel/sched/core.c     |    2 +-
 kernel/sched/deadline.c |   28 ++++++++++++++--------------
 kernel/sched/rt.c       |   24 ++++++++++++------------
 4 files changed, 32 insertions(+), 27 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1871,6 +1871,11 @@ extern int arch_task_struct_size __read_
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+	return p->nr_cpus_allowed;
+}
+
 #define TNF_MIGRATED	0x01
 #define TNF_NO_GROUP	0x02
 #define TNF_SHARED	0x04
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1515,7 +1515,7 @@ int select_task_rq(struct task_struct *p
 {
 	lockdep_assert_held(&p->pi_lock);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 
 	/*
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sche
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory++;
 
 	update_dl_migration(dl_rq);
@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sche
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory--;
 
 	update_dl_migration(dl_rq);
@@ -966,7 +966,7 @@ static void enqueue_task_dl(struct rq *r
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1040,9 +1040,9 @@ select_task_rq_dl(struct task_struct *p,
 	 * try to make it stay here, it might be important.
 	 */
 	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (tsk_nr_cpus_allowed(p) > 1)) {
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
@@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struc
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
 		return;
 
@@ -1071,7 +1071,7 @@ static void check_preempt_equal_dl(struc
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1 &&
+	if (tsk_nr_cpus_allowed(p) != 1 &&
 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
 		return;
 
@@ -1185,7 +1185,7 @@ static void put_prev_task_dl(struct rq *
 {
 	update_curr_dl(rq);
 
-	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
+	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1286,7 +1286,7 @@ static int find_later_rq(struct task_str
 	if (unlikely(!later_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1;
 
 	/*
@@ -1432,7 +1432,7 @@ static struct task_struct *pick_next_pus
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
@@ -1471,7 +1471,7 @@ static int push_dl_task(struct rq *rq)
 	 */
 	if (dl_task(rq->curr) &&
 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-	    rq->curr->nr_cpus_allowed > 1) {
+	    tsk_nr_cpus_allowed(rq->curr) > 1) {
 		resched_curr(rq);
 		return 0;
 	}
@@ -1618,9 +1618,9 @@ static void task_woken_dl(struct rq *rq,
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    dl_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
 		push_dl_tasks(rq);
 	}
@@ -1724,7 +1724,7 @@ static void switched_to_dl(struct rq *rq
 
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (dl_task(rq->curr))
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -334,7 +334,7 @@ static void inc_rt_migration(struct sche
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total++;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory++;
 
 	update_rt_migration(rt_rq);
@@ -351,7 +351,7 @@ static void dec_rt_migration(struct sche
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total--;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory--;
 
 	update_rt_migration(rt_rq);
@@ -1324,7 +1324,7 @@ enqueue_task_rt(struct rq *rq, struct ta
 
 	enqueue_rt_entity(rt_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1413,7 +1413,7 @@ select_task_rq_rt(struct task_struct *p,
 	 * will have to sort it out.
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
@@ -1437,7 +1437,7 @@ static void check_preempt_equal_prio(str
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
 		return;
 
@@ -1445,7 +1445,7 @@ static void check_preempt_equal_prio(str
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1
+	if (tsk_nr_cpus_allowed(p) != 1
 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
 		return;
 
@@ -1579,7 +1579,7 @@ static void put_prev_task_rt(struct rq *
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1629,7 +1629,7 @@ static int find_lowest_rq(struct task_st
 	if (unlikely(!lowest_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1; /* No other targets possible */
 
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1762,7 +1762,7 @@ static struct task_struct *pick_next_pus
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!rt_task(p));
@@ -2122,9 +2122,9 @@ static void task_woken_rt(struct rq *rq,
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
@@ -2197,7 +2197,7 @@ static void switched_to_rt(struct rq *rq
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
			queue_push_tasks(rq);
 #else
 		if (p->prio < rq->curr->prio)