From 53f10af8e401301257ec2ee11638597f7fce2b04 Mon Sep 17 00:00:00 2001
Message-Id: <53f10af8e401301257ec2ee11638597f7fce2b04.1601675151.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Sebastian Andrzej Siewior
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: [PATCH 027/333] kernel/sched/core: add migrate_disable()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz
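
Tasks on a PREEMPT_RT kernel sometimes need to stay on their current
CPU without disabling preemption: preempt_disable() forbids sleeping
and adds latency for everything else on that CPU. Provide
migrate_disable()/migrate_enable() for such sections.

migrate_disable() pins the calling task to its current CPU by
pointing task_struct::cpus_ptr at cpumask_of(smp_processor_id()) and
setting nr_cpus_allowed to 1; the task itself stays preemptible.
Calls nest by way of a per-task counter. In atomic context or with
interrupts disabled the task cannot migrate anyway, so the request is
only accounted (migrate_disable_atomic, checked under
CONFIG_SCHED_DEBUG).

An affinity change requested while migration is disabled is recorded
in ->migrate_disable_update and applied in migrate_enable() via
__do_set_cpus_allowed_tail(); if the task is then no longer allowed
on its current CPU, it is moved away with stop_one_cpu().

get_cpu_light()/put_cpu_light() mirror get_cpu()/put_cpu() on top of
the new primitives. On !CONFIG_SMP, and in the !CONFIG_PREEMPT_COUNT
stubs, migrate_disable()/migrate_enable() compile to barrier().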
---
 include/linux/preempt.h |  23 +++
 include/linux/sched.h   |   7 +++
 include/linux/smp.h     |   3 +
 kernel/sched/core.c     | 130 +++++++++++++++++++++++++++++++++++++++-
 kernel/sched/debug.c    |   4 ++
 5 files changed, 165 insertions(+), 2 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index c01813c3fbe9..3196d0e76719 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -185,6 +185,22 @@ do { \
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
+#ifdef CONFIG_SMP
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#else
+#define migrate_disable()		barrier()
+#define migrate_enable()		barrier()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -253,6 +269,13 @@ do { \
 #define preempt_enable_notrace()	barrier()
 #define preemptible()			0
 
+#define migrate_disable()		barrier()
+#define migrate_enable()		barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
 #endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef MODULE
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 463c20160105..3277801f519d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -662,6 +662,13 @@ struct task_struct {
 	int				nr_cpus_allowed;
 	const cpumask_t			*cpus_ptr;
 	cpumask_t			cpus_mask;
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	int				migrate_disable;
+	int				migrate_disable_update;
+# ifdef CONFIG_SCHED_DEBUG
+	int				migrate_disable_atomic;
+# endif
+#endif
 
 #ifdef CONFIG_PREEMPT_RCU
 	int				rcu_read_lock_nesting;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..5801e516ba63 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void)
 #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
 #define put_cpu()		preempt_enable()
 
+#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light()		migrate_enable()
+
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
  * boot command line:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 42343f58cea4..77afdcb4d3ba 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1008,7 +1008,15 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+int __migrate_disabled(struct task_struct *p)
+{
+	return p->migrate_disable;
+}
+#endif
+
+static void __do_set_cpus_allowed_tail(struct task_struct *p,
+				       const struct cpumask *new_mask)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -1037,6 +1045,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 		set_curr_task(rq, p);
 }
 
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	if (__migrate_disabled(p)) {
+		lockdep_assert_held(&p->pi_lock);
+
+		cpumask_copy(&p->cpus_mask, new_mask);
+		p->migrate_disable_update = 1;
+		return;
+	}
+#endif
+	__do_set_cpus_allowed_tail(p, new_mask);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1096,9 +1118,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	}
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
 		goto out;
 
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	if (__migrate_disabled(p)) {
+		p->migrate_disable_update = 1;
+		goto out;
+	}
+#endif
+
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
@@ -7109,3 +7138,100 @@ const u32 sched_prio_to_wmult[40] = {
 };
 
 #undef CREATE_TRACE_POINTS
+
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic++;
+#endif
+		return;
+	}
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		return;
+	}
+
+	preempt_disable();
+	p->migrate_disable = 1;
+
+	p->cpus_ptr = cpumask_of(smp_processor_id());
+	p->nr_cpus_allowed = 1;
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || irqs_disabled()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic--;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+	if (p->migrate_disable > 1) {
+		p->migrate_disable--;
+		return;
+	}
+
+	preempt_disable();
+
+	p->cpus_ptr = &p->cpus_mask;
+	p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
+	p->migrate_disable = 0;
+
+	if (p->migrate_disable_update) {
+		struct rq *rq;
+		struct rq_flags rf;
+
+		rq = task_rq_lock(p, &rf);
+		update_rq_clock(rq);
+
+		__do_set_cpus_allowed_tail(p, &p->cpus_mask);
+		task_rq_unlock(rq, p, &rf);
+
+		p->migrate_disable_update = 0;
+
+		WARN_ON(smp_processor_id() != task_cpu(p));
+		if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+			const struct cpumask *cpu_valid_mask = cpu_active_mask;
+			struct migration_arg arg;
+			unsigned int dest_cpu;
+
+			if (p->flags & PF_KTHREAD) {
+				/*
+				 * Kernel threads are allowed on online && !active CPUs
+				 */
+				cpu_valid_mask = cpu_online_mask;
+			}
+			dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
+			arg.task = p;
+			arg.dest_cpu = dest_cpu;
+
+			preempt_enable();
+			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+			tlb_migrate_finish(p->mm);
+			return;
+		}
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#endif
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 78fadf0438ea..5027158d3908 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -982,6 +982,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		P(dl.runtime);
 		P(dl.deadline);
 	}
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
+	P(migrate_disable);
+#endif
+	P(nr_cpus_allowed);
 #undef PN_SCHEDSTAT
 #undef PN
 #undef __PN
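
For reference, intended usage looks like the sketch below. It is
illustrative only and not part of the patch: the function is made up,
and it assumes a kernel with this patch applied and CONFIG_SMP &&
CONFIG_PREEMPT_COUNT enabled.

#include <linux/bug.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static void example_pinned_section(void)
{
	/* Pin the task to this CPU: migrate_disable() + smp_processor_id(). */
	int cpu = get_cpu_light();

	/*
	 * Unlike get_cpu(), this section remains preemptible and may
	 * sleep (e.g. on a sleeping lock on RT). The task cannot
	 * migrate, so smp_processor_id() keeps returning 'cpu' -- but
	 * other tasks can still run on this CPU, so shared per-CPU
	 * data still needs its own serialization.
	 */
	migrate_disable();	/* nesting is fine: counted per task */
	WARN_ON(cpu != smp_processor_id());
	migrate_enable();

	put_cpu_light();	/* drops the pin: migrate_enable() */
}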
-- 
2.17.1