From 32d07c91c3f116cf5dd6e5326d978018d48615d5 Mon Sep 17 00:00:00 2001
Message-Id: <32d07c91c3f116cf5dd6e5326d978018d48615d5.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 19 Jul 2017 17:31:20 +0200
Subject: [PATCH 228/333] cpu/hotplug: Implement CPU pinning
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h |  1 +
 kernel/cpu.c          | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f18668e166f2..ff1fe0b3c1e3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -671,6 +671,7 @@ struct task_struct {
 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
 	int				migrate_disable;
 	int				migrate_disable_update;
+	int				pinned_on_cpu;
 # ifdef CONFIG_SCHED_DEBUG
 	int				migrate_disable_atomic;
 # endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 267fcad3a81e..f9e0e3db440a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -76,6 +76,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
 	.fail = CPUHP_INVALID,
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
+	__RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
+#endif
+
 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
 static struct lockdep_map cpuhp_state_up_map =
 	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
@@ -287,7 +292,28 @@ static int cpu_hotplug_disabled;
  */
 void pin_current_cpu(void)
 {
+	struct rt_rw_lock *cpuhp_pin;
+	unsigned int cpu;
+	int ret;
+
+again:
+	cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+	ret = __read_rt_trylock(cpuhp_pin);
+	if (ret) {
+		current->pinned_on_cpu = smp_processor_id();
+		return;
+	}
+	cpu = smp_processor_id();
+	preempt_enable();
+
+	__read_rt_lock(cpuhp_pin);
+	preempt_disable();
+	if (cpu != smp_processor_id()) {
+		__read_rt_unlock(cpuhp_pin);
+		goto again;
+	}
+	current->pinned_on_cpu = cpu;
 }
 
 /**
@@ -295,6 +321,13 @@ void pin_current_cpu(void)
  */
 void unpin_current_cpu(void)
 {
+	struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+
+	if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
+		cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu);
+
+	current->pinned_on_cpu = -1;
+	__read_rt_unlock(cpuhp_pin);
 }
 
 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
@@ -869,6 +902,7 @@ static int take_cpu_down(void *_param)
 
 static int takedown_cpu(unsigned int cpu)
 {
+	struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int err;
 
@@ -881,11 +915,14 @@ static int takedown_cpu(unsigned int cpu)
 	 */
 	irq_lock_sparse();
 
+	__write_rt_lock(cpuhp_pin);
+
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
 	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
+		__write_rt_unlock(cpuhp_pin);
 		/* CPU refused to die */
 		irq_unlock_sparse();
 		/* Unpark the hotplug thread so we can rollback there */
@@ -904,6 +941,7 @@ static int takedown_cpu(unsigned int cpu)
 	wait_for_ap_thread(st, false);
 	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
 
+	__write_rt_unlock(cpuhp_pin);
 	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
 	irq_unlock_sparse();
-- 
2.17.1
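
Since the patch carries no changelog, a short summary of the mechanism it
implements: each CPU gets a per-CPU rt_rw_lock. A task pins itself to its
current CPU by taking the read side of that CPU's lock; pin_current_cpu()
tries a trylock first so the common (no hotplug in progress) case stays
cheap, and if the write side is held it must enable preemption to sleep on
the RT lock, then re-check after waking whether it was migrated in the
meantime and retry on the now-current CPU's lock. takedown_cpu() takes the
write side around stop_machine_cpuslocked(), so the CPU can only go down
once every task pinned on it has unpinned. unpin_current_cpu() warns if the
task wakes on a different CPU but still releases the lock it actually
holds, via the CPU recorded in pinned_on_cpu.

The following user-space sketch models that protocol with POSIX rwlocks.
It is illustrative only: struct task, the fixed NR_CPUS array, and the
single-threaded demo are assumptions of the model, not kernel code, and
pthread_rwlock_t merely stands in for struct rt_rw_lock.

	/* Model of the per-CPU pinning scheme; build with: cc -pthread model.c */
	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* One reader/writer lock per modeled CPU, like cpuhp_pin_lock. */
	static pthread_rwlock_t cpuhp_pin_lock[NR_CPUS];

	struct task {
		int cpu;            /* "CPU" the task runs on; can change while unpinned */
		int pinned_on_cpu;  /* -1 while unpinned, mirrors the task_struct field */
	};

	/* Read side: succeeds unless "hotplug" holds the write side. */
	static void pin_current_cpu(struct task *t)
	{
	again:
		if (pthread_rwlock_tryrdlock(&cpuhp_pin_lock[t->cpu]) == 0) {
			t->pinned_on_cpu = t->cpu;
			return;
		}
		/* Writer active: block until it releases the lock... */
		int cpu = t->cpu;
		pthread_rwlock_rdlock(&cpuhp_pin_lock[cpu]);
		/* ...then retry if the task was migrated while it slept. */
		if (cpu != t->cpu) {
			pthread_rwlock_unlock(&cpuhp_pin_lock[cpu]);
			goto again;
		}
		t->pinned_on_cpu = cpu;
	}

	/* Release the lock recorded at pin time, as the kernel code does. */
	static void unpin_current_cpu(struct task *t)
	{
		int cpu = t->pinned_on_cpu;
		t->pinned_on_cpu = -1;
		pthread_rwlock_unlock(&cpuhp_pin_lock[cpu]);
	}

	/* Hotplug side: the write lock waits out every pinned task. */
	static void takedown_cpu(int cpu)
	{
		pthread_rwlock_wrlock(&cpuhp_pin_lock[cpu]);
		printf("cpu %d quiesced: no task pinned on it\n", cpu);
		pthread_rwlock_unlock(&cpuhp_pin_lock[cpu]);
	}

	int main(void)
	{
		for (int i = 0; i < NR_CPUS; i++)
			pthread_rwlock_init(&cpuhp_pin_lock[i], NULL);

		struct task t = { .cpu = 1, .pinned_on_cpu = -1 };
		pin_current_cpu(&t);    /* holds cpu 1's read lock */
		assert(t.pinned_on_cpu == 1);
		unpin_current_cpu(&t);  /* read lock released */
		takedown_cpu(1);        /* write lock now acquirable */
		return 0;
	}

The trylock fast path matters because the real function is entered with
preemption disabled: only when the trylock fails may the task enable
preemption and sleep, which is also why the CPU re-check and retry loop
are needed. In the RT series this pair is used by callers such as the
migrate_disable()/migrate_enable() path to keep a task on its CPU.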