From 9908f88de24f10a2d242e0e57e3a9828ede70ec6 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 13:16:38 -0500
Subject: [PATCH 179/278] softirq: Sanitize softirq pending for NOHZ/RT

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/interrupt.h |  2 ++
 kernel/softirq.c          | 61 +++++++++++++++++++++++++++++++++++++++++++++
 kernel/time/tick-sched.c  |  8 +-----
 3 files changed, 64 insertions(+), 7 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b9162dc..74e28d9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -471,6 +471,8 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
+extern void softirq_check_pending_idle(void);
+
 /* This is the worklist that queues up per-cpu softirq work.
  *
  * send_remote_sendirq() adds work to these lists, and
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c6c5824..8332622 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -61,6 +61,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
+#ifdef CONFIG_NO_HZ
+# ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * On preempt-rt a softirq might be blocked on a lock. There might be
+ * no other runnable task on this CPU because the lock owner runs on
+ * some other CPU. So we have to go into idle with the pending bit
+ * set. Therefore we need to check this, otherwise we warn about false
+ * positives, which confuse users and defeat the whole purpose of
+ * this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+	u32 warnpending = 0, pending = local_softirq_pending();
+
+	if (rate_limit >= 10)
+		return;
+
+	if (pending) {
+		struct task_struct *tsk;
+
+		tsk = __get_cpu_var(ksoftirqd);
+		/*
+		 * The wakeup code in rtmutex.c wakes up the task
+		 * _before_ it sets pi_blocked_on to NULL under
+		 * tsk->pi_lock. So we need to check for both: state
+		 * and pi_blocked_on.
+		 */
+		raw_spin_lock(&tsk->pi_lock);
+
+		if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
+			warnpending = 1;
+
+		raw_spin_unlock(&tsk->pi_lock);
+	}
+
+	if (warnpending) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       pending);
+		rate_limit++;
+	}
+}
+# else
+/*
+ * On !PREEMPT_RT we just printk rate limited:
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+
+	if (rate_limit < 10) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       local_softirq_pending());
+		rate_limit++;
+	}
+}
+# endif
+#endif
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 548d311..c901f33 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -336,13 +336,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 		goto end;
 
 	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-		static int ratelimit;
-
-		if (ratelimit < 10) {
-			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-			       (unsigned int) local_softirq_pending());
-			ratelimit++;
-		}
+		softirq_check_pending_idle();
 		goto end;
 	}
 
-- 
1.7.10
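
For illustration only, below is a minimal userspace sketch of the decision the
PREEMPT_RT_FULL variant of softirq_check_pending_idle() makes in this patch.
It is not kernel code: struct task, check_pending_idle() and its two boolean
fields are hypothetical stand-ins for task_struct, the per-CPU ksoftirqd
lookup and the pi_blocked_on/TASK_RUNNING checks, and the pi_lock
serialization is omitted.

/*
 * Simplified model: warn about pending softirqs at idle only when
 * ksoftirqd is neither blocked on a pi-mutex nor already runnable,
 * i.e. when nothing is going to process the pending bits.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct task {
	bool pi_blocked_on;	/* stand-in for task_struct::pi_blocked_on */
	bool running;		/* stand-in for state == TASK_RUNNING */
};

static void check_pending_idle(uint32_t pending, const struct task *ksoftirqd)
{
	static int rate_limit;

	if (rate_limit >= 10 || !pending)
		return;

	/* Blocked on a lock or already woken: pending bits are expected. */
	if (ksoftirqd->pi_blocked_on || ksoftirqd->running)
		return;

	printf("NOHZ: local_softirq_pending %02x\n", (unsigned int)pending);
	rate_limit++;
}

int main(void)
{
	struct task ksoftirqd = { .pi_blocked_on = true, .running = false };

	check_pending_idle(0x08, &ksoftirqd);	/* suppressed: blocked on a lock */

	ksoftirqd.pi_blocked_on = false;
	check_pending_idle(0x08, &ksoftirqd);	/* warns: nothing will run it */

	return 0;
}

Built with any C99 compiler, the first call prints nothing (the false-positive
case the patch suppresses) while the second emits the NOHZ warning line.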