linux/debian/patches/features/all/rt/softirq-local-lock.patch

Subject: softirq-local-lock.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 28 Jun 2011 15:57:18 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.0/patches-4.0.5-rt3.tar.xz
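
On PREEMPT_RT_FULL serialize softirq execution with a cpu local lock and
a per task nesting counter instead of folding SOFTIRQ_DISABLE_OFFSET into
preempt_count. Sections with bottom halves disabled stay preemptible and
pending softirqs are processed from the outermost local_bh_enable() or
from ksoftirqd.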
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/bottom_half.h  |   12 ++
 include/linux/interrupt.h    |   10 ++
 include/linux/preempt_mask.h |   15 ++-
 include/linux/sched.h        |    1 +
 init/main.c                  |    1 +
 kernel/softirq.c             |  186 ++++++++++++++++++++++++++++++++++++++++++-
 6 files changed, 220 insertions(+), 5 deletions(-)
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -4,6 +4,17 @@
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+
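+/*
+ * On RT the bottom half disable/enable operations are real functions,
+ * implemented in kernel/softirq.c on top of a cpu local lock.
+ */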
+extern void local_bh_disable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+#else
+
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
@@ -31,5 +42,6 @@ static inline void local_bh_enable(void)
{
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#endif
#endif /* _LINUX_BH_H */
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -430,7 +430,11 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
+#ifndef CONFIG_PREEMPT_RT_FULL
static inline void thread_do_softirq(void) { do_softirq(); }
+#else
+extern void thread_do_softirq(void);
+#endif
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
@@ -599,6 +603,12 @@ void tasklet_hrtimer_cancel(struct taskl
tasklet_kill(&ttimer->tasklet);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
/*
* Autoprobing for irqs:
*
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -44,16 +44,26 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
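+/* Softirq disable does not fold into preempt_count on RT */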
+# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif
#define PREEMPT_ACTIVE_BITS 1
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count() (0UL)
+extern int in_serving_softirq(void);
+#endif
/*
* Are we doing bottom half or hardware interrupt processing?
@@ -64,7 +74,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1736,6 +1736,7 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
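+	/* Nesting level of local_bh_disable() for the RT softirq code */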
+ int softirq_nestcnt;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
--- a/init/main.c
+++ b/init/main.c
@@ -514,6 +514,7 @@ asmlinkage __visible void __init start_k
* Interrupts are still disabled. Do necessary setups, then
* enable them
*/
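+	/* RT: set up the per-cpu softirq lock before it can be taken */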
+ softirq_early_init();
boot_cpu_init();
page_address_init();
pr_notice("%s", linux_banner);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -25,6 +25,7 @@
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
+#include <linux/locallock.h>
#include <linux/irq.h>
#define CREATE_TRACE_POINTS
@@ -177,6 +178,7 @@ static void handle_pending_softirqs(u32
local_irq_disable();
}
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -388,6 +390,182 @@ asmlinkage __visible void do_softirq(voi
local_irq_restore(flags);
}
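+
+/* The _nort() variants preserve the !RT behaviour of irq_enter() */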
+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On RT we serialize softirq execution with a cpu local lock
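+ * which keeps the code preemptible. The per-cpu variable below tracks
+ * the task which is currently processing softirqs on this cpu.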
+ */
+static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
+asmlinkage void __do_softirq(void);
+
+void __init softirq_early_init(void)
+{
+ local_irq_lock_init(local_softirq_lock);
+}
+
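+/*
+ * Pin the task to the current cpu and track the bh disable nesting
+ * level in task_struct.
+ */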
+static void __local_bh_disable(void)
+{
+ migrate_disable();
+ current->softirq_nestcnt++;
+}
+
+void local_bh_disable(void)
+{
+ __local_bh_disable();
+}
+EXPORT_SYMBOL(local_bh_disable);
+
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+ __local_bh_disable();
+ if (cnt & PREEMPT_CHECK_OFFSET)
+ preempt_disable();
+}
+
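+/*
+ * On the outermost enable run any pending softirqs in the context of
+ * the current task. If the trylock fails, another task on this cpu is
+ * already processing softirqs under the lock.
+ */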
+static void __local_bh_enable(void)
+{
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
+
+ if ((current->softirq_nestcnt == 1) &&
+ local_softirq_pending() &&
+ local_trylock(local_softirq_lock)) {
+
+ local_irq_disable();
+ if (local_softirq_pending())
+ __do_softirq();
+ local_irq_enable();
+ local_unlock(local_softirq_lock);
+ WARN_ON(current->softirq_nestcnt != 1);
+ }
+ current->softirq_nestcnt--;
+ migrate_enable();
+}
+
+void local_bh_enable(void)
+{
+ __local_bh_enable();
+}
+EXPORT_SYMBOL(local_bh_enable);
+
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+ __local_bh_enable();
+ if (cnt & PREEMPT_CHECK_OFFSET)
+ preempt_enable();
+}
+
+void local_bh_enable_ip(unsigned long ip)
+{
+ local_bh_enable();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
+/* For tracing */
+int notrace __in_softirq(void)
+{
+ if (__this_cpu_read(local_softirq_lock.owner) == current)
+ return __this_cpu_read(local_softirq_lock.nestcnt);
+ return 0;
+}
+
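+/*
+ * On RT a task serves softirqs when it is the recorded softirq runner
+ * of this cpu, as there is no SOFTIRQ_OFFSET in preempt_count to test.
+ */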
+int in_serving_softirq(void)
+{
+ int res;
+
+ preempt_disable();
+ res = __this_cpu_read(local_softirq_runner) == current;
+ preempt_enable();
+ return res;
+}
+EXPORT_SYMBOL(in_serving_softirq);
+
+/*
+ * Called with bh and local interrupts disabled. For full RT cpu must
+ * be pinned.
+ */
+asmlinkage void __do_softirq(void)
+{
+ u32 pending = local_softirq_pending();
+ int cpu = smp_processor_id();
+
+ current->softirq_nestcnt++;
+
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
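+	/* Record the runner so in_serving_softirq() can see it */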
+ __this_cpu_write(local_softirq_runner, current);
+
+ lockdep_softirq_enter();
+
+ handle_pending_softirqs(pending, cpu);
+
+ pending = local_softirq_pending();
+ if (pending)
+ wakeup_softirqd();
+
+ lockdep_softirq_exit();
+ __this_cpu_write(local_softirq_runner, NULL);
+
+ current->softirq_nestcnt--;
+}
+
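+/*
+ * Run pending softirqs under the local lock. Returns -1 when the cpu
+ * it was called for went offline, 0 otherwise.
+ */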
+static int __thread_do_softirq(int cpu)
+{
+ /*
+ * Prevent the current cpu from going offline.
+ * pin_current_cpu() can reenable preemption and block on the
+ * hotplug mutex. When it returns, the current cpu is
+ * pinned. It might be the wrong one, but the offline check
+ * below catches that.
+ */
+ pin_current_cpu();
+ /*
+ * If called from ksoftirqd (cpu >= 0) we need to check
+ * whether we are on the wrong cpu due to cpu offlining. If
+ * called via thread_do_softirq() no action required.
+ */
+ if (cpu >= 0 && cpu_is_offline(cpu)) {
+ unpin_current_cpu();
+ return -1;
+ }
+ preempt_enable();
+ local_lock(local_softirq_lock);
+ local_irq_disable();
+ /*
+ * We cannot switch stacks on RT as we want to be able to
+ * schedule!
+ */
+ if (local_softirq_pending())
+ __do_softirq();
+ local_unlock(local_softirq_lock);
+ unpin_current_cpu();
+ preempt_disable();
+ local_irq_enable();
+ return 0;
+}
+
+/*
+ * Called from netif_rx_ni(). Preemption enabled.
+ */
+void thread_do_softirq(void)
+{
+ if (!in_serving_softirq()) {
+ preempt_disable();
+ __thread_do_softirq(-1);
+ preempt_enable();
+ }
+}
+
+static inline void local_bh_disable_nort(void) { }
+static inline void _local_bh_enable_nort(void) { }
+
+#endif /* PREEMPT_RT_FULL */
/*
* Enter an interrupt context.
*/
@@ -399,9 +577,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
- local_bh_disable();
+ local_bh_disable_nort();
tick_irq_enter();
- _local_bh_enable();
+ _local_bh_enable_nort();
}
__irq_enter();
@@ -409,6 +587,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
@@ -428,6 +607,9 @@ static inline void invoke_softirq(void)
} else {
wakeup_softirqd();
}
+#else
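+	/*
+	 * On RT softirqs are never invoked from hard interrupt context,
+	 * hand them to ksoftirqd instead.
+	 */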
+ wakeup_softirqd();
+#endif
}
static inline void tick_irq_exit(void)