From 5c4e6f2122b008584d40bb4407ea17ed73f2bb71 Mon Sep 17 00:00:00 2001
Message-Id: <5c4e6f2122b008584d40bb4407ea17ed73f2bb71.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Thomas Gleixner
Date: Tue, 31 Jan 2012 13:01:27 +0100
Subject: [PATCH 123/333] genirq: Allow disabling of softirq processing in
 irq thread context
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

The processing of softirqs in irq thread context is a performance gain
for the non-rt workloads of a system, but it's counterproductive for
interrupts which are explicitly related to the realtime
workload. Allow such interrupts to prevent softirq processing in their
thread context.

Signed-off-by: Thomas Gleixner
---
 include/linux/interrupt.h |  2 ++
 include/linux/irq.h       |  4 +++-
 kernel/irq/manage.c       | 13 ++++++++++++-
 kernel/irq/settings.h     | 12 ++++++++++++
 kernel/softirq.c          |  9 +++++++++
 5 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index cf9860d49d57..e1438fe66467 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,6 +61,7 @@
  *                interrupt handler after suspending interrupts. For system
  *                wakeup devices users need to implement wakeup detection in
  *                their interrupt handlers.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
  */
 #define IRQF_SHARED             0x00000080
 #define IRQF_PROBE_SHARED       0x00000100
@@ -74,6 +75,7 @@
 #define IRQF_NO_THREAD          0x00010000
 #define IRQF_EARLY_RESUME       0x00020000
 #define IRQF_COND_SUSPEND       0x00040000
+#define IRQF_NO_SOFTIRQ_CALL    0x00080000
 
 #define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a042faefb9b7..e3ed5e02605f 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,7 @@ enum irqchip_irq_state;
  * IRQ_IS_POLLED                - Always polled by another interrupt. Exclude
  *                                it from the spurious interrupt detection
  *                                mechanism and from core side polling.
+ * IRQ_NO_SOFTIRQ_CALL          - No softirq processing in the irq thread context (RT)
  * IRQ_DISABLE_UNLAZY           - Disable lazy irq disable
  */
 enum {
@@ -96,13 +97,14 @@ enum {
        IRQ_PER_CPU_DEVID       = (1 << 17),
        IRQ_IS_POLLED           = (1 << 18),
        IRQ_DISABLE_UNLAZY      = (1 << 19),
+       IRQ_NO_SOFTIRQ_CALL     = (1 << 20),
 };
 
 #define IRQF_MODIFY_MASK       \
        (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
         IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
         IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-        IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+        IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
 
 #define IRQ_NO_BALANCING_MASK  (IRQ_PER_CPU | IRQ_NO_BALANCING)
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 98f9bb27ce37..bb2825ed4bf0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -994,7 +994,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
                atomic_inc(&desc->threads_handled);
 
        irq_finalize_oneshot(desc, action);
-       local_bh_enable();
+       /*
+        * Interrupts which have real time requirements can be set up
+        * to avoid softirq processing in the thread handler. This is
+        * safe as these interrupts do not raise soft interrupts.
+        */
+       if (irq_settings_no_softirq_call(desc))
+               _local_bh_enable();
+       else
+               local_bh_enable();
        return ret;
 }
 
@@ -1504,6 +1512,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
                }
 
+               if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+                       irq_settings_set_no_softirq_call(desc);
+
                if (irq_settings_can_autoenable(desc)) {
                        irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
                } else {
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index e43795cd2ccf..47e2f9e23586 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -17,6 +17,7 @@ enum {
        _IRQ_PER_CPU_DEVID      = IRQ_PER_CPU_DEVID,
        _IRQ_IS_POLLED          = IRQ_IS_POLLED,
        _IRQ_DISABLE_UNLAZY     = IRQ_DISABLE_UNLAZY,
+       _IRQ_NO_SOFTIRQ_CALL    = IRQ_NO_SOFTIRQ_CALL,
        _IRQF_MODIFY_MASK       = IRQF_MODIFY_MASK,
 };
 
@@ -31,6 +32,7 @@ enum {
 #define IRQ_PER_CPU_DEVID      GOT_YOU_MORON
 #define IRQ_IS_POLLED          GOT_YOU_MORON
 #define IRQ_DISABLE_UNLAZY     GOT_YOU_MORON
+#define IRQ_NO_SOFTIRQ_CALL    GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK       GOT_YOU_MORON
 
@@ -41,6 +43,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
        desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
 }
 
+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
+{
+       return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
+}
+
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
+{
+       desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
+}
+
 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 {
        return desc->status_use_accessors & _IRQ_PER_CPU;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fd89f8ab85ac..3e9333d148ad 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -598,6 +598,15 @@ void __local_bh_enable(void)
 }
 EXPORT_SYMBOL(__local_bh_enable);
 
+void _local_bh_enable(void)
+{
+       if (WARN_ON(current->softirq_nestcnt == 0))
+               return;
+       if (--current->softirq_nestcnt == 0)
+               migrate_enable();
+}
+EXPORT_SYMBOL(_local_bh_enable);
+
 int in_serving_softirq(void)
 {
        return current->flags & PF_IN_SOFTIRQ;
-- 
2.17.1
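
For illustration only (not part of the patch above), here is a minimal sketch of
how a driver with realtime requirements might use the new flag: it requests a
threaded interrupt with IRQF_NO_SOFTIRQ_CALL so that pending softirqs are not
processed in its irq thread. The rt_dev_* names and the "rt-dev" identifier are
hypothetical, and the thread handler is assumed not to raise softirqs itself.

#include <linux/interrupt.h>

static irqreturn_t rt_dev_irq_thread(int irq, void *dev_id)
{
	/* Latency-critical work; must not raise soft interrupts. */
	return IRQ_HANDLED;
}

static int rt_dev_request_irq(unsigned int irq, void *dev_id)
{
	/*
	 * A NULL primary handler requires IRQF_ONESHOT; the softirq
	 * opt-out is requested with IRQF_NO_SOFTIRQ_CALL, so the irq
	 * thread exits via _local_bh_enable() instead of running
	 * pending softirqs in its own context.
	 */
	return request_threaded_irq(irq, NULL, rt_dev_irq_thread,
				    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
				    "rt-dev", dev_id);
}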