Subject: softirq: Check preemption after reenabling interrupts
From: Thomas Gleixner
Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8.14-rt9.tar.xz

raise_softirq_irqoff() disables interrupts and wakes the softirq
daemon, but after reenabling interrupts there is no preemption check,
so the execution of the softirq thread might be delayed arbitrarily.

In principle we could add that check to local_irq_enable/restore, but
that's overkill as the raise_softirq_irqoff() sections are the only
ones which show this behaviour.

Reported-by: Carsten Emde
Signed-off-by: Thomas Gleixner
---
 block/blk-softirq.c     |    3 +++
 include/linux/preempt.h |    3 +++
 lib/irq_poll.c          |    5 +++++
 net/core/dev.c          |    7 +++++++
 4 files changed, 18 insertions(+)

--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
 
 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 /*
@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifie
 				this_cpu_ptr(&blk_cpu_done));
 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
 		local_irq_enable();
+		preempt_check_resched_rt();
 	}
 
 	return NOTIFY_OK;
@@ -150,6 +152,7 @@ void __blk_complete_request(struct reque
 		goto do_local;
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 /**
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -160,8 +160,10 @@ do { \
 
 #ifdef CONFIG_PREEMPT_RT_BASE
 # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
 #else
 # define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier()
 #endif
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
@@ -232,6 +234,7 @@ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preempt_check_resched_rt()		barrier()
 #define preemptible()				0
 
 #endif /* CONFIG_PREEMPT_COUNT */
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop
 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(irq_poll_sched);
 
@@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *
 	local_irq_save(flags);
 	__irq_poll_complete(iop);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(irq_poll_complete);
 
@@ -95,6 +97,7 @@ static void irq_poll_softirq(struct soft
 	}
 
 	local_irq_enable();
+	preempt_check_resched_rt();
 
 	/* Even though interrupts have been re-enabled, this
 	 * access is safe because interrupts can only add new
@@ -132,6 +135,7 @@ static void irq_poll_softirq(struct soft
 
 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 	local_irq_enable();
+	preempt_check_resched_rt();
 }
 
 /**
@@ -199,6 +203,7 @@ static int irq_poll_cpu_notify(struct no
 				this_cpu_ptr(&blk_cpu_iopoll));
 		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 		local_irq_enable();
+		preempt_check_resched_rt();
 	}
 
 	return NOTIFY_OK;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2268,6 +2268,7 @@ static void __netif_reschedule(struct Qd
 	sd->output_queue_tailp = &q->next_sched;
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 void __netif_schedule(struct Qdisc *q)
@@ -2349,6 +2350,7 @@ void __dev_kfree_skb_irq(struct sk_buff
 	__this_cpu_write(softnet_data.completion_queue, skb);
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
@@ -3778,6 +3780,7 @@ static int enqueue_to_backlog(struct sk_
 	rps_unlock(sd);
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 
 	atomic_long_inc(&skb->dev->rx_dropped);
 	kfree_skb(skb);
@@ -4797,6 +4800,7 @@ static void net_rps_action_and_irq_enabl
 		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
+		preempt_check_resched_rt();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
 		while (remsd) {
@@ -4810,6 +4814,7 @@ static void net_rps_action_and_irq_enabl
 	} else
 #endif
 		local_irq_enable();
+	preempt_check_resched_rt();
 }
 
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -4891,6 +4896,7 @@ void __napi_schedule(struct napi_struct
 	local_irq_save(flags);
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__napi_schedule);
 
@@ -7989,6 +7995,7 @@ static int dev_cpu_callback(struct notif
 
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
+	preempt_check_resched_rt();
 
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
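A minimal sketch of the pattern every hunk above instates, for
illustration; kick_my_softirq() and MY_SOFTIRQ are hypothetical names,
not taken from any of the patched files:

static void kick_my_softirq(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Mark the softirq pending; on RT this wakes the softirq thread. */
	raise_softirq_irqoff(MY_SOFTIRQ);
	local_irq_restore(flags);
	/*
	 * On PREEMPT_RT_BASE this expands to preempt_check_resched(), so
	 * a reschedule requested by the softirq-thread wakeup is honoured
	 * here instead of at some arbitrarily later preemption point. On
	 * !RT configurations it is a plain barrier().
	 */
	preempt_check_resched_rt();
}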