linux/debian/patches-rt/0191-workqueue-Use-local-ir...

From 09f0e99c07775b5e1f04241a951402fcd9906fb8 Mon Sep 17 00:00:00 2001
Message-Id: <09f0e99c07775b5e1f04241a951402fcd9906fb8.1601675152.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:42:26 +0200
Subject: [PATCH 191/333] workqueue: Use local irq lock instead of irq disable
regions
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

Use a local_irq_lock as a replacement for irq-off regions. We keep the
irq-off semantics with respect to pool->lock and remain preemptible.
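
The substitution is, schematically, the following (a minimal sketch built
on the RT tree's <linux/locallock.h>; example_pending_lock and
example_mark_pending() are illustrative names, not identifiers touched by
this patch):

    #include <linux/bitops.h>
    #include <linux/locallock.h>
    #include <linux/workqueue.h>

    /*
     * Illustrative stand-in for pendingb_lock. On !PREEMPT_RT_FULL the
     * local_lock_* macros fall back to plain local_irq_save()/restore();
     * on PREEMPT_RT_FULL they take a per-CPU sleeping spinlock instead,
     * so the former "irqs off" region stays preemptible.
     */
    static DEFINE_LOCAL_IRQ_LOCK(example_pending_lock);

    static bool example_mark_pending(struct work_struct *work)
    {
            unsigned long flags;
            bool ret;

            /* Was: local_irq_save(flags); */
            local_lock_irqsave(example_pending_lock, flags);
            ret = !test_and_set_bit(WORK_STRUCT_PENDING_BIT,
                                    work_data_bits(work));
            /* Was: local_irq_restore(flags); */
            local_unlock_irqrestore(example_pending_lock, flags);

            return ret;
    }
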
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/workqueue.c | 45 ++++++++++++++++++++++++++++++---------------
1 file changed, 30 insertions(+), 15 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29dc939dad4e..f34586370fcb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,6 +49,7 @@
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
+#include <linux/locallock.h>
#include "workqueue_internal.h"
@@ -350,6 +351,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
+
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
@@ -1103,9 +1106,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
- spin_lock_irq(&pwq->pool->lock);
+ rcu_read_lock();
+ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
put_pwq(pwq);
- spin_unlock_irq(&pwq->pool->lock);
+ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+ rcu_read_unlock();
}
}
@@ -1209,7 +1214,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
- local_irq_save(*flags);
+ local_lock_irqsave(pendingb_lock, *flags);
/* try to steal the timer if it exists */
if (is_dwork) {
@@ -1273,7 +1278,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
- local_irq_restore(*flags);
+ local_unlock_irqrestore(pendingb_lock, *flags);
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
@@ -1378,7 +1383,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * nort: On RT the "interrupts-disabled" rule has been replaced with
+ * pendingb_lock.
+ */
lockdep_assert_irqs_disabled();
+#endif
debug_work_activate(work);
@@ -1486,14 +1497,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
- local_irq_save(flags);
+ local_lock_irqsave(pendingb_lock,flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_work(cpu, wq, work);
ret = true;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
return ret;
}
EXPORT_SYMBOL(queue_work_on);
@@ -1502,8 +1513,11 @@ void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
+ /* XXX */
+ /* local_lock(pendingb_lock); */
/* should have been called from irqsafe timer with irq already off */
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
+ /* local_unlock(pendingb_lock); */
}
EXPORT_SYMBOL(delayed_work_timer_fn);
@@ -1558,14 +1572,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
- local_irq_save(flags);
+ local_lock_irqsave(pendingb_lock, flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_delayed_work(cpu, wq, dwork, delay);
ret = true;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
@@ -1600,7 +1614,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
}
/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -1611,11 +1625,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
static void rcu_work_rcufn(struct rcu_head *rcu)
{
struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
+ unsigned long flags;
/* read the comment in __queue_work() */
- local_irq_disable();
+ local_lock_irqsave(pendingb_lock, flags);
__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
- local_irq_enable();
+ local_unlock_irqrestore(pendingb_lock, flags);
}
/**
@@ -3010,7 +3025,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
/*
* This allows canceling during early boot. We know that @work
@@ -3071,10 +3086,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
- local_irq_disable();
+ local_lock_irq(pendingb_lock);
if (del_timer_sync(&dwork->timer))
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
- local_irq_enable();
+ local_unlock_irq(pendingb_lock);
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
@@ -3112,7 +3127,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
return ret;
}
--
2.17.1