From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 21 Aug 2013 17:48:46 +0200
Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz

Joe Korty reported, that __irq_set_affinity_locked() schedules a
workqueue while holding a rawlock which results in a might_sleep()
warning.
This patch moves the invocation into a process context so that we only
wakeup() a process while holding the lock.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/interrupt.h |    2 +
 kernel/irq/manage.c       |   79 ++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 78 insertions(+), 3 deletions(-)

--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -206,6 +206,7 @@ extern void resume_device_irqs(void);
  * @irq:		Interrupt to which notification applies
  * @kref:		Reference count, for internal use
  * @work:		Work item, for internal use
+ * @list:		List item for deferred callbacks
  * @notify:		Function to be called on change.  This will be
  *			called in process context.
  * @release:		Function to be called on release.  This will be
@@ -217,6 +218,7 @@ struct irq_affinity_notify {
 	unsigned int irq;
 	struct kref kref;
 	struct work_struct work;
+	struct list_head list;
 	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
 	void (*release)(struct kref *ref);
 };
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -183,6 +183,62 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
+static struct task_struct *set_affinity_helper;
+static LIST_HEAD(affinity_list);
+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
+
+static int set_affinity_thread(void *unused)
+{
+	while (1) {
+		struct irq_affinity_notify *notify;
+		int empty;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		raw_spin_lock_irq(&affinity_list_lock);
+		empty = list_empty(&affinity_list);
+		raw_spin_unlock_irq(&affinity_list_lock);
+
+		if (empty)
+			schedule();
+		if (kthread_should_stop())
+			break;
+		set_current_state(TASK_RUNNING);
+try_next:
+		notify = NULL;
+
+		raw_spin_lock_irq(&affinity_list_lock);
+		if (!list_empty(&affinity_list)) {
+			notify = list_first_entry(&affinity_list,
+					struct irq_affinity_notify, list);
+			list_del_init(&notify->list);
+		}
+		raw_spin_unlock_irq(&affinity_list_lock);
+
+		if (!notify)
+			continue;
+		_irq_affinity_notify(notify);
+		goto try_next;
+	}
+	return 0;
+}
+
+static void init_helper_thread(void)
+{
+	if (set_affinity_helper)
+		return;
+	set_affinity_helper = kthread_run(set_affinity_thread, NULL,
+			"affinity-cb");
+	WARN_ON(IS_ERR(set_affinity_helper));
+}
+#else
+
+static inline void init_helper_thread(void) { }
+
+#endif
+
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 			bool force)
 {
@@ -222,7 +278,17 @@ int irq_set_affinity_locked(struct irq_d
 
 	if (desc->affinity_notify) {
 		kref_get(&desc->affinity_notify->kref);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+		raw_spin_lock(&affinity_list_lock);
+		if (list_empty(&desc->affinity_notify->list))
+			list_add_tail(&affinity_list,
+					&desc->affinity_notify->list);
+		raw_spin_unlock(&affinity_list_lock);
+		wake_up_process(set_affinity_helper);
+#else
 		schedule_work(&desc->affinity_notify->work);
+#endif
 	}
 	irqd_set(data, IRQD_AFFINITY_SET);
 
@@ -260,10 +326,8 @@ int irq_set_affinity_hint(unsigned int i
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
-static void irq_affinity_notify(struct work_struct *work)
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
 {
-	struct irq_affinity_notify *notify =
-		container_of(work, struct irq_affinity_notify, work);
 	struct irq_desc *desc = irq_to_desc(notify->irq);
 	cpumask_var_t cpumask;
 	unsigned long flags;
@@ -285,6 +349,13 @@ static void irq_affinity_notify(struct w
 	kref_put(&notify->kref, notify->release);
 }
 
+static void irq_affinity_notify(struct work_struct *work)
+{
+	struct irq_affinity_notify *notify =
+		container_of(work, struct irq_affinity_notify, work);
+	_irq_affinity_notify(notify);
+}
+
 /**
  * irq_set_affinity_notifier - control notification of IRQ affinity changes
  * @irq:		Interrupt for which to enable/disable notification
@@ -314,6 +385,8 @@ irq_set_affinity_notifier(unsigned int i
 		notify->irq = irq;
 		kref_init(&notify->kref);
 		INIT_WORK(&notify->work, irq_affinity_notify);
+		INIT_LIST_HEAD(&notify->list);
+		init_helper_thread();
 	}
 
 	raw_spin_lock_irqsave(&desc->lock, flags);