From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 18:19:48 +0200
Subject: [PATCH 014/290] cgroup: use irqsave in cgroup_rstat_flush_locked()
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=a46bfc03c899ec820ba9e964b1bac8ee7ffc5f2f
All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
either with spin_lock_irq() or spin_lock_irqsave().
cgroup_rstat_flush_locked() itself acquires cgroup_rstat_cpu_lock which
is a raw_spin_lock. This lock is also acquired in cgroup_rstat_updated()
in IRQ context and therefore requires _irqsave() locking suffix in
cgroup_rstat_flush_locked().
Since there is no difference between spin_lock_t and raw_spin_lock_t
on !RT lockdep does not complain here. On RT lockdep complains because
the interrupts were not disabled here and a deadlock is possible.

Acquire the raw_spin_lock_t with disabled interrupts.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/cgroup/rstat.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index bb95a35e8c2d..3266a9781b4e 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -159,8 +159,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
 						       cpu);
 		struct cgroup *pos = NULL;
+		unsigned long flags;
 
-		raw_spin_lock(cpu_lock);
+		raw_spin_lock_irqsave(cpu_lock, flags);
 		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
 			struct cgroup_subsys_state *css;
 
@@ -172,7 +173,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 				css->ss->css_rstat_flush(css, cpu);
 			rcu_read_unlock();
 		}
-		raw_spin_unlock(cpu_lock);
+		raw_spin_unlock_irqrestore(cpu_lock, flags);
 
 		/* if @may_sleep, play nice and yield if necessary */
 		if (may_sleep && (need_resched() ||