From 7a56801d0e3c06595886dde9df739f3c9a8b1b03 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 30 Sep 2019 18:15:44 +0200
Subject: [PATCH 305/325] locking/rtmutex: Clean ->pi_blocked_on in the error
 case
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.115-rt48.tar.xz

[ Upstream commit 0be4ea6e3ce693101be0fbd55a0cc7ce238ab2eb ]

The function rt_mutex_wait_proxy_lock() cleans ->pi_blocked_on in case
of failure (timeout, signal). The same cleanup is required in
__rt_mutex_start_proxy_lock().
In both cases the task was interrupted by a signal or timeout while
acquiring the lock and after the interruption it no longer blocks on the
lock.

Fixes: 1a1fb985f2e2b ("futex: Handle early deadlock return correctly")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 kernel/locking/rtmutex.c | 43 +++++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 18 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2a9bf2443acc..63b3d6f306fa 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -2320,6 +2320,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 	rt_mutex_set_owner(lock, NULL);
 }
 
+static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
+{
+	struct task_struct *tsk = current;
+	/*
+	 * RT has a problem here when the wait got interrupted by a timeout
+	 * or a signal. task->pi_blocked_on is still set. The task must
+	 * acquire the hash bucket lock when returning from this function.
+	 *
+	 * If the hash bucket lock is contended then the
+	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
+	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by
+	 * clearing task->pi_blocked_on which removes the task from the
+	 * boosting chain of the rtmutex. That's correct because the task
+	 * is not longer blocked on it.
+	 */
+	raw_spin_lock(&tsk->pi_lock);
+	tsk->pi_blocked_on = NULL;
+	raw_spin_unlock(&tsk->pi_lock);
+}
+
 /**
  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
  * @lock:		the rt_mutex to take
@@ -2392,6 +2412,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		ret = 0;
 	}
 
+	if (ret)
+		fixup_rt_mutex_blocked(lock);
+
 	debug_rt_mutex_print_deadlock(waiter);
 
 	return ret;
@@ -2472,7 +2495,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 			       struct hrtimer_sleeper *to,
 			       struct rt_mutex_waiter *waiter)
 {
-	struct task_struct *tsk = current;
 	int ret;
 
 	raw_spin_lock_irq(&lock->wait_lock);
@@ -2484,23 +2506,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	 * have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
-	/*
-	 * RT has a problem here when the wait got interrupted by a timeout
-	 * or a signal. task->pi_blocked_on is still set. The task must
-	 * acquire the hash bucket lock when returning from this function.
-	 *
-	 * If the hash bucket lock is contended then the
-	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
-	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by
-	 * clearing task->pi_blocked_on which removes the task from the
-	 * boosting chain of the rtmutex. That's correct because the task
-	 * is not longer blocked on it.
-	 */
-	if (ret) {
-		raw_spin_lock(&tsk->pi_lock);
-		tsk->pi_blocked_on = NULL;
-		raw_spin_unlock(&tsk->pi_lock);
-	}
+	if (ret)
+		fixup_rt_mutex_blocked(lock);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
 
-- 
2.25.1