From 31aee2dc5cb0dde7da5a2cad0631c2ff64889b81 Mon Sep 17 00:00:00 2001
Message-Id: <31aee2dc5cb0dde7da5a2cad0631c2ff64889b81.1601675153.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Peter Zijlstra
Date: Mon, 30 Sep 2019 18:15:44 +0200
Subject: [PATCH 304/333] locking/rtmutex: Clean ->pi_blocked_on in the error case
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

[ Upstream commit 0be4ea6e3ce693101be0fbd55a0cc7ce238ab2eb ]

The function rt_mutex_wait_proxy_lock() cleans ->pi_blocked_on in case
of failure (timeout, signal). The same cleanup is required in
__rt_mutex_start_proxy_lock().

In both cases the task was interrupted by a signal or timeout while
acquiring the lock, and after the interruption it no longer blocks on
the lock.

Fixes: 1a1fb985f2e2b ("futex: Handle early deadlock return correctly")
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/locking/rtmutex.c | 43 +++++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 18 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2a9bf2443acc..63b3d6f306fa 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -2320,6 +2320,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 	rt_mutex_set_owner(lock, NULL);
 }
 
+static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
+{
+	struct task_struct *tsk = current;
+	/*
+	 * RT has a problem here when the wait got interrupted by a timeout
+	 * or a signal. task->pi_blocked_on is still set. The task must
+	 * acquire the hash bucket lock when returning from this function.
+	 *
+	 * If the hash bucket lock is contended then the
+	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
+	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by
+	 * clearing task->pi_blocked_on which removes the task from the
+	 * boosting chain of the rtmutex. That's correct because the task
+	 * is not longer blocked on it.
+	 */
+	raw_spin_lock(&tsk->pi_lock);
+	tsk->pi_blocked_on = NULL;
+	raw_spin_unlock(&tsk->pi_lock);
+}
+
 /**
  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
  * @lock:		the rt_mutex to take
@@ -2392,6 +2412,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		ret = 0;
 	}
 
+	if (ret)
+		fixup_rt_mutex_blocked(lock);
+
 	debug_rt_mutex_print_deadlock(waiter);
 
 	return ret;
@@ -2472,7 +2495,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 			       struct hrtimer_sleeper *to,
 			       struct rt_mutex_waiter *waiter)
 {
-	struct task_struct *tsk = current;
 	int ret;
 
 	raw_spin_lock_irq(&lock->wait_lock);
@@ -2484,23 +2506,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	 * have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
-	/*
-	 * RT has a problem here when the wait got interrupted by a timeout
-	 * or a signal. task->pi_blocked_on is still set. The task must
-	 * acquire the hash bucket lock when returning from this function.
-	 *
-	 * If the hash bucket lock is contended then the
-	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
-	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by
-	 * clearing task->pi_blocked_on which removes the task from the
-	 * boosting chain of the rtmutex. That's correct because the task
-	 * is not longer blocked on it.
-	 */
-	if (ret) {
-		raw_spin_lock(&tsk->pi_lock);
-		tsk->pi_blocked_on = NULL;
-		raw_spin_unlock(&tsk->pi_lock);
-	}
+	if (ret)
+		fixup_rt_mutex_blocked(lock);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
 
-- 
2.17.1
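
Editor's note: the pattern this patch introduces is a single shared helper that
clears the task's ->pi_blocked_on pointer under the task's pi_lock, called from
every path that gave up on the lock (signal, timeout, deadlock). Below is a
minimal, self-contained userspace sketch of that pattern, not kernel code: the
stub struct definitions, the pthread spinlock standing in for the raw pi_lock,
the start_proxy_lock() wrapper, and the simulated error value are all
illustrative assumptions; only the helper and field names mirror the kernel's.

	/* Illustrative userspace analogue of the cleanup pattern (assumed names). */
	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	struct rt_mutex_waiter { int dummy; };

	struct task_struct {
		pthread_spinlock_t pi_lock;            /* stands in for the raw pi_lock */
		struct rt_mutex_waiter *pi_blocked_on; /* waiter the task is blocked on */
	};

	/*
	 * Shared cleanup: the task gave up on the lock, so it must not stay
	 * linked into the rtmutex boosting chain via ->pi_blocked_on.
	 */
	static void fixup_rt_mutex_blocked(struct task_struct *tsk)
	{
		pthread_spin_lock(&tsk->pi_lock);
		tsk->pi_blocked_on = NULL;
		pthread_spin_unlock(&tsk->pi_lock);
	}

	/*
	 * Error path of a lock-start operation: on failure it now runs the same
	 * cleanup the wait path already performs (this is the gist of the patch).
	 */
	static int start_proxy_lock(struct task_struct *tsk, int ret)
	{
		if (ret)
			fixup_rt_mutex_blocked(tsk);
		return ret;
	}

	int main(void)
	{
		struct rt_mutex_waiter w;
		struct task_struct tsk = { .pi_blocked_on = &w };

		pthread_spin_init(&tsk.pi_lock, PTHREAD_PROCESS_PRIVATE);
		start_proxy_lock(&tsk, -1 /* simulate a timeout/deadlock error */);
		printf("pi_blocked_on cleared: %s\n",
		       tsk.pi_blocked_on == NULL ? "yes" : "no");
		pthread_spin_destroy(&tsk.pi_lock);
		return 0;
	}

Build with -lpthread; the point of the sketch is only that both failure paths
funnel into one helper, so neither can leave a stale ->pi_blocked_on behind.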