From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:36:39 +0200
Subject: rtmutex: export lockdep-less version of rt_mutex's lock,
 trylock and unlock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patches-4.14.6-rt7.tar.xz

Required for lock implementation ontop of rtmutex.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/locking/rtmutex.c        |   70 +++++++++++++++++++++++++---------------
 kernel/locking/rtmutex_common.h |    3 +
 2 files changed, 47 insertions(+), 26 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1489,6 +1489,29 @@ rt_mutex_fastunlock(struct rt_mutex *loc
 	rt_mutex_postunlock(&wake_q);
 }
 
+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
+{
+	might_sleep();
+	return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
+}
+
+/**
+ * rt_mutex_lock_state - lock a rt_mutex with a given state
+ *
+ * @lock:	The rt_mutex to be locked
+ * @state:	The state to set when blocking on the rt_mutex
+ */
+static int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = __rt_mutex_lock_state(lock, state);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -1496,10 +1519,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
@@ -1514,16 +1534,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, 1, _RET_IP_);
-
-	return ret;
+	return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1544,13 +1555,10 @@ int __sched rt_mutex_futex_trylock(struc
  * Returns:
  *  0 on success
  * -EINTR when interrupted by a signal
- * -EDEADLK when the lock would deadlock (when deadlock detection is on)
  */
 int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
 {
-	might_sleep();
-
-	return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
+	return rt_mutex_lock_state(lock, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
 
@@ -1585,6 +1593,18 @@ rt_mutex_timed_lock(struct rt_mutex *loc
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (WARN_ON_ONCE(in_irq() || in_nmi()))
+#else
+	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+#endif
+		return 0;
+
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+}
+
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
@@ -1600,14 +1620,7 @@ int __sched rt_mutex_trylock(struct rt_m
 {
 	int ret;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (WARN_ON_ONCE(in_irq() || in_nmi()))
-#else
-	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-#endif
-		return 0;
-
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = __rt_mutex_trylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -1615,6 +1628,11 @@ int __sched rt_mutex_trylock(struct rt_m
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
+void __sched __rt_mutex_unlock(struct rt_mutex *lock)
+{
+	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+}
+
 /**
  * rt_mutex_unlock - unlock a rt_mutex
  *
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -159,6 +159,9 @@ extern bool __rt_mutex_futex_unlock(stru
 extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 /* RW semaphore special interface */
 
+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+extern int __rt_mutex_trylock(struct rt_mutex *lock);
+extern void __rt_mutex_unlock(struct rt_mutex *lock);
 int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
 				     struct hrtimer_sleeper *timeout,
 				     enum rtmutex_chainwalk chwalk,