linux/debian/patches/features/all/rt/Revert-rwsem-rt-Do-not-allo...

From 1db4ce56001465fba240916d0c8de7443cafec62 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Feb 2015 12:16:43 +0100
Subject: [PATCH] Revert "rwsem-rt: Do not allow readers to nest"
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.0/patches-4.0.5-rt3.tar.xz
This behaviour is required by cpufreq and its logic is "okay": it does a
read_lock followed by a try_read_lock on the same lock.
Lockdep warns if one tries to take a read_lock twice, on both -RT and
vanilla, so this should be fine. We still only allow multiple readers as
long as they are in the same process.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
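Illustrative sketch (not from cpufreq or from this patch; example_sem and
example_reader are made-up names) of the pattern described above, assuming an
-RT kernel where rw_semaphore maps to the rt_mutex-based implementation touched
below: the same task takes a read lock and then a try-read on the same
semaphore.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void example_reader(void)
{
	down_read(&example_sem);		/* first read lock, read_depth goes to 1 */

	if (down_read_trylock(&example_sem)) {	/* nested try-read by the same task */
		/* ... critical section ... */
		up_read(&example_sem);		/* back to read_depth == 1 */
	}

	up_read(&example_sem);			/* read_depth hits 0, the rt_mutex is released */
}

With this revert, the nested down_read_trylock() succeeds on -RT because
rt_down_read_trylock() only bumps read_depth when current already owns the
underlying rt_mutex; without it, the nested trylock simply fails.
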
 include/linux/rwsem_rt.h |  1 +
 kernel/locking/rt.c      | 29 +++++++++++++++++++++++------
 2 files changed, 24 insertions(+), 6 deletions(-)

--- a/include/linux/rwsem_rt.h
+++ b/include/linux/rwsem_rt.h
@@ -20,6 +20,7 @@
 
 struct rw_semaphore {
 	struct rt_mutex lock;
+	int read_depth;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -316,7 +316,8 @@ EXPORT_SYMBOL(rt_up_write);
 void rt_up_read(struct rw_semaphore *rwsem)
 {
 	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-	rt_mutex_unlock(&rwsem->lock);
+	if (--rwsem->read_depth == 0)
+		rt_mutex_unlock(&rwsem->lock);
 }
 EXPORT_SYMBOL(rt_up_read);
 
@@ -327,6 +328,7 @@ EXPORT_SYMBOL(rt_up_read);
 void rt_downgrade_write(struct rw_semaphore *rwsem)
 {
 	BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
+	rwsem->read_depth = 1;
 }
 EXPORT_SYMBOL(rt_downgrade_write);
 
@@ -356,12 +358,23 @@ EXPORT_SYMBOL(rt_down_write_nested);
 
 int rt_down_read_trylock(struct rw_semaphore *rwsem)
 {
-	int ret;
+	struct rt_mutex *lock = &rwsem->lock;
+	int ret = 1;
 
-	ret = rt_mutex_trylock(&rwsem->lock);
-	if (ret)
-		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+	/*
+	 * recursive read locks succeed when current owns the rwsem,
+	 * but not when read_depth == 0 which means that the rwsem is
+	 * write locked.
+	 */
+	if (rt_mutex_owner(lock) != current)
+		ret = rt_mutex_trylock(&rwsem->lock);
+	else if (!rwsem->read_depth)
+		ret = 0;
+	if (ret) {
+		rwsem->read_depth++;
+		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+	}
 
 	return ret;
 }
 EXPORT_SYMBOL(rt_down_read_trylock);
@@ -371,7 +384,10 @@ static void __rt_down_read(struct rw_sem
 	struct rt_mutex *lock = &rwsem->lock;
 
 	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_lock(&rwsem->lock);
+
+	if (rt_mutex_owner(lock) != current)
+		rt_mutex_lock(&rwsem->lock);
+	rwsem->read_depth++;
 }
 
 void rt_down_read(struct rw_semaphore *rwsem)
@@ -396,6 +412,7 @@ void __rt_rwsem_init(struct rw_semaphor
 	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
 	lockdep_init_map(&rwsem->dep_map, name, key, 0);
 #endif
+	rwsem->read_depth = 0;
 	rwsem->lock.save_state = 0;
 }
 EXPORT_SYMBOL(__rt_rwsem_init);
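
Illustrative sketch (made-up names again, same assumptions as the note above
the diffstat) of why rt_downgrade_write() now sets read_depth to 1: after
downgrading, the former writer continues as a single reader, so one up_read()
must bring read_depth back to 0 and release the underlying rt_mutex.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void example_downgrade(void)
{
	down_write(&example_sem);	/* exclusive writer; read_depth is 0 */
	/* ... modify the protected data ... */
	downgrade_write(&example_sem);	/* keep the lock as a reader; read_depth becomes 1 */
	/* ... read-only work ... */
	up_read(&example_sem);		/* read_depth drops to 0, the rt_mutex is unlocked */
}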