From 5285cd46bea76913bdc462813c1be23a338d0550 Mon Sep 17 00:00:00 2001
Message-Id: <5285cd46bea76913bdc462813c1be23a338d0550.1601675151.git.zanussi@kernel.org>
In-Reply-To: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
References: <5b5a156f9808b1acf1205606e03da117214549ea.1601675151.git.zanussi@kernel.org>
From: Paul Gortmaker
Date: Fri, 21 Jun 2013 15:07:25 -0400
Subject: [PATCH 068/333] list_bl: Make list head locking RT safe
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.148-rt64.tar.xz

As per the changes in include/linux/jbd_common.h for avoiding the
bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
head lock rt safe"), we do the same thing here.

We use the non-atomic __set_bit and __clear_bit inside the scope of
the lock to preserve the ability of the existing LIST_DEBUG code to
use the zero'th bit in the sanity checks.

As a bit spinlock, we had no lockdep visibility into the usage of the
list head locking.  Now, if we were to implement it as a standard
non-raw spinlock, we would see:

BUG: sleeping function called from invalid context at kernel/rtmutex.c:658
in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd
5 locks held by udevd/122:
 #0:  (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] lock_rename+0xe8/0xf0
 #1:  (rename_lock){+.+...}, at: [] d_move+0x2c/0x60
 #2:  (&dentry->d_lock){+.+...}, at: [] dentry_lock_for_move+0xf3/0x130
 #3:  (&dentry->d_lock/2){+.+...}, at: [] dentry_lock_for_move+0xc4/0x130
 #4:  (&dentry->d_lock/3){+.+...}, at: [] dentry_lock_for_move+0xd7/0x130
Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7
Call Trace:
 [] __might_sleep+0x134/0x1f0
 [] rt_spin_lock+0x24/0x60
 [] __d_shrink+0x5c/0xa0
 [] __d_drop+0x1d/0x40
 [] __d_move+0x8e/0x320
 [] d_move+0x3e/0x60
 [] vfs_rename+0x198/0x4c0
 [] sys_renameat+0x213/0x240
 [] ? _raw_spin_unlock+0x35/0x60
 [] ? do_page_fault+0x1ec/0x4b0
 [] ? retint_swapgs+0xe/0x13
 [] ? trace_hardirqs_on_thunk+0x3a/0x3f
 [] sys_rename+0x1b/0x20
 [] system_call_fastpath+0x1a/0x1f

Since we are only taking the lock during short-lived list operations,
let's assume for now that it being raw won't be a significant latency
concern.
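As an illustration only (not part of this patch), here is a minimal
sketch of a caller; example_bucket_add() is a hypothetical helper.
The critical section covers just a few pointer updates, which is why
a raw, non-sleeping lock should be tolerable here even on RT:

  /*
   * hlist_bl_lock() takes bit 0 of b->first as a bit spinlock on a
   * non-RT kernel, and the raw_spinlock_t b->lock on an RT kernel.
   */
  static void example_bucket_add(struct hlist_bl_head *head,
                                 struct hlist_bl_node *node)
  {
          hlist_bl_lock(head);            /* short, non-sleeping critical section */
          hlist_bl_add_head(node, head);  /* a handful of pointer updates */
          hlist_bl_unlock(head);
  }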
Signed-off-by: Paul Gortmaker
Signed-off-by: Sebastian Andrzej Siewior
---
 include/linux/list_bl.h | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 3fc2cc57ba1b..69b659259bac 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -3,6 +3,7 @@
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #include <linux/bit_spinlock.h>
 
 /*
@@ -33,13 +34,22 @@
 
 struct hlist_bl_head {
 	struct hlist_bl_node *first;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spinlock_t lock;
+#endif
 };
 
 struct hlist_bl_node {
 	struct hlist_bl_node *next, **pprev;
 };
-#define INIT_HLIST_BL_HEAD(ptr) \
-	((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+{
+	h->first = NULL;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spin_lock_init(&h->lock);
+#endif
+}
 
 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
 {
@@ -119,12 +129,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
 
 static inline void hlist_bl_lock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(0, (unsigned long *)b);
+#else
+	raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__set_bit(0, (unsigned long *)b);
+#endif
+#endif
 }
 
 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
+#else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit(0, (unsigned long *)b);
+#endif
+	raw_spin_unlock(&b->lock);
+#endif
 }
 
 static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
-- 
2.17.1