From a31a6204232e00ac5cee010869ee505682f5842f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: [PATCH 095/303] fs: jbd/jbd2: Make state lock and journal head lock rt safe

bit_spin_locks break under RT: a bit spinlock is a plain busy-wait lock
taken with preemption disabled, which does not work once spinlocks are
converted to sleeping locks. Use real, per buffer_head spinlocks for the
BH_State and BH_JournalHead bit locks when PREEMPT_RT_BASE is enabled.

Based on a previous patch from Steven Rostedt
Signed-off-by: Thomas Gleixner
---
 include/linux/buffer_head.h | 10 ++++++++++
 include/linux/jbd_common.h  | 24 ++++++++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 5c16cf1..3f8e27b 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -74,6 +74,11 @@ struct buffer_head {
 	atomic_t b_count;		/* users using this buffer_head */
 #ifdef CONFIG_PREEMPT_RT_BASE
 	spinlock_t b_uptodate_lock;
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+    defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+	spinlock_t b_state_lock;
+	spinlock_t b_journal_head_lock;
+#endif
 #endif
 };
 
@@ -105,6 +110,11 @@ static inline void buffer_head_init_locks(struct buffer_head *bh)
 {
 #ifdef CONFIG_PREEMPT_RT_BASE
 	spin_lock_init(&bh->b_uptodate_lock);
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+    defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+	spin_lock_init(&bh->b_state_lock);
+	spin_lock_init(&bh->b_journal_head_lock);
+#endif
 #endif
 }
 
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 6230f85..11c313e 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -37,32 +37,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
 
 static inline void jbd_lock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(BH_State, &bh->b_state);
+#else
+	spin_lock(&bh->b_state_lock);
+#endif
 }
 
 static inline int jbd_trylock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+	return spin_trylock(&bh->b_state_lock);
+#endif
 }
 
 static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+	return spin_is_locked(&bh->b_state_lock);
+#endif
 }
 
 static inline void jbd_unlock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_unlock(BH_State, &bh->b_state);
+#else
+	spin_unlock(&bh->b_state_lock);
+#endif
 }
 
 static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+	spin_lock(&bh->b_journal_head_lock);
+#endif
 }
 
 static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+	spin_unlock(&bh->b_journal_head_lock);
+#endif
 }
 
 #endif
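
Illustrative usage, not part of the patch: a minimal sketch of how a caller
is expected to use the wrappers changed above. The function name
example_drop_journal_head() and its body are hypothetical; only the wrappers
themselves (jbd_lock_bh_journal_head(), bh2jh(), jbd_unlock_bh_journal_head())
come from the headers patched here. Without PREEMPT_RT_BASE the wrappers take
a bit spinlock in bh->b_state; with this patch applied and PREEMPT_RT_BASE
enabled they take the new per buffer_head spinlock_t instead, which on RT is
a sleeping, priority-inheriting lock, so the caller no longer runs with
preemption disabled.

#include <linux/buffer_head.h>
#include <linux/jbd2.h>		/* pulls in jbd_common.h */

/* Hypothetical caller, for illustration only. */
static void example_drop_journal_head(struct buffer_head *bh)
{
	struct journal_head *jh;

	/*
	 * !RT: bit_spin_lock(BH_JournalHead, &bh->b_state), spins with
	 *      preemption disabled.
	 * RT:  spin_lock(&bh->b_journal_head_lock), may sleep, preemption
	 *      stays enabled.
	 */
	jbd_lock_bh_journal_head(bh);
	jh = bh2jh(bh);
	if (jh) {
		/* ... inspect or tear down the journal head under the lock ... */
	}
	jbd_unlock_bh_journal_head(bh);
}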