From 9c2e7dffa272d9112c453d37bf48c92bb1b4d287 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: [PATCH 066/325] buffer_head: Replace bh_uptodate_lock for -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.115-rt48.tar.xz

Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 fs/buffer.c                 | 21 +++++++--------------
 fs/ext4/page-io.c           |  6 ++----
 fs/ntfs/aops.c              | 10 +++-------
 include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
 4 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index a550e0d8e965..a5b3a456dbff 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -274,8 +274,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	 * decide that the page is now completely done.
 	 */
 	first = page_buffers(page);
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+	flags = bh_uptodate_lock_irqsave(first);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@@ -288,8 +287,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
+	bh_uptodate_unlock_irqrestore(first, flags);
 
 	/*
 	 * If none of the buffers had errors and they are all
@@ -301,9 +299,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	return;
 
 still_busy:
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
-	return;
+	bh_uptodate_unlock_irqrestore(first, flags);
 }
 
 /*
@@ -330,8 +326,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 	}
 
 	first = page_buffers(page);
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+	flags = bh_uptodate_lock_irqsave(first);
 
 	clear_buffer_async_write(bh);
 	unlock_buffer(bh);
@@ -343,15 +338,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	}
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
+	bh_uptodate_unlock_irqrestore(first, flags);
 	end_page_writeback(page);
 	return;
 
 still_busy:
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
-	return;
+	bh_uptodate_unlock_irqrestore(first, flags);
 }
 EXPORT_SYMBOL(end_buffer_async_write);
 
@@ -3368,6 +3360,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
+		buffer_head_init_locks(ret);
 		preempt_disable();
 		__this_cpu_inc(bh_accounting.nr);
 		recalc_bh_state();
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 9cc79b7b0df1..3f4ba2011499 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio)
 		 * We check all buffers in the page under BH_Uptodate_Lock
 		 * to avoid races with other end io clearing async_write flags
 		 */
-		local_irq_save(flags);
-		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
+		flags = bh_uptodate_lock_irqsave(head);
 		do {
 			if (bh_offset(bh) < bio_start ||
 			    bh_offset(bh) + bh->b_size > bio_end) {
@@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *bio)
 			if (bio->bi_status)
 				buffer_io_error(bh);
 		} while ((bh = bh->b_this_page) != head);
-		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
-		local_irq_restore(flags);
+		bh_uptodate_unlock_irqrestore(head, flags);
 		if (!under_io) {
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 			if (data_page)
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 8946130c87ad..71d0b3ba70f8 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -106,8 +106,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 				"0x%llx.", (unsigned long long)bh->b_blocknr);
 	}
 	first = page_buffers(page);
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+	flags = bh_uptodate_lock_irqsave(first);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@@ -122,8 +121,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
+	bh_uptodate_unlock_irqrestore(first, flags);
 	/*
 	 * If none of the buffers had errors then we can set the page uptodate,
 	 * but we first have to perform the post read mst fixups, if the
@@ -156,9 +154,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	unlock_page(page);
 	return;
 still_busy:
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
-	return;
+	bh_uptodate_unlock_irqrestore(first, flags);
 }
 
 /**
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 96225a77c112..8a1bcfb145d7 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -76,8 +76,42 @@ struct buffer_head {
 	struct address_space *b_assoc_map;	/* mapping this buffer is
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spinlock_t b_uptodate_lock;
+#endif
 };
 
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+	unsigned long flags;
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
+#else
+	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+#endif
+	return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
+	local_irq_restore(flags);
+#else
+	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+#endif
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spin_lock_init(&bh->b_uptodate_lock);
+#endif
+}
+
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
  * and buffer_foo() functions.
-- 
2.25.1