From 96bdc6cdad26d03d6c43df560b27a6ee7cd72f8e Mon Sep 17 00:00:00 2001
From: David Hildenbrand <dahi@linux.vnet.ibm.com>
Date: Mon, 11 May 2015 17:52:09 +0200
Subject: [PATCH 04/14] mm: explicitly disable/enable preemption in
 kmap_atomic_*
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.0/patches-4.0.4-rt1.tar.xz

The existing code relies on pagefault_disable() implicitly disabling
preemption, so that no schedule will happen between kmap_atomic() and
kunmap_atomic().

Let's make this explicit, to prepare for pagefault_disable() not
touching preemption anymore.

Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
---
 arch/arm/mm/highmem.c                | 3 +++
 arch/frv/mm/highmem.c                | 2 ++
 arch/metag/mm/highmem.c              | 4 +++-
 arch/microblaze/mm/highmem.c         | 4 +++-
 arch/mips/mm/highmem.c               | 5 ++++-
 arch/mn10300/include/asm/highmem.h   | 3 +++
 arch/parisc/include/asm/cacheflush.h | 2 ++
 arch/powerpc/mm/highmem.c            | 4 +++-
 arch/sparc/mm/highmem.c              | 4 +++-
 arch/tile/mm/highmem.c               | 3 ++-
 arch/x86/mm/highmem_32.c             | 3 ++-
 arch/x86/mm/iomap_32.c               | 2 ++
 arch/xtensa/mm/highmem.c             | 2 ++
 include/linux/highmem.h              | 2 ++
 include/linux/io-mapping.h           | 2 ++
 15 files changed, 38 insertions(+), 7 deletions(-)

--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
 	void *kmap;
 	int type;
 
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	int idx, type;
 	struct page *page = pfn_to_page(pfn);
 
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
 	unsigned long paddr;
 	int type;
 
+	preempt_disable();
 	pagefault_disable();
 	type = kmap_atomic_idx_push();
 	paddr = page_to_phys(page);
@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
 	}
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
--- a/arch/metag/mm/highmem.c
+++ b/arch/metag/mm/highmem.c
@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
 	unsigned long vaddr;
 	int type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	unsigned long vaddr;
 	int type;
 
+	preempt_disable();
 	pagefault_disable();
 
 	type = kmap_atomic_idx_push();
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
 
@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
 
@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	unsigned long vaddr;
 	int idx, type;
 
+	preempt_disable();
 	pagefault_disable();
 
 	type = kmap_atomic_idx_push();
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct p
 	unsigned long vaddr;
 	int idx, type;
 
+	preempt_disable();
 	pagefault_disable();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsig
 
 	if (vaddr < FIXADDR_START) { /* FIXME */
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
 
@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsig
 
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 #endif /* __KERNEL__ */
 
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -142,6 +142,7 @@ static inline void kunmap(struct page *p
 
 static inline void *kmap_atomic(struct page *page)
 {
+	preempt_disable();
 	pagefault_disable();
 	return page_address(page);
 }
@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void 
 {
 	flush_kernel_dcache_page_addr(addr);
 	pagefault_enable();
+	preempt_enable();
 }
 
 #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
 
@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
 
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
 	unsigned long vaddr;
 	long idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
 
@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
 
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page
 	int idx, type;
 	pte_t *pte;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 
 	/* Avoid icache flushes by disallowing atomic executable mappings. */
@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 
 	if (!PageHighMem(page))
@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long
 	unsigned long vaddr;
 	int idx, type;
 
+	preempt_disable();
 	pagefault_disable();
 
 	type = kmap_atomic_idx_push();
@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -65,6 +65,7 @@ static inline void kunmap(struct page *p
 
 static inline void *kmap_atomic(struct page *page)
 {
+	preempt_disable();
 	pagefault_disable();
 	return page_address(page);
 }
@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct p
 
 static inline void __kunmap_atomic(void *addr)
 {
 	pagefault_enable();
+	preempt_enable();
 }
 
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -141,6 +141,7 @@ static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
 			 unsigned long offset)
 {
+	preempt_disable();
 	pagefault_disable();
 	return ((char __force __iomem *) mapping) + offset;
 }
@@ -149,6 +150,7 @@ static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
 	pagefault_enable();
+	preempt_enable();
 }
 
 /* Non-atomic map/unmap */