136 lines
4.7 KiB
Diff
From git-commits-head-owner@vger.kernel.org Tue Sep 26 20:23:25 2006
|
|
Date: Tue, 26 Sep 2006 15:59:55 GMT
|
|
Message-Id: <200609261559.k8QFxtwo003275@hera.kernel.org>
|
|
From: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
|
|
To: git-commits-head@vger.kernel.org
|
|
Subject: [PATCH] mm: optimize the new mprotect() code a bit
|
|
|
|
commit c1e6098b23bb46e2b488fe9a26f831f867157483
|
|
tree 6bac4d3cfaab3e7153a15d1a24f9211b2de37ba6
|
|
parent edc79b2a46ed854595e40edcf3f8b37f9f14aa3f
|
|
author Peter Zijlstra <a.p.zijlstra@chello.nl> 1159252259 -0700
|
|
committer Linus Torvalds <torvalds@g5.osdl.org> 1159285724 -0700
|
|
|
|
[PATCH] mm: optimize the new mprotect() code a bit
|
|
|
|
mprotect() resets the page protections, which could result in extra write
|
|
faults for those pages whose dirty state we track using write faults and are
|
|
dirty already.
|
|
|
|
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
|
|
Cc: Hugh Dickins <hugh@veritas.com>
|
|
Signed-off-by: Andrew Morton <akpm@osdl.org>
|
|
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
|
|
|
|
mm/mprotect.c | 34 ++++++++++++++++++++++++----------
|
|
1 file changed, 24 insertions(+), 10 deletions(-)
|
|
|
|
diff --git a/mm/mprotect.c b/mm/mprotect.c
|
|
index 367b7f6..955f9d0 100644
|
|
--- a/mm/mprotect.c
|
|
+++ b/mm/mprotect.c
|
|
@@ -27,7 +27,8 @@ #include <asm/cacheflush.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
|
|
- unsigned long addr, unsigned long end, pgprot_t newprot)
|
|
+ unsigned long addr, unsigned long end, pgprot_t newprot,
|
|
+ int dirty_accountable)
|
|
{
|
|
pte_t *pte, oldpte;
|
|
spinlock_t *ptl;
|
|
@@ -42,7 +43,14 @@ static void change_pte_range(struct mm_s
|
|
* bits by wiping the pte and then setting the new pte
|
|
* into place.
|
|
*/
|
|
- ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
|
|
+ ptent = ptep_get_and_clear(mm, addr, pte);
|
|
+ ptent = pte_modify(ptent, newprot);
|
|
+ /*
|
|
+ * Avoid taking write faults for pages we know to be
|
|
+ * dirty.
|
|
+ */
|
|
+ if (dirty_accountable && pte_dirty(ptent))
|
|
+ ptent = pte_mkwrite(ptent);
|
|
set_pte_at(mm, addr, pte, ptent);
|
|
lazy_mmu_prot_update(ptent);
|
|
#ifdef CONFIG_MIGRATION
|
|
@@ -66,7 +74,8 @@ #endif
|
|
}
|
|
|
|
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
|
|
- unsigned long addr, unsigned long end, pgprot_t newprot)
|
|
+ unsigned long addr, unsigned long end, pgprot_t newprot,
|
|
+ int dirty_accountable)
|
|
{
|
|
pmd_t *pmd;
|
|
unsigned long next;
|
|
@@ -76,12 +85,13 @@ static inline void change_pmd_range(stru
|
|
next = pmd_addr_end(addr, end);
|
|
if (pmd_none_or_clear_bad(pmd))
|
|
continue;
|
|
- change_pte_range(mm, pmd, addr, next, newprot);
|
|
+ change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
|
|
} while (pmd++, addr = next, addr != end);
|
|
}
|
|
|
|
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
|
|
- unsigned long addr, unsigned long end, pgprot_t newprot)
|
|
+ unsigned long addr, unsigned long end, pgprot_t newprot,
|
|
+ int dirty_accountable)
|
|
{
|
|
pud_t *pud;
|
|
unsigned long next;
|
|
@@ -91,12 +101,13 @@ static inline void change_pud_range(stru
|
|
next = pud_addr_end(addr, end);
|
|
if (pud_none_or_clear_bad(pud))
|
|
continue;
|
|
- change_pmd_range(mm, pud, addr, next, newprot);
|
|
+ change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
|
|
} while (pud++, addr = next, addr != end);
|
|
}
|
|
|
|
static void change_protection(struct vm_area_struct *vma,
|
|
- unsigned long addr, unsigned long end, pgprot_t newprot)
|
|
+ unsigned long addr, unsigned long end, pgprot_t newprot,
|
|
+ int dirty_accountable)
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
pgd_t *pgd;
|
|
@@ -110,7 +121,7 @@ static void change_protection(struct vm_
|
|
next = pgd_addr_end(addr, end);
|
|
if (pgd_none_or_clear_bad(pgd))
|
|
continue;
|
|
- change_pud_range(mm, pgd, addr, next, newprot);
|
|
+ change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
|
|
} while (pgd++, addr = next, addr != end);
|
|
flush_tlb_range(vma, start, end);
|
|
}
|
|
@@ -125,6 +136,7 @@ mprotect_fixup(struct vm_area_struct *vm
|
|
unsigned long charged = 0;
|
|
pgoff_t pgoff;
|
|
int error;
|
|
+ int dirty_accountable = 0;
|
|
|
|
if (newflags == oldflags) {
|
|
*pprev = vma;
|
|
@@ -181,14 +193,16 @@ success:
|
|
vma->vm_flags = newflags;
|
|
vma->vm_page_prot = protection_map[newflags &
|
|
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
|
|
- if (vma_wants_writenotify(vma))
|
|
+ if (vma_wants_writenotify(vma)) {
|
|
vma->vm_page_prot = protection_map[newflags &
|
|
(VM_READ|VM_WRITE|VM_EXEC)];
|
|
+ dirty_accountable = 1;
|
|
+ }
|
|
|
|
if (is_vm_hugetlb_page(vma))
|
|
hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
|
|
else
|
|
- change_protection(vma, start, end, vma->vm_page_prot);
|
|
+ change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
|
|
vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
|
|
vm_stat_account(mm, newflags, vma->vm_file, nrpages);
|
|
return 0;
|