From git-commits-head-owner@vger.kernel.org Tue Sep 26 20:23:25 2006
Date: Tue, 26 Sep 2006 15:59:34 GMT
Message-Id: <200609261559.k8QFxYlE003113@hera.kernel.org>
From: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
To: git-commits-head@vger.kernel.org
Subject: [PATCH] mm: tracking shared dirty pages

commit d08b3851da41d0ee60851f2c75b118e1f7a5fc89
tree a01f6930a1387e8f66607e2fe16c62bb7044353b
parent 725d704ecaca4a43f067092c140d4f3271cf2856
author Peter Zijlstra <a.p.zijlstra@chello.nl> 1159252257 -0700
committer Linus Torvalds <torvalds@g5.osdl.org> 1159285724 -0700

[PATCH] mm: tracking shared dirty pages

Tracking of dirty pages in shared writeable mmap()s.

The idea is simple: write-protect clean shared writeable pages, catch the
write-fault, make the page writeable and set it dirty.  On page write-back,
clean all the PTE dirty bits and write-protect them once again.

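(In pseudo-kernel terms the cycle looks roughly like this; a condensed,
illustrative sketch only, with locking, TLB flushing and error handling
omitted.  The helpers are the ones used by the hunks below.)

	/* Fault side: a write hits a clean, write-protected shared page;
	 * make the PTE writable and dirty, as in do_wp_page()'s reuse path. */
	entry = pte_mkyoung(orig_pte);
	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	ptep_set_access_flags(vma, address, page_table, entry, 1);

	/* Write-back side: page_mkclean_one() below undoes it again,
	 * leaving the PTE clean and read-only. */
	entry = ptep_get_and_clear(mm, address, pte);
	entry = pte_wrprotect(pte_mkclean(entry));
	ptep_establish(vma, address, pte, entry);
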
The implementation is a tad harder, mainly because the default
backing_dev_info capabilities were too loosely maintained.  Hence it is not
enough to test the backing_dev_info for cap_account_dirty.

The current heuristic is as follows; a VMA is eligible when all of the
following hold (restated in C right after the list):
 - it is shared writeable
    (vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)
 - it is not a 'special' mapping
    (vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) == 0
 - the backing_dev_info is cap_account_dirty
    mapping_cap_account_dirty(vma->vm_file->f_mapping)
 - f_op->mmap() did not change the default page protection

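Condensed into C, the test reads (an illustrative restatement only; the
real implementation is vma_wants_writenotify() in the include/linux/mm.h
hunk below, which additionally returns 1 early whenever a ->page_mkwrite
handler is present):

	eligible = (vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED) &&
		   !(vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) &&
		   vma->vm_file && vma->vm_file->f_mapping &&
		   mapping_cap_account_dirty(vma->vm_file->f_mapping) &&
		   pgprot_val(vma->vm_page_prot) ==
			pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
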
Pages from remap_pfn_range() are explicitly excluded because their COW
semantics are already horrid enough (see vm_normal_page() in do_wp_page())
and because they don't have a backing store anyway.

mprotect() is taught about the new behaviour as well.  However, it
overrides the last condition.

Cleaning the pages on write-back is done with page_mkclean(), a new rmap
call.  It can be called on any page, but is currently only implemented for
mapped pages; if the page is found to be of a VMA that accounts dirty
pages, it will also write-protect the PTE.

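For reference, the write-back paths below (test_clear_page_dirty() and
clear_page_dirty_for_io() in mm/page-writeback.c) end up calling it like
this:

	if (TestClearPageDirty(page)) {
		if (mapping_cap_account_dirty(mapping)) {
			page_mkclean(page);
			dec_zone_page_state(page, NR_FILE_DIRTY);
		}
		return 1;
	}
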
Finally, in fs/buffer.c:try_to_free_buffers(), remove clear_page_dirty()
from under ->private_lock.  This seems to be safe, since ->private_lock is
used to serialize access to the buffers, not the page itself.  This is
needed because clear_page_dirty() will call into page_mkclean() and would
thereby violate the locking order.

[dhowells@redhat.com: Provide a page_mkclean() implementation for NOMMU]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

 fs/buffer.c          |    2 -
 include/linux/mm.h   |   34 ++++++++++++++++++++++++++
 include/linux/rmap.h |   14 ++++++++++
 mm/memory.c          |   29 ++++++++++++++++++----
 mm/mmap.c            |   10 +++----
 mm/mprotect.c        |   21 ++++++----------
 mm/page-writeback.c  |   17 ++++++++++---
 mm/rmap.c            |   65 +++++++++++++++++++++++++++++++++++++++++++++++
 8 files changed, 162 insertions(+), 30 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 71649ef..3b6d701 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2987,6 +2987,7 @@ int try_to_free_buffers(struct page *pag
 
 	spin_lock(&mapping->private_lock);
 	ret = drop_buffers(page, &buffers_to_free);
+	spin_unlock(&mapping->private_lock);
 	if (ret) {
 		/*
 		 * If the filesystem writes its buffers by hand (eg ext3)
@@ -2998,7 +2999,6 @@ int try_to_free_buffers(struct page *pag
 		 */
 		clear_page_dirty(page);
 	}
-	spin_unlock(&mapping->private_lock);
out:
 	if (buffers_to_free) {
 		struct buffer_head *bh = buffers_to_free;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d20b25..4498414 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@ #include <linux/prio_tree.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
+#include <linux/backing-dev.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -810,6 +811,39 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+static inline int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+	unsigned int vm_flags = vma->vm_flags;
+
+	/* If it was private or non-writable, the write bit is already clear */
+	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+		return 0;
+
+	/* The backer wishes to know when pages are first written to? */
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		return 1;
+
+	/* The open routine did something to the protections already? */
+	if (pgprot_val(vma->vm_page_prot) !=
+	    pgprot_val(protection_map[vm_flags &
+		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+		return 0;
+
+	/* Specialty mapping? */
+	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+		return 0;
+
+	/* Can the mapping track the dirty pages? */
+	return vma->vm_file && vma->vm_file->f_mapping &&
+		mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
 
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bf97b09..db2c1df 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -103,6 +103,14 @@ pte_t *page_check_address(struct page *,
  */
 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 
+/*
+ * Cleans the PTEs of shared mappings.
+ * (and since clean PTEs should also be readonly, write protects them too)
+ *
+ * returns the number of cleaned PTEs.
+ */
+int page_mkclean(struct page *);
+
 #else	/* !CONFIG_MMU */
 
 #define anon_vma_init() do {} while (0)
@@ -112,6 +120,12 @@ #define anon_vma_link(vma) do {} while (
 #define page_referenced(page,l) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
+static inline int page_mkclean(struct page *page)
+{
+	return 0;
+}
+
+
 #endif	/* CONFIG_MMU */
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 109e986..fa941b1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1458,14 +1458,19 @@ static int do_wp_page(struct mm_struct *
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse, ret = VM_FAULT_MINOR;
+	int reuse = 0, ret = VM_FAULT_MINOR;
+	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page)
 		goto gotten;
 
-	if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
-				(VM_SHARED|VM_WRITE))) {
+	/*
+	 * Only catch write-faults on shared writable pages, read-only
+	 * shared pages can get COWed by get_user_pages(.write=1, .force=1).
+	 */
+	if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+				(VM_WRITE|VM_SHARED))) {
 		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 			/*
 			 * Notify the address space that the page is about to
@@ -1494,13 +1499,12 @@ static int do_wp_page(struct mm_struct *
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
 		}
-
+		dirty_page = old_page;
+		get_page(dirty_page);
 		reuse = 1;
 	} else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
 		reuse = can_share_swap_page(old_page);
 		unlock_page(old_page);
-	} else {
-		reuse = 0;
 	}
 
 	if (reuse) {
@@ -1566,6 +1570,10 @@ gotten:
 		page_cache_release(old_page);
unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (dirty_page) {
+		set_page_dirty(dirty_page);
+		put_page(dirty_page);
+	}
 	return ret;
oom:
 	if (old_page)
@@ -2098,6 +2106,7 @@ static int do_no_page(struct mm_struct *
 	unsigned int sequence = 0;
 	int ret = VM_FAULT_MINOR;
 	int anon = 0;
+	struct page *dirty_page = NULL;
 
 	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2192,6 +2201,10 @@ retry:
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
+			if (write_access) {
+				dirty_page = new_page;
+				get_page(dirty_page);
+			}
 		}
 	} else {
 		/* One of our sibling threads was faster, back out. */
@@ -2204,6 +2217,10 @@ retry:
 	lazy_mmu_prot_update(entry);
unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (dirty_page) {
+		set_page_dirty(dirty_page);
+		put_page(dirty_page);
+	}
 	return ret;
oom:
 	page_cache_release(new_page);
diff --git a/mm/mmap.c b/mm/mmap.c
index d799d89..8507ee9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1105,12 +1105,6 @@ munmap_back:
 			goto free_vma;
 	}
 
-	/* Don't make the VMA automatically writable if it's shared, but the
-	 * backer wishes to know when pages are first written to */
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-		vma->vm_page_prot =
-			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
 	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
 	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
 	 * that memory reservation must be checked; but that reservation
@@ -1128,6 +1122,10 @@ munmap_back:
 	pgoff = vma->vm_pgoff;
 	vm_flags = vma->vm_flags;
 
+	if (vma_wants_writenotify(vma))
+		vma->vm_page_prot =
+			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
 		file = vma->vm_file;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 638edab..367b7f6 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -123,8 +123,6 @@ mprotect_fixup(struct vm_area_struct *vm
 	unsigned long oldflags = vma->vm_flags;
 	long nrpages = (end - start) >> PAGE_SHIFT;
 	unsigned long charged = 0;
-	unsigned int mask;
-	pgprot_t newprot;
 	pgoff_t pgoff;
 	int error;
 
@@ -176,24 +174,21 @@ mprotect_fixup(struct vm_area_struct *vm
 	}
 
success:
-	/* Don't make the VMA automatically writable if it's shared, but the
-	 * backer wishes to know when pages are first written to */
-	mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-		mask &= ~VM_SHARED;
-
-	newprot = protection_map[newflags & mask];
-
 	/*
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = newprot;
+	vma->vm_page_prot = protection_map[newflags &
+		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	if (vma_wants_writenotify(vma))
+		vma->vm_page_prot = protection_map[newflags &
+			(VM_READ|VM_WRITE|VM_EXEC)];
+
 	if (is_vm_hugetlb_page(vma))
-		hugetlb_change_protection(vma, start, end, newprot);
+		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
 	else
-		change_protection(vma, start, end, newprot);
+		change_protection(vma, start, end, vma->vm_page_prot);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
 	return 0;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 77a0bc4..1c87430 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -23,6 +23,7 @@ #include <linux/init.h>
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/mpage.h>
+#include <linux/rmap.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
@@ -550,7 +551,7 @@ int do_writepages(struct address_space *
 		return 0;
 	wbc->for_writepages = 1;
 	if (mapping->a_ops->writepages)
-		ret = mapping->a_ops->writepages(mapping, wbc);
+		ret = mapping->a_ops->writepages(mapping, wbc);
 	else
 		ret = generic_writepages(mapping, wbc);
 	wbc->for_writepages = 0;
@@ -712,9 +713,15 @@ int test_clear_page_dirty(struct page *p
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-			if (mapping_cap_account_dirty(mapping))
-				__dec_zone_page_state(page, NR_FILE_DIRTY);
 			write_unlock_irqrestore(&mapping->tree_lock, flags);
+			/*
+			 * We can continue to use `mapping' here because the
+			 * page is locked, which pins the address_space
+			 */
+			if (mapping_cap_account_dirty(mapping)) {
+				page_mkclean(page);
+				dec_zone_page_state(page, NR_FILE_DIRTY);
+			}
 			return 1;
 		}
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -744,8 +751,10 @@ int clear_page_dirty_for_io(struct page
 
 	if (mapping) {
 		if (TestClearPageDirty(page)) {
-			if (mapping_cap_account_dirty(mapping))
+			if (mapping_cap_account_dirty(mapping)) {
+				page_mkclean(page);
 				dec_zone_page_state(page, NR_FILE_DIRTY);
+			}
 			return 1;
 		}
 		return 0;
diff --git a/mm/rmap.c b/mm/rmap.c
index 40158b5..e2155d7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -434,6 +434,71 @@ int page_referenced(struct page *page, i
 	return referenced;
 }
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address;
+	pte_t *pte, entry;
+	spinlock_t *ptl;
+	int ret = 0;
+
+	address = vma_address(page, vma);
+	if (address == -EFAULT)
+		goto out;
+
+	pte = page_check_address(page, mm, address, &ptl);
+	if (!pte)
+		goto out;
+
+	if (!pte_dirty(*pte) && !pte_write(*pte))
+		goto unlock;
+
+	entry = ptep_get_and_clear(mm, address, pte);
+	entry = pte_mkclean(entry);
+	entry = pte_wrprotect(entry);
+	ptep_establish(vma, address, pte, entry);
+	lazy_mmu_prot_update(entry);
+	ret = 1;
+
+unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
+	return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	int ret = 0;
+
+	BUG_ON(PageAnon(page));
+
+	spin_lock(&mapping->i_mmap_lock);
+	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+		if (vma->vm_flags & VM_SHARED)
+			ret += page_mkclean_one(page, vma);
+	}
+	spin_unlock(&mapping->i_mmap_lock);
+	return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+	int ret = 0;
+
+	BUG_ON(!PageLocked(page));
+
+	if (page_mapped(page)) {
+		struct address_space *mapping = page_mapping(page);
+		if (mapping)
+			ret = page_mkclean_file(mapping, page);
+	}
+
+	return ret;
+}
+
 /**
  * page_set_anon_rmap - setup new anonymous rmap
  * @page: the page to add the mapping to