mm: fix TLB flush race between migration, and change_protection_range

There are a few subtle races between change_protection_range (used by
mprotect and change_prot_numa) on one side, and NUMA page migration and
compaction on the other side.

The basic race is that there is a time window between when the PTE gets
made non-present (PROT_NONE or NUMA), and the TLB is flushed.

During that time, a CPU may continue writing to the page.

This is fine most of the time; however, compaction or the NUMA migration
code may come in and migrate the page away.

When that happens, the CPU may continue writing, through the cached
translation, to what is no longer the current memory location of the process.

This only affects x86, which has a somewhat optimistic pte_accessible.
All other architectures appear to be safe, and will either always flush,
or flush whenever there is a valid mapping, even with no permissions.

The basic race looks like this:

CPU A			CPU B			CPU C

						load TLB entry
make entry PTE/PMD_NUMA
			fault on entry
						read/write old page
			start migrating page
			change PTE/PMD to new page
						read/write old page [*]
flush TLB
						reload TLB from new entry
						read/write new page
						lose data

[*] the old page may belong to a new user at this point!

The obvious fix is to flush remote TLB entries, by making sure that
pte_accessible is aware of the fact that PROT_NONE and PROT_NUMA memory may
still be accessible if there is a TLB flush pending for the mm.

This should fix both NUMA migration and compaction.
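In short, change_protection_range brackets its PTE updates with a per-mm
flag, and pte_accessible on x86 treats PROT_NONE/PROT_NUMA ptes as still
accessible while that flag is set, so ptep_clear_flush and the migration
paths still do the remote flush.  The sketch below is a minimal userspace
model of that handshake; the struct, flag values and helper names are
simplified stand-ins, not the kernel definitions from the hunks below.

/*
 * Minimal userspace model of the tlb_flush_pending handshake described
 * above.  Everything prefixed with "model_" is a stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_PRESENT	0x1
#define MODEL_PAGE_PROTNONE	0x2	/* stand-in for _PAGE_PROTNONE/_PAGE_NUMA */

struct model_mm {
	bool tlb_flush_pending;
};

/* change_protection_range() sets the flag before it touches any PTEs... */
static void model_set_tlb_flush_pending(struct model_mm *mm)
{
	mm->tlb_flush_pending = true;
}

/* ...and clears it only after the TLB has actually been flushed. */
static void model_clear_tlb_flush_pending(struct model_mm *mm)
{
	mm->tlb_flush_pending = false;
}

/*
 * The x86-style check: a PROT_NONE/NUMA pte must still count as accessible
 * while a batched flush is pending, because another CPU may still be
 * writing through a stale TLB entry for it.
 */
static bool model_pte_accessible(struct model_mm *mm, unsigned long pte)
{
	if (pte & MODEL_PAGE_PRESENT)
		return true;
	if ((pte & MODEL_PAGE_PROTNONE) && mm->tlb_flush_pending)
		return true;
	return false;
}

int main(void)
{
	struct model_mm mm = { .tlb_flush_pending = false };
	unsigned long pte = MODEL_PAGE_PROTNONE;	/* pte already made non-present */

	model_set_tlb_flush_pending(&mm);
	printf("must flush before migrating while pending: %d\n",
	       model_pte_accessible(&mm, pte));		/* 1: flush required */
	model_clear_tlb_flush_pending(&mm);
	printf("flush still needed once flush is done:     %d\n",
	       model_pte_accessible(&mm, pte));		/* 0: safe to skip */
	return 0;
}

The real patch keeps the same ordering, but relies on the page table locks
plus compiler barriers (see the mm_types.h hunk) rather than any explicit
synchronization in the helpers.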

[ fix build]
Signed-off-by: Rik van Riel <>
Signed-off-by: Mel Gorman <>
Cc: Alex Thorlton <>
Cc: <>
Signed-off-by: Andrew Morton <>
Signed-off-by: Linus Torvalds <>

@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
 #define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
 	return pte_val(a) & _PAGE_VALID;
 }
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
 	 * and SUN4V pte layout, so this inline test is fine.
 	 */
-	if (likely(mm != &init_mm) && pte_accessible(orig))
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
 		tlb_batch_add(mm, addr, ptep, orig, fullmm);

@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-	return pte_flags(a) & _PAGE_PRESENT;
+	if (pte_flags(a) & _PAGE_PRESENT)
+		return true;
+
+	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+			mm_tlb_flush_pending(mm))
+		return true;
+
+	return false;
 }
 
 static inline int pte_hidden(pte_t pte)

@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #ifndef pte_accessible
-# define pte_accessible(pte)		((void)(pte),1)
+# define pte_accessible(mm, pte)	((void)(pte), 1)
 #endif
 
 #ifndef flush_tlb_fix_spurious_fault

@@ -442,6 +442,14 @@ struct mm_struct {
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
 #endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+	/*
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
+	 */
+	bool tlb_flush_pending;
+#endif
 	struct uprobes_state uprobes_state;
@@ -459,4 +467,40 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+	barrier();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */

@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 	mm_init_owner(mm, p);
+	clear_tlb_flush_pending(mm);
 
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;

@@ -1376,6 +1376,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 
+	/*
+	 * The page_table_lock above provides a memory barrier
+	 * with change_protection_range.
+	 */
+	if (mm_tlb_flush_pending(mm))
+		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and pmd_numa cleared.
 	 */

@@ -188,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 	BUG_ON(addr >= end);
 	pgd = pgd_offset(mm, addr);
 	flush_cache_range(vma, addr, end);
+	set_tlb_flush_pending(mm);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -199,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 	/* Only flush the TLB if we actually modified any entries: */
 	if (pages)
 		flush_tlb_range(vma, start, end);
+	clear_tlb_flush_pending(mm);
 
 	return pages;
 }

@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 		       pte_t *ptep)
 {
+	struct mm_struct *mm = (vma)->vm_mm;
 	pte_t pte;
-	pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
-	if (pte_accessible(pte))
+	pte = ptep_get_and_clear(mm, address, ptep);
+	if (pte_accessible(mm, pte))
 		flush_tlb_page(vma, address);
 	return pte;
 }