mm: protect VMA modifications using VMA sequence count
The VMA sequence count has been introduced to allow fast detection of
VMA modification when running a page fault handler without holding the
mmap_sem.

This patch provides protection against the VMA modification done in:
 - madvise()
 - mpol_rebind_policy()
 - vma_replace_policy()
 - change_prot_numa()
 - mlock(), munlock()
 - mprotect()
 - mmap_region()
 - collapse_huge_page()
 - userfaultfd registering services

In addition, VMA fields which will be read during the speculative fault
path need to be written using WRITE_ONCE to prevent a write from being
split and intermediate values from being pushed to other CPUs.

Change-Id: Ic36046b7254e538b6baf7144c50ae577ee7f2074
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:15
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
[charante@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
parent 662540931e
commit 06219e6c87
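The vm_write_begin()/vm_write_end() helpers used throughout the diff below are not defined by this patch; the commit message only notes that the VMA sequence count "has been introduced" earlier in the series. As a rough sketch of what they are assumed to do (the vm_sequence field and these wrappers are assumptions taken from the surrounding series, not part of this commit):

#include <linux/mm_types.h>
#include <linux/seqlock.h>

/*
 * Sketch only: assumes an earlier patch in the series added
 * "seqcount_t vm_sequence" to struct vm_area_struct. Writers bump the
 * sequence count around any VMA field update; a speculative reader that
 * sees the count change must retry or fall back to the classic,
 * mmap_sem-protected fault path.
 */
static inline void vm_write_begin(struct vm_area_struct *vma)
{
	write_seqcount_begin(&vma->vm_sequence);
}

static inline void vm_write_end(struct vm_area_struct *vma)
{
	write_seqcount_end(&vma->vm_sequence);
}

Bracketing every VMA field update this way is what lets the speculative fault path detect a concurrent modification, while WRITE_ONCE()/READ_ONCE() keep the individual field accesses from being torn.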
@@ -1277,8 +1277,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 					goto out_mm;
 				}
 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
-					vma->vm_flags &= ~VM_SOFTDIRTY;
+					vm_write_begin(vma);
+					WRITE_ONCE(vma->vm_flags,
+						   vma->vm_flags & ~VM_SOFTDIRTY);
 					vma_set_page_prot(vma);
+					vm_write_end(vma);
 				}
 				downgrade_write(&mm->mmap_sem);
 				break;
@@ -675,8 +675,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 
 	octx = vma->vm_userfaultfd_ctx.ctx;
 	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
+		vm_write_begin(vma);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
+		WRITE_ONCE(vma->vm_flags,
+			   vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING));
+		vm_write_end(vma);
 		return 0;
 	}
 
@@ -919,8 +922,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 		else
 			prev = vma;
 		}
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 	}
 	up_write(&mm->mmap_sem);
 	mmput(mm);
@@ -1487,8 +1492,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx.ctx = ctx;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
@@ -1650,8 +1657,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		 * the next vma was merged into the current one and
 		 * the current one has not been updated yet.
 		 */
-		vma->vm_flags = new_flags;
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, new_flags);
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vm_write_end(vma);
 
 	skip:
 		prev = vma;
@@ -1026,6 +1026,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (mm_find_pmd(mm, address) != pmd)
 		goto out;
 
+	vm_write_begin(vma);
 	anon_vma_lock_write(vma->anon_vma);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
@@ -1062,6 +1063,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
 		spin_unlock(pmd_ptl);
 		anon_vma_unlock_write(vma->anon_vma);
+		vm_write_end(vma);
 		result = SCAN_FAIL;
 		goto out;
 	}
@@ -1097,6 +1099,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
 	spin_unlock(pmd_ptl);
+	vm_write_end(vma);
 
 	*hpage = NULL;
 
mm/madvise.c
@@ -166,7 +166,9 @@ static long madvise_behavior(struct vm_area_struct *vma,
 	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 */
-	vma->vm_flags = new_flags;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, new_flags);
+	vm_write_end(vma);
 
 out_convert_errno:
 	/*
@@ -483,9 +485,11 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
 		.tlb = tlb,
 	};
 
+	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
 	tlb_end_vma(tlb, vma);
+	vm_write_end(vma);
 }
 
 static long madvise_cold(struct vm_area_struct *vma,
@@ -516,9 +520,11 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
 		.tlb = tlb,
 	};
 
+	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
 	tlb_end_vma(tlb, vma);
+	vm_write_end(vma);
 }
 
 static inline bool can_do_pageout(struct vm_area_struct *vma)
@@ -721,10 +727,12 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
+	vm_write_begin(vma);
 	tlb_start_vma(&tlb, vma);
 	walk_page_range(vma->vm_mm, range.start, range.end,
 			&madvise_free_walk_ops, &tlb);
 	tlb_end_vma(&tlb, vma);
+	vm_write_end(vma);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb, range.start, range.end);
 
@@ -380,8 +380,11 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 	struct vm_area_struct *vma;
 
 	down_write(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		vm_write_begin(vma);
 		mpol_rebind_policy(vma->vm_policy, new);
+		vm_write_end(vma);
+	}
 	up_write(&mm->mmap_sem);
 }
 
@@ -596,9 +599,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
 	int nr_updated;
 
+	vm_write_begin(vma);
 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+	vm_write_end(vma);
 
 	return nr_updated;
 }
@@ -711,6 +716,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 
+	vm_write_begin(vma);
 	if (vma->vm_ops && vma->vm_ops->set_policy) {
 		err = vma->vm_ops->set_policy(vma, new);
 		if (err)
@@ -718,11 +724,17 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 	}
 
 	old = vma->vm_policy;
-	vma->vm_policy = new; /* protected by mmap_sem */
+	/*
+	 * The speculative page fault handler accesses this field without
+	 * holding the mmap_sem.
+	 */
+	WRITE_ONCE(vma->vm_policy, new);
+	vm_write_end(vma);
 	mpol_put(old);
 
 	return 0;
 err_out:
+	vm_write_end(vma);
 	mpol_put(new);
 	return err;
 }
@@ -1703,14 +1715,20 @@ COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 						unsigned long addr)
 {
-	struct mempolicy *pol = NULL;
+	struct mempolicy *pol;
 
-	if (vma) {
-		if (vma->vm_ops && vma->vm_ops->get_policy) {
-			pol = vma->vm_ops->get_policy(vma, addr);
-		} else if (vma->vm_policy) {
-			pol = vma->vm_policy;
-
+	if (!vma)
+		return NULL;
+
+	if (vma->vm_ops && vma->vm_ops->get_policy)
+		return vma->vm_ops->get_policy(vma, addr);
+
+	/*
+	 * This could be called without holding the mmap_sem in the
+	 * speculative page fault handler's path.
+	 */
+	pol = READ_ONCE(vma->vm_policy);
+	if (pol) {
 		/*
 		 * shmem_alloc_page() passes MPOL_F_SHARED policy with
 		 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
@@ -1720,7 +1738,6 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 		if (mpol_needs_cond_ref(pol))
 			mpol_get(pol);
 	}
-	}
 
 	return pol;
 }
mm/mlock.c
@@ -445,7 +445,9 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 void munlock_vma_pages_range(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
-	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK);
+	vm_write_end(vma);
 
 	while (start < end) {
 		struct page *page;
@@ -569,10 +571,11 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	 * It's okay if try_to_unmap_one unmaps a page just after we
 	 * set VM_LOCKED, populate_vma_page_range will bring it back.
 	 */
-
-	if (lock)
-		vma->vm_flags = newflags;
-	else
+	if (lock) {
+		vm_write_begin(vma);
+		WRITE_ONCE(vma->vm_flags, newflags);
+		vm_write_end(vma);
+	} else
 		munlock_vma_pages_range(vma, start, end);
 
 out:
mm/mmap.c
@@ -879,17 +879,18 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	}
 
 	if (start != vma->vm_start) {
-		vma->vm_start = start;
+		WRITE_ONCE(vma->vm_start, start);
 		start_changed = true;
 	}
 	if (end != vma->vm_end) {
-		vma->vm_end = end;
+		WRITE_ONCE(vma->vm_end, end);
 		end_changed = true;
 	}
-	vma->vm_pgoff = pgoff;
+	WRITE_ONCE(vma->vm_pgoff, pgoff);
 	if (adjust_next) {
-		next->vm_start += adjust_next << PAGE_SHIFT;
-		next->vm_pgoff += adjust_next;
+		WRITE_ONCE(next->vm_start,
+			   next->vm_start + (adjust_next << PAGE_SHIFT));
+		WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next);
 	}
 
 	if (root) {
@@ -1868,12 +1869,14 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 out:
 	perf_event_mmap(vma);
 
+	vm_write_begin(vma);
 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
 					is_vm_hugetlb_page(vma) ||
 					vma == get_gate_vma(current->mm))
-			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+			WRITE_ONCE(vma->vm_flags,
+				   vma->vm_flags & VM_LOCKED_CLEAR_MASK);
 		else
 			mm->locked_vm += (len >> PAGE_SHIFT);
 	}
@@ -1888,9 +1891,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * then new mapped in-place (which must be aimed as
 	 * a completely new data area).
 	 */
-	vma->vm_flags |= VM_SOFTDIRTY;
+	WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY);
 
 	vma_set_page_prot(vma);
+	vm_write_end(vma);
 
 	return addr;
 
@@ -2525,8 +2529,8 @@ int expand_downwards(struct vm_area_struct *vma,
 					mm->locked_vm += grow;
 				vm_stat_account(mm, vma->vm_flags, grow);
 				anon_vma_interval_tree_pre_update_vma(vma);
-				vma->vm_start = address;
-				vma->vm_pgoff -= grow;
+				WRITE_ONCE(vma->vm_start, address);
+				WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow);
 				anon_vma_interval_tree_post_update_vma(vma);
 				vma_gap_update(vma);
 				spin_unlock(&mm->page_table_lock);
@@ -454,12 +454,14 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
-	vma->vm_flags = newflags;
+	vm_write_begin(vma);
+	WRITE_ONCE(vma->vm_flags, newflags);
 	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
 	vma_set_page_prot(vma);
 
 	change_protection(vma, start, end, vma->vm_page_prot,
 			  dirty_accountable, 0);
+	vm_write_end(vma);
 
 	/*
 	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
@@ -534,7 +534,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
  *
- * Caller must hold read mmap_sem if vmf->vma is not NULL.
+ * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
+ * This is needed to ensure the VMA will not be freed behind our back. In the
+ * case of the speculative page fault handler, this cannot happen, even if we
+ * don't hold the mmap_sem. Callees are assumed to take care of reading the
+ * VMA's fields using READ_ONCE() to read consistent values.
  */
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				struct vm_fault *vmf)
@@ -631,9 +635,9 @@ static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
 				     unsigned long *start,
 				     unsigned long *end)
 {
-	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
+	*start = max3(lpfn, PFN_DOWN(READ_ONCE(vma->vm_start)),
 		      PFN_DOWN(faddr & PMD_MASK));
-	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
+	*end = min3(rpfn, PFN_DOWN(READ_ONCE(vma->vm_end)),
 		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
 }
 