FROMLIST: mm/mmap: write-lock VMAs before merging, splitting or expanding them

Decisions about whether VMAs can be merged, split or expanded must be
made while VMAs are protected from the changes which can affect that
decision. For example, vma_merge() uses vma->anon_vma in its decision
whether the VMA can be merged. Meanwhile, the page fault handler changes
vma->anon_vma during a COW operation.
Write-lock all VMAs which might be affected by a merge or split operation
before deciding how such operations should be performed.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Link: https://lore.kernel.org/all/20230216051750.3125598-17-surenb@google.com/
[surenb: using older v3 of patchset due to missing __vma_adjust()
refactoring in 6.2-rc4 which introduced vma_prepare()]

Bug: 161210518
Change-Id: I56d84aa67366a1988fc81296da7164ad7f89a5c0
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
This commit is contained in:
Suren Baghdasaryan 2022-06-10 22:37:08 -07:00 committed by Carlos Llamas
parent d73ebe031c
commit 998ec9f54d

View File

@@ -257,8 +257,11 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
*/
mas_set(&mas, oldbrk);
next = mas_find(&mas, newbrk - 1 + PAGE_SIZE + stack_guard_gap);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
if (next) {
vma_start_write(next);
if (newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
}
brkvma = mas_prev(&mas, mm->start_brk);
/* Ok, looks good - let it rip. */
@@ -1027,10 +1030,17 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (vm_flags & VM_SPECIAL)
return NULL;
if (prev)
vma_start_write(prev);
next = find_vma(mm, prev ? prev->vm_end : 0);
if (next)
vma_start_write(next);
mid = next;
if (next && next->vm_end == end) /* cases 6, 7, 8 */
if (next && next->vm_end == end) { /* cases 6, 7, 8 */
next = find_vma(mm, next->vm_end);
if (next)
vma_start_write(next);
}
/* verify some invariant that must be enforced by the caller */
VM_WARN_ON(prev && addr <= prev->vm_start);
@@ -2216,6 +2226,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
int err;
validate_mm_mt(mm);
vma_start_write(vma);
if (vma->vm_ops && vma->vm_ops->may_split) {
err = vma->vm_ops->may_split(vma, addr);
if (err)
@@ -2590,6 +2601,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Attempt to expand an old mapping */
/* Check next */
if (next)
vma_start_write(next);
if (next && next->vm_start == end && !vma_policy(next) &&
can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
NULL_VM_UFFD_CTX, NULL)) {
@@ -2599,6 +2612,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
}
/* Check prev */
if (prev)
vma_start_write(prev);
if (prev && prev->vm_end == addr && !vma_policy(prev) &&
(vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
pgoff, vma->vm_userfaultfd_ctx, NULL) :
@@ -2976,6 +2991,8 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
if (vma)
vma_start_write(vma);
/*
* Expand the existing vma if possible; Note that singular lists do not
* occur after forking, so the expand will only happen on new VMAs.