UPSTREAM: mm: conditionally write-lock VMA in free_pgtables

Normally free_pgtables needs to lock affected VMAs except for the case
when VMAs were isolated under VMA write-lock.  munmap() does just that,
isolating while holding appropriate locks and then downgrading mmap_lock
and dropping per-VMA locks before freeing page tables.  Add a parameter to
free_pgtables for such a scenario.

Link: https://lkml.kernel.org/r/20230227173632.3292573-20-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

(cherry picked from commit 98e51a2239d9d419d819cd61a2e720ebf19a8b0a)

Bug: 161210518
Change-Id: I3c9177cce187526407754baf7641d3741ca7b0cb
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
This commit is contained in:
Suren Baghdasaryan 2023-02-27 09:36:18 -08:00 committed by Carlos Llamas
parent 5f1e1ab919
commit 9cc64c7fb9
3 changed files with 11 additions and 4 deletions

View File

@@ -87,7 +87,8 @@ void folio_activate(struct folio *folio);
void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *start_vma, unsigned long floor,
unsigned long ceiling, unsigned long start_t);
unsigned long ceiling, unsigned long start_t,
bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
struct zap_details;

View File

@@ -397,7 +397,8 @@ void free_pgd_range(struct mmu_gather *tlb,
void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *vma, unsigned long floor,
unsigned long ceiling, unsigned long start_t)
unsigned long ceiling, unsigned long start_t,
bool mm_wr_locked)
{
MA_STATE(mas, mt, start_t, start_t);
@@ -415,6 +416,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
*/
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
unlink_file_vma(vma);
@@ -429,6 +432,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
&& !is_vm_hugetlb_page(next)) {
vma = next;
next = mas_find(&mas, ceiling - 1);
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}

View File

@@ -2227,7 +2227,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
update_hiwater_rss(mm);
unmap_vmas(&tlb, mt, vma, start, end, start_t, end_t, mm_wr_locked);
free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
next ? next->vm_start : USER_PGTABLES_CEILING, start_t);
next ? next->vm_start : USER_PGTABLES_CEILING, start_t,
mm_wr_locked);
tlb_finish_mmu(&tlb);
}
@@ -3163,7 +3164,7 @@ void exit_mmap(struct mm_struct *mm)
mmap_write_lock(mm);
mt_clear_in_rcu(&mm->mm_mt);
free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
USER_PGTABLES_CEILING, vma->vm_end);
USER_PGTABLES_CEILING, vma->vm_end, true);
tlb_finish_mmu(&tlb);
/*