mm/userfaultfd: fix uffd-wp handling for THP migration entries
commit 24bf08c4376be417f16ceb609188b16f461b0443 upstream.
Looks like what we fixed for hugetlb in commit 44f86392bdd1 ("mm/hugetlb:
fix uffd-wp handling for migration entries in
hugetlb_change_protection()") similarly applies to THP.
Setting/clearing uffd-wp on THP migration entries is not implemented
properly. Further, while removing migration PMDs considers the uffd-wp
bit, inserting migration PMDs does not consider the uffd-wp bit.
We have to set/clear independently of the migration entry type in
change_huge_pmd() and properly copy the uffd-wp bit in
set_pmd_migration_entry().
Verified using a simple reproducer that triggers migration of a THP that
set_pmd_migration_entry() no longer loses the uffd-wp bit.
Link: https://lkml.kernel.org/r/20230405160236.587705-2-david@redhat.com
Fixes: f45ec5ff16 ("userfaultfd: wp: support swap and page migration")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Cc: <stable@vger.kernel.org>
Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
b1644a0031
commit
cc647e05db
@ -1805,10 +1805,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
|||||||
if (is_swap_pmd(*pmd)) {
|
if (is_swap_pmd(*pmd)) {
|
||||||
swp_entry_t entry = pmd_to_swp_entry(*pmd);
|
swp_entry_t entry = pmd_to_swp_entry(*pmd);
|
||||||
struct page *page = pfn_swap_entry_to_page(entry);
|
struct page *page = pfn_swap_entry_to_page(entry);
|
||||||
|
pmd_t newpmd;
|
||||||
|
|
||||||
VM_BUG_ON(!is_pmd_migration_entry(*pmd));
|
VM_BUG_ON(!is_pmd_migration_entry(*pmd));
|
||||||
if (is_writable_migration_entry(entry)) {
|
if (is_writable_migration_entry(entry)) {
|
||||||
pmd_t newpmd;
|
|
||||||
/*
|
/*
|
||||||
* A protection check is difficult so
|
* A protection check is difficult so
|
||||||
* just be safe and disable write
|
* just be safe and disable write
|
||||||
@ -1822,8 +1822,16 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
|||||||
newpmd = pmd_swp_mksoft_dirty(newpmd);
|
newpmd = pmd_swp_mksoft_dirty(newpmd);
|
||||||
if (pmd_swp_uffd_wp(*pmd))
|
if (pmd_swp_uffd_wp(*pmd))
|
||||||
newpmd = pmd_swp_mkuffd_wp(newpmd);
|
newpmd = pmd_swp_mkuffd_wp(newpmd);
|
||||||
set_pmd_at(mm, addr, pmd, newpmd);
|
} else {
|
||||||
|
newpmd = *pmd;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (uffd_wp)
|
||||||
|
newpmd = pmd_swp_mkuffd_wp(newpmd);
|
||||||
|
else if (uffd_wp_resolve)
|
||||||
|
newpmd = pmd_swp_clear_uffd_wp(newpmd);
|
||||||
|
if (!pmd_same(*pmd, newpmd))
|
||||||
|
set_pmd_at(mm, addr, pmd, newpmd);
|
||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -3233,6 +3241,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
|
|||||||
pmdswp = swp_entry_to_pmd(entry);
|
pmdswp = swp_entry_to_pmd(entry);
|
||||||
if (pmd_soft_dirty(pmdval))
|
if (pmd_soft_dirty(pmdval))
|
||||||
pmdswp = pmd_swp_mksoft_dirty(pmdswp);
|
pmdswp = pmd_swp_mksoft_dirty(pmdswp);
|
||||||
|
if (pmd_uffd_wp(pmdval))
|
||||||
|
pmdswp = pmd_swp_mkuffd_wp(pmdswp);
|
||||||
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
|
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
|
||||||
page_remove_rmap(page, vma, true);
|
page_remove_rmap(page, vma, true);
|
||||||
put_page(page);
|
put_page(page);
|
||||||
|
Loading…
Reference in New Issue
Block a user