BACKPORT: userfaultfd: use per-vma locks in userfaultfd operations
All userfaultfd operations, except write-protect, opportunistically use
per-vma locks to lock vmas. On failure, attempt again inside mmap_lock
critical section. Write-protect operation requires mmap_lock as it
iterates over multiple vmas.

Link: https://lkml.kernel.org/r/20240215182756.3448972-5-lokeshgidra@google.com
Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Nicolas Geoffray <ngeoffray@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tim Murray <timmurray@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit 867a43a34ff8a38772212045262b2c9b77807ea3)

Conflicts: mm/userfaultfd.c
1. Resolve conflict in validate_dst_vma() due to absence of range_in_vma().
2. Use 'page' instead of 'folio' for BUG_ON on copy_from_user() failure in COPY ioctl.
3. Resolve conflict around mfill_file_over_size().
4. Resolve conflict in comment for __mcopy_atomic_hugetlb() due to function name change.
5. Resolve conflict due to use of 'flags' instead of 'mode' in __mcopy_atomic_hugetlb().
6. Use find_vma() and validate_dst_vma() in mwriteprotect_range() instead of find_dst_vma().

Bug: 320478828
Change-Id: I6d5b7101218cb1b11329108c3f31f12bb1caebc6
Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
This commit is contained in:
parent
daf0b0fc4a
commit
ce2896c0c6
@@ -1946,17 +1946,8 @@ static int userfaultfd_move(struct userfaultfd_ctx *ctx,
|
||||
return -EINVAL;
|
||||
|
||||
if (mmget_not_zero(mm)) {
|
||||
mmap_read_lock(mm);
|
||||
|
||||
/* Re-check after taking map_changing_lock */
|
||||
down_read(&ctx->map_changing_lock);
|
||||
if (likely(!atomic_read(&ctx->mmap_changing)))
|
||||
ret = move_pages(ctx, mm, uffdio_move.dst, uffdio_move.src,
|
||||
uffdio_move.len, uffdio_move.mode);
|
||||
else
|
||||
ret = -EAGAIN;
|
||||
up_read(&ctx->map_changing_lock);
|
||||
mmap_read_unlock(mm);
|
||||
ret = move_pages(ctx, uffdio_move.dst, uffdio_move.src,
|
||||
uffdio_move.len, uffdio_move.mode);
|
||||
mmput(mm);
|
||||
} else {
|
||||
return -ESRCH;
|
||||
|
@@ -121,9 +121,8 @@ extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
|
||||
/* move_pages */
|
||||
void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
|
||||
void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
|
||||
ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
|
||||
unsigned long dst_start, unsigned long src_start,
|
||||
unsigned long len, __u64 flags);
|
||||
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
|
||||
unsigned long src_start, unsigned long len, __u64 flags);
|
||||
int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
|
||||
struct vm_area_struct *dst_vma,
|
||||
struct vm_area_struct *src_vma,
|
||||
|
@@ -1915,7 +1915,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
||||
|
||||
#ifdef CONFIG_USERFAULTFD
|
||||
/*
|
||||
* The PT lock for src_pmd and the mmap_lock for reading are held by
|
||||
* The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
|
||||
* the caller, but it must return after releasing the page_table_lock.
|
||||
* Just move the page from src_pmd to dst_pmd if possible.
|
||||
* Return zero if succeeded in moving the page, -EAGAIN if it needs to be
|
||||
@@ -1938,7 +1938,8 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
|
||||
src_ptl = pmd_lockptr(mm, src_pmd);
|
||||
|
||||
lockdep_assert_held(src_ptl);
|
||||
mmap_assert_locked(mm);
|
||||
vma_assert_locked(src_vma);
|
||||
vma_assert_locked(dst_vma);
|
||||
|
||||
/* Sanity checks before the operation */
|
||||
if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
|
||||
|
392
mm/userfaultfd.c
392
mm/userfaultfd.c
@@ -20,23 +20,11 @@
|
||||
#include "internal.h"
|
||||
|
||||
static __always_inline
|
||||
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
|
||||
unsigned long dst_start,
|
||||
unsigned long len)
|
||||
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
|
||||
{
|
||||
/*
|
||||
* Make sure that the dst range is both valid and fully within a
|
||||
* single existing vma.
|
||||
*/
|
||||
struct vm_area_struct *dst_vma;
|
||||
|
||||
dst_vma = find_vma(dst_mm, dst_start);
|
||||
if (!dst_vma)
|
||||
return NULL;
|
||||
|
||||
if (dst_start < dst_vma->vm_start ||
|
||||
dst_start + len > dst_vma->vm_end)
|
||||
return NULL;
|
||||
/* Make sure that the dst range is fully within dst_vma. */
|
||||
if (dst_end > dst_vma->vm_end)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Check the vma is registered in uffd, this is required to
|
||||
@@ -44,11 +32,122 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
|
||||
* time.
|
||||
*/
|
||||
if (!dst_vma->vm_userfaultfd_ctx.ctx)
|
||||
return NULL;
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
|
||||
unsigned long addr)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
mmap_assert_locked(mm);
|
||||
vma = vma_lookup(mm, addr);
|
||||
if (!vma)
|
||||
vma = ERR_PTR(-ENOENT);
|
||||
else if (!(vma->vm_flags & VM_SHARED) &&
|
||||
unlikely(anon_vma_prepare(vma)))
|
||||
vma = ERR_PTR(-ENOMEM);
|
||||
|
||||
return vma;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
/*
|
||||
* lock_vma() - Lookup and lock vma corresponding to @address.
|
||||
* @mm: mm to search vma in.
|
||||
* @address: address that the vma should contain.
|
||||
*
|
||||
* Should be called without holding mmap_lock. vma should be unlocked after use
|
||||
* with unlock_vma().
|
||||
*
|
||||
* Return: A locked vma containing @address, -ENOENT if no vma is found, or
|
||||
* -ENOMEM if anon_vma couldn't be allocated.
|
||||
*/
|
||||
static struct vm_area_struct *lock_vma(struct mm_struct *mm,
|
||||
unsigned long address)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
vma = lock_vma_under_rcu(mm, address);
|
||||
if (vma) {
|
||||
/*
|
||||
* lock_vma_under_rcu() only checks anon_vma for private
|
||||
* anonymous mappings. But we need to ensure it is assigned in
|
||||
* private file-backed vmas as well.
|
||||
*/
|
||||
if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
|
||||
vma_end_read(vma);
|
||||
else
|
||||
return vma;
|
||||
}
|
||||
|
||||
mmap_read_lock(mm);
|
||||
vma = find_vma_and_prepare_anon(mm, address);
|
||||
if (!IS_ERR(vma)) {
|
||||
/*
|
||||
* We cannot use vma_start_read() as it may fail due to
|
||||
* false locked (see comment in vma_start_read()). We
|
||||
* can avoid that by directly locking vm_lock under
|
||||
* mmap_lock, which guarantees that nobody can lock the
|
||||
* vma for write (vma_start_write()) under us.
|
||||
*/
|
||||
down_read(&vma->vm_lock->lock);
|
||||
}
|
||||
|
||||
mmap_read_unlock(mm);
|
||||
return vma;
|
||||
}
|
||||
|
||||
static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
|
||||
unsigned long dst_start,
|
||||
unsigned long len)
|
||||
{
|
||||
struct vm_area_struct *dst_vma;
|
||||
|
||||
dst_vma = lock_vma(dst_mm, dst_start);
|
||||
if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
|
||||
return dst_vma;
|
||||
|
||||
vma_end_read(dst_vma);
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
static void uffd_mfill_unlock(struct vm_area_struct *vma)
|
||||
{
|
||||
vma_end_read(vma);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
|
||||
unsigned long dst_start,
|
||||
unsigned long len)
|
||||
{
|
||||
struct vm_area_struct *dst_vma;
|
||||
|
||||
mmap_read_lock(dst_mm);
|
||||
dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
|
||||
if (IS_ERR(dst_vma))
|
||||
goto out_unlock;
|
||||
|
||||
if (validate_dst_vma(dst_vma, dst_start + len))
|
||||
return dst_vma;
|
||||
|
||||
dst_vma = ERR_PTR(-ENOENT);
|
||||
out_unlock:
|
||||
mmap_read_unlock(dst_mm);
|
||||
return dst_vma;
|
||||
}
|
||||
|
||||
static void uffd_mfill_unlock(struct vm_area_struct *vma)
|
||||
{
|
||||
mmap_read_unlock(vma->vm_mm);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Install PTEs, to map dst_addr (within dst_vma) to page.
|
||||
*
|
||||
@@ -320,7 +419,8 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
/*
|
||||
* __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
|
||||
* called with mmap_lock held, it will release mmap_lock before returning.
|
||||
* called with either vma-lock or mmap_lock held, it will release the lock
|
||||
* before returning.
|
||||
*/
|
||||
static __always_inline ssize_t __mcopy_atomic_hugetlb(
|
||||
struct userfaultfd_ctx *ctx,
|
||||
@@ -332,7 +432,6 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(
|
||||
bool wp_copy)
|
||||
{
|
||||
struct mm_struct *dst_mm = dst_vma->vm_mm;
|
||||
int vm_shared = dst_vma->vm_flags & VM_SHARED;
|
||||
ssize_t err;
|
||||
pte_t *dst_pte;
|
||||
unsigned long src_addr, dst_addr;
|
||||
@@ -351,7 +450,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(
|
||||
*/
|
||||
if (mode == MCOPY_ATOMIC_ZEROPAGE) {
|
||||
up_read(&ctx->map_changing_lock);
|
||||
mmap_read_unlock(dst_mm);
|
||||
uffd_mfill_unlock(dst_vma);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -374,24 +473,28 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(
|
||||
* retry, dst_vma will be set to NULL and we must lookup again.
|
||||
*/
|
||||
if (!dst_vma) {
|
||||
dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
|
||||
if (IS_ERR(dst_vma)) {
|
||||
err = PTR_ERR(dst_vma);
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = -ENOENT;
|
||||
dst_vma = find_dst_vma(dst_mm, dst_start, len);
|
||||
if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
|
||||
goto out_unlock;
|
||||
if (!is_vm_hugetlb_page(dst_vma))
|
||||
goto out_unlock_vma;
|
||||
|
||||
err = -EINVAL;
|
||||
if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
|
||||
goto out_unlock;
|
||||
goto out_unlock_vma;
|
||||
|
||||
vm_shared = dst_vma->vm_flags & VM_SHARED;
|
||||
}
|
||||
|
||||
/*
|
||||
* If not shared, ensure the dst_vma has a anon_vma.
|
||||
*/
|
||||
err = -ENOMEM;
|
||||
if (!vm_shared) {
|
||||
if (unlikely(anon_vma_prepare(dst_vma)))
|
||||
/*
|
||||
* If memory mappings are changing because of non-cooperative
|
||||
* operation (e.g. mremap) running in parallel, bail out and
|
||||
* request the user to retry later
|
||||
*/
|
||||
down_read(&ctx->map_changing_lock);
|
||||
err = -EAGAIN;
|
||||
if (atomic_read(&ctx->mmap_changing))
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
@@ -437,7 +540,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(
|
||||
|
||||
if (unlikely(err == -ENOENT)) {
|
||||
up_read(&ctx->map_changing_lock);
|
||||
mmap_read_unlock(dst_mm);
|
||||
uffd_mfill_unlock(dst_vma);
|
||||
BUG_ON(!page);
|
||||
|
||||
err = copy_huge_page_from_user(page,
|
||||
@@ -448,17 +551,6 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(
|
||||
err = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
mmap_read_lock(dst_mm);
|
||||
down_read(&ctx->map_changing_lock);
|
||||
/*
|
||||
* If memory mappings are changing because of non-cooperative
|
||||
* operation (e.g. mremap) running in parallel, bail out and
|
||||
* request the user to retry later
|
||||
*/
|
||||
if (atomic_read(&ctx->mmap_changing)) {
|
||||
err = -EAGAIN;
|
||||
break;
|
||||
}
|
||||
|
||||
dst_vma = NULL;
|
||||
goto retry;
|
||||
@@ -479,7 +571,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(
|
||||
|
||||
out_unlock:
|
||||
up_read(&ctx->map_changing_lock);
|
||||
mmap_read_unlock(dst_mm);
|
||||
out_unlock_vma:
|
||||
uffd_mfill_unlock(dst_vma);
|
||||
out:
|
||||
if (page)
|
||||
put_page(page);
|
||||
@@ -574,7 +667,15 @@ static __always_inline ssize_t __mcopy_atomic(struct userfaultfd_ctx *ctx,
|
||||
copied = 0;
|
||||
page = NULL;
|
||||
retry:
|
||||
mmap_read_lock(dst_mm);
|
||||
/*
|
||||
* Make sure the vma is not shared, that the dst range is
|
||||
* both valid and fully within a single existing vma.
|
||||
*/
|
||||
dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
|
||||
if (IS_ERR(dst_vma)) {
|
||||
err = PTR_ERR(dst_vma);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* If memory mappings are changing because of non-cooperative
|
||||
@@ -586,15 +687,6 @@ static __always_inline ssize_t __mcopy_atomic(struct userfaultfd_ctx *ctx,
|
||||
if (atomic_read(&ctx->mmap_changing))
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* Make sure the vma is not shared, that the dst range is
|
||||
* both valid and fully within a single existing vma.
|
||||
*/
|
||||
err = -ENOENT;
|
||||
dst_vma = find_dst_vma(dst_mm, dst_start, len);
|
||||
if (!dst_vma)
|
||||
goto out_unlock;
|
||||
|
||||
err = -EINVAL;
|
||||
/*
|
||||
* shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
|
||||
@@ -625,16 +717,6 @@ static __always_inline ssize_t __mcopy_atomic(struct userfaultfd_ctx *ctx,
|
||||
if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* Ensure the dst_vma has a anon_vma or this page
|
||||
* would get a NULL anon_vma when moved in the
|
||||
* dst_vma.
|
||||
*/
|
||||
err = -ENOMEM;
|
||||
if (!(dst_vma->vm_flags & VM_SHARED) &&
|
||||
unlikely(anon_vma_prepare(dst_vma)))
|
||||
goto out_unlock;
|
||||
|
||||
while (src_addr < src_start + len) {
|
||||
pmd_t dst_pmdval;
|
||||
|
||||
@@ -677,7 +759,7 @@ static __always_inline ssize_t __mcopy_atomic(struct userfaultfd_ctx *ctx,
|
||||
void *page_kaddr;
|
||||
|
||||
up_read(&ctx->map_changing_lock);
|
||||
mmap_read_unlock(dst_mm);
|
||||
uffd_mfill_unlock(dst_vma);
|
||||
BUG_ON(!page);
|
||||
|
||||
page_kaddr = kmap_local_page(page);
|
||||
@@ -708,7 +790,7 @@ static __always_inline ssize_t __mcopy_atomic(struct userfaultfd_ctx *ctx,
|
||||
|
||||
out_unlock:
|
||||
up_read(&ctx->map_changing_lock);
|
||||
mmap_read_unlock(dst_mm);
|
||||
uffd_mfill_unlock(dst_vma);
|
||||
out:
|
||||
if (page)
|
||||
put_page(page);
|
||||
@@ -784,10 +866,12 @@ int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
|
||||
goto out_unlock;
|
||||
|
||||
err = -ENOENT;
|
||||
dst_vma = find_dst_vma(dst_mm, start, len);
|
||||
dst_vma = find_vma(dst_mm, start);
|
||||
|
||||
if (!dst_vma)
|
||||
goto out_unlock;
|
||||
if (start < dst_vma->vm_start || !validate_dst_vma(dst_vma, start + len))
|
||||
goto out_unlock;
|
||||
if (!userfaultfd_wp(dst_vma))
|
||||
goto out_unlock;
|
||||
if (!vma_can_userfault(dst_vma, dst_vma->vm_flags))
|
||||
@@ -1217,27 +1301,136 @@ static int validate_move_areas(struct userfaultfd_ctx *ctx,
|
||||
if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Ensure the dst_vma has a anon_vma or this page
|
||||
* would get a NULL anon_vma when moved in the
|
||||
* dst_vma.
|
||||
*/
|
||||
if (unlikely(anon_vma_prepare(dst_vma)))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
int find_vmas_mm_locked(struct mm_struct *mm,
|
||||
unsigned long dst_start,
|
||||
unsigned long src_start,
|
||||
struct vm_area_struct **dst_vmap,
|
||||
struct vm_area_struct **src_vmap)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
mmap_assert_locked(mm);
|
||||
vma = find_vma_and_prepare_anon(mm, dst_start);
|
||||
if (IS_ERR(vma))
|
||||
return PTR_ERR(vma);
|
||||
|
||||
*dst_vmap = vma;
|
||||
/* Skip finding src_vma if src_start is in dst_vma */
|
||||
if (src_start >= vma->vm_start && src_start < vma->vm_end)
|
||||
goto out_success;
|
||||
|
||||
vma = vma_lookup(mm, src_start);
|
||||
if (!vma)
|
||||
return -ENOENT;
|
||||
out_success:
|
||||
*src_vmap = vma;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PER_VMA_LOCK
|
||||
static int uffd_move_lock(struct mm_struct *mm,
|
||||
unsigned long dst_start,
|
||||
unsigned long src_start,
|
||||
struct vm_area_struct **dst_vmap,
|
||||
struct vm_area_struct **src_vmap)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
int err;
|
||||
|
||||
vma = lock_vma(mm, dst_start);
|
||||
if (IS_ERR(vma))
|
||||
return PTR_ERR(vma);
|
||||
|
||||
*dst_vmap = vma;
|
||||
/*
|
||||
* Skip finding src_vma if src_start is in dst_vma. This also ensures
|
||||
* that we don't lock the same vma twice.
|
||||
*/
|
||||
if (src_start >= vma->vm_start && src_start < vma->vm_end) {
|
||||
*src_vmap = vma;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Using lock_vma() to get src_vma can lead to following deadlock:
|
||||
*
|
||||
* Thread1 Thread2
|
||||
* ------- -------
|
||||
* vma_start_read(dst_vma)
|
||||
* mmap_write_lock(mm)
|
||||
* vma_start_write(src_vma)
|
||||
* vma_start_read(src_vma)
|
||||
* mmap_read_lock(mm)
|
||||
* vma_start_write(dst_vma)
|
||||
*/
|
||||
*src_vmap = lock_vma_under_rcu(mm, src_start);
|
||||
if (likely(*src_vmap))
|
||||
return 0;
|
||||
|
||||
/* Undo any locking and retry in mmap_lock critical section */
|
||||
vma_end_read(*dst_vmap);
|
||||
|
||||
mmap_read_lock(mm);
|
||||
err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
|
||||
if (!err) {
|
||||
/*
|
||||
* See comment in lock_vma() as to why not using
|
||||
* vma_start_read() here.
|
||||
*/
|
||||
down_read(&(*dst_vmap)->vm_lock->lock);
|
||||
if (*dst_vmap != *src_vmap)
|
||||
down_read(&(*src_vmap)->vm_lock->lock);
|
||||
}
|
||||
mmap_read_unlock(mm);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void uffd_move_unlock(struct vm_area_struct *dst_vma,
|
||||
struct vm_area_struct *src_vma)
|
||||
{
|
||||
vma_end_read(src_vma);
|
||||
if (src_vma != dst_vma)
|
||||
vma_end_read(dst_vma);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static int uffd_move_lock(struct mm_struct *mm,
|
||||
unsigned long dst_start,
|
||||
unsigned long src_start,
|
||||
struct vm_area_struct **dst_vmap,
|
||||
struct vm_area_struct **src_vmap)
|
||||
{
|
||||
int err;
|
||||
|
||||
mmap_read_lock(mm);
|
||||
err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
|
||||
if (err)
|
||||
mmap_read_unlock(mm);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void uffd_move_unlock(struct vm_area_struct *dst_vma,
|
||||
struct vm_area_struct *src_vma)
|
||||
{
|
||||
mmap_assert_locked(src_vma->vm_mm);
|
||||
mmap_read_unlock(dst_vma->vm_mm);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* move_pages - move arbitrary anonymous pages of an existing vma
|
||||
* @ctx: pointer to the userfaultfd context
|
||||
* @mm: the address space to move pages
|
||||
* @dst_start: start of the destination virtual memory range
|
||||
* @src_start: start of the source virtual memory range
|
||||
* @len: length of the virtual memory range
|
||||
* @mode: flags from uffdio_move.mode
|
||||
*
|
||||
* Must be called with mmap_lock held for read.
|
||||
* It will either use the mmap_lock in read mode or per-vma locks
|
||||
*
|
||||
* move_pages() remaps arbitrary anonymous pages atomically in zero
|
||||
* copy. It only works on non shared anonymous pages because those can
|
||||
@@ -1305,10 +1498,10 @@ static int validate_move_areas(struct userfaultfd_ctx *ctx,
|
||||
* could be obtained. This is the only additional complexity added to
|
||||
* the rmap code to provide this anonymous page remapping functionality.
|
||||
*/
|
||||
ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
|
||||
unsigned long dst_start, unsigned long src_start,
|
||||
unsigned long len, __u64 mode)
|
||||
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
|
||||
unsigned long src_start, unsigned long len, __u64 mode)
|
||||
{
|
||||
struct mm_struct *mm = ctx->mm;
|
||||
struct vm_area_struct *src_vma, *dst_vma;
|
||||
unsigned long src_addr, dst_addr;
|
||||
pmd_t *src_pmd, *dst_pmd;
|
||||
@@ -1326,28 +1519,34 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
|
||||
WARN_ON_ONCE(dst_start + len <= dst_start))
|
||||
goto out;
|
||||
|
||||
err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Re-check after taking map_changing_lock */
|
||||
err = -EAGAIN;
|
||||
down_read(&ctx->map_changing_lock);
|
||||
if (likely(atomic_read(&ctx->mmap_changing)))
|
||||
goto out_unlock;
|
||||
/*
|
||||
* Make sure the vma is not shared, that the src and dst remap
|
||||
* ranges are both valid and fully within a single existing
|
||||
* vma.
|
||||
*/
|
||||
src_vma = find_vma(mm, src_start);
|
||||
if (!src_vma || (src_vma->vm_flags & VM_SHARED))
|
||||
goto out;
|
||||
if (src_start < src_vma->vm_start ||
|
||||
src_start + len > src_vma->vm_end)
|
||||
goto out;
|
||||
err = -EINVAL;
|
||||
if (src_vma->vm_flags & VM_SHARED)
|
||||
goto out_unlock;
|
||||
if (src_start + len > src_vma->vm_end)
|
||||
goto out_unlock;
|
||||
|
||||
dst_vma = find_vma(mm, dst_start);
|
||||
if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
|
||||
goto out;
|
||||
if (dst_start < dst_vma->vm_start ||
|
||||
dst_start + len > dst_vma->vm_end)
|
||||
goto out;
|
||||
if (dst_vma->vm_flags & VM_SHARED)
|
||||
goto out_unlock;
|
||||
if (dst_start + len > dst_vma->vm_end)
|
||||
goto out_unlock;
|
||||
|
||||
err = validate_move_areas(ctx, src_vma, dst_vma);
|
||||
if (err)
|
||||
goto out;
|
||||
goto out_unlock;
|
||||
|
||||
for (src_addr = src_start, dst_addr = dst_start;
|
||||
src_addr < src_start + len;) {
|
||||
@@ -1464,6 +1663,9 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
|
||||
moved += step_size;
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
up_read(&ctx->map_changing_lock);
|
||||
uffd_move_unlock(dst_vma, src_vma);
|
||||
out:
|
||||
VM_WARN_ON(moved < 0);
|
||||
VM_WARN_ON(err > 0);
|
||||
|
Loading…
Reference in New Issue
Block a user