ANDROID: userfaultfd: Fix use-after-free in userfaultfd_using_sigbus()

In 582c6d188ec1 ("ANDROID: userfaultfd: allow SPF for
UFFD_FEATURE_SIGBUS on private+anon"), we allowed userfaultfd
registered VMAs using SIGBUS to be handled with SPF. But during
page-fault handling, before userfaultfd_ctx is dereferenced,
another thread may call userfaultfd_release(), unlink the VMA
and then deallocate the same userfaultfd_ctx, leaving a dangling
pointer behind for dereference.

It is insufficient to perform the access under the RCU read-lock alone,
as the context may already have been deallocated before the critical
section is entered. Checking that the vma has not changed inside the
critical section ensures we are not dereferencing a dangling pointer to
the userfaultfd_ctx.

Change-Id: I9c3ba0f1352e49f0ea387b92c18b5f1b5dcad7f1
Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
Bug: 349936398
(cherry picked from commit c75b369e72da0283a20f794c0070c478b490f453)
This commit is contained in:
Lokesh Gidra 2024-06-30 00:31:34 -07:00
parent 441ca240dd
commit 6f61666ab1
5 changed files with 30 additions and 22 deletions

View File

@@ -358,18 +358,26 @@ static inline long userfaultfd_get_blocking_state(unsigned int flags)
}
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
bool userfaultfd_using_sigbus(struct vm_area_struct *vma)
bool userfaultfd_using_sigbus(struct vm_fault *vmf)
{
struct userfaultfd_ctx *ctx;
bool ret;
bool ret = false;
/*
* Do it inside RCU section to ensure that the ctx doesn't
* disappear under us.
*/
rcu_read_lock();
ctx = rcu_dereference(vma->vm_userfaultfd_ctx.ctx);
ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS);
/*
* Ensure that we are not looking at dangling pointer to
* userfaultfd_ctx, which could happen if userfaultfd_release() is
* called and vma is unlinked.
*/
if (!vma_has_changed(vmf)) {
struct userfaultfd_ctx *ctx;
ctx = rcu_dereference(vmf->vma->vm_userfaultfd_ctx.ctx);
ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS);
}
rcu_read_unlock();
return ret;
}

View File

@@ -1776,6 +1776,20 @@ static inline void vm_write_end(struct vm_area_struct *vma)
{
raw_write_seqcount_end(&vma->vm_sequence);
}
/*
 * Check whether the vma backing a speculative page fault has been
 * unlinked from the mm rbtree or modified since vmf->sequence was
 * sampled. Returns true if the speculative fault must not trust
 * the vma (caller should abort/retry the fault).
 */
static inline bool vma_has_changed(struct vm_fault *vmf)
{
/* Non-empty rb node means the vma is still linked; empty means it was
 * erased (e.g. by unmap or, per this patch, userfaultfd_release()). */
int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb);
unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence);
/*
 * Matches both the wmb in write_seqlock_{begin,end}() and
 * the wmb in vma_rb_erase().
 */
smp_rmb();
/* Changed if erased from the tree OR the sequence count moved on. */
return ret || seq != vmf->sequence;
}
#else
static inline void vm_write_begin(struct vm_area_struct *vma)
{

View File

@@ -40,7 +40,7 @@ extern int sysctl_unprivileged_userfaultfd;
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
extern bool userfaultfd_using_sigbus(struct vm_area_struct *vma);
extern bool userfaultfd_using_sigbus(struct vm_fault *vmf);
#endif
/*

View File

@@ -40,20 +40,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
extern struct vm_area_struct *get_vma(struct mm_struct *mm,
unsigned long addr);
extern void put_vma(struct vm_area_struct *vma);
/*
 * Check whether the vma backing a speculative page fault has been
 * unlinked from the mm rbtree or modified since vmf->sequence was
 * sampled. Returns true if the caller must abort/retry the fault.
 * (This copy is being moved out of mm/internal.h by this patch so the
 * helper is visible to fs/userfaultfd.c.)
 */
static inline bool vma_has_changed(struct vm_fault *vmf)
{
/* Empty rb node means the vma was erased from the mm rbtree. */
int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb);
unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence);
/*
 * Matches both the wmb in write_seqlock_{begin,end}() and
 * the wmb in vma_rb_erase().
 */
smp_rmb();
/* Changed if erased from the tree OR the sequence count moved on. */
return ret || seq != vmf->sequence;
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,

View File

@@ -5058,6 +5058,7 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags);
vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot);
vmf.sequence = seq;
#ifdef CONFIG_USERFAULTFD
/*
@@ -5067,7 +5068,7 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
if (unlikely(vmf.vma_flags & __VM_UFFD_FLAGS)) {
uffd_missing_sigbus = vma_is_anonymous(vmf.vma) &&
(vmf.vma_flags & VM_UFFD_MISSING) &&
userfaultfd_using_sigbus(vmf.vma);
userfaultfd_using_sigbus(&vmf);
if (!uffd_missing_sigbus) {
trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
return VM_FAULT_RETRY;
@@ -5193,7 +5194,6 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
vmf.pte = NULL;
}
vmf.sequence = seq;
vmf.flags = flags;
local_irq_enable();