Merge "mm: allow vmas with vm_ops to be speculatively handled"
This commit is contained in: commit a265191ce0
include/linux/vm_event_item.h
@@ -115,7 +115,8 @@ enum vm_event_item { PGPGIN, PGPGOUT,
 	SWAP_RA_HIT,
 #endif
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-	SPECULATIVE_PGFAULT,
+	SPECULATIVE_PGFAULT_ANON,
+	SPECULATIVE_PGFAULT_FILE,
 #endif
 	NR_VM_EVENT_ITEMS
 };
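Each new enum entry must have a matching name at the same index in the
vmstat_text[] table (see the mm/vmstat.c hunk at the end of this diff).
For context, count_vm_event() -- quoted from mainline include/linux/vmstat.h,
not part of this diff -- is just a per-cpu increment keyed by this enum:

	/* include/linux/vmstat.h (mainline, shown for context): each event is
	 * a per-cpu counter indexed by enum vm_event_item; /proc/vmstat prints
	 * it under the name vmstat_text[] holds at the same index. */
	static inline void count_vm_event(enum vm_event_item item)
	{
		this_cpu_inc(vm_event_states.event[item]);
	}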
mm/filemap.c
@@ -2404,12 +2404,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vmf->vma_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
@@ -2453,7 +2453,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
 	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma_flags & VM_RAND_READ)
 		return fpin;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
@@ -2476,7 +2476,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  *
- * vma->vm_mm->mmap_sem must be held on entry.
+ * vma->vm_mm->mmap_sem must be held on entry (except FAULT_FLAG_SPECULATIVE).
  *
  * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem
  * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
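The recurring vmf->vma->vm_flags -> vmf->vma_flags substitution is the core
of the filemap changes: a speculative fault must not dereference the vma,
whose fields can change underneath it, so the fault-setup code copies
vm_flags into struct vm_fault up front and the series revalidates the vma's
sequence count before the result is committed. A minimal user-space sketch
of that snapshot-and-revalidate pattern -- all names here are illustrative,
none of this is kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for a vma protected by a sequence count. */
	struct vma_like {
		atomic_uint seq;        /* odd while a writer is mid-update */
		unsigned long vm_flags;
	};

	/* Copy vm_flags out, then confirm no writer ran meanwhile. */
	static bool snapshot_vm_flags(struct vma_like *v, unsigned long *flags)
	{
		unsigned int s = atomic_load_explicit(&v->seq, memory_order_acquire);

		if (s & 1)
			return false;   /* writer active: caller retries */
		*flags = v->vm_flags;   /* the speculative copy */
		atomic_thread_fence(memory_order_acquire);
		return atomic_load_explicit(&v->seq, memory_order_relaxed) == s;
	}

	int main(void)
	{
		struct vma_like v;
		unsigned long flags;

		atomic_init(&v.seq, 0);
		v.vm_flags = 0x1;
		if (snapshot_vm_flags(&v, &flags))
			printf("validated snapshot: %#lx\n", flags);
		return 0;
	}

If the second read of seq disagrees with the first, the snapshot is thrown
away and the fault falls back to the locked path, which is exactly what
VM_FAULT_RETRY signals throughout this diff.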
mm/memory.c
@@ -4063,6 +4063,7 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
 	pte_t entry;
+	vm_fault_t ret = 0;
 
 	if (unlikely(pmd_none(*vmf->pmd))) {
 		/*
@@ -4115,8 +4116,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 	if (!vmf->pte) {
 		if (vma_is_anonymous(vmf->vma))
 			return do_anonymous_page(vmf);
-		else if (vmf->flags & FAULT_FLAG_SPECULATIVE)
-			return VM_FAULT_RETRY;
 		else
 			return do_fault(vmf);
 	}
@@ -4150,10 +4149,12 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 		 */
 		if (vmf->flags & FAULT_FLAG_WRITE)
 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+			ret = VM_FAULT_RETRY;
 	}
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	return 0;
+	return ret;
 }
 
 /*
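With ret threaded through handle_pte_fault(), a speculative fault that finds
the pte already valid (a spurious fault) now reports VM_FAULT_RETRY instead
of 0, so the final fixup is redone under mmap_sem. A hedged sketch of how an
architecture fault handler consumes that contract -- the
handle_speculative_fault() wrapper name and parameter order are assumptions
modelled on __handle_speculative_fault() above, not verbatim from this tree,
and the usual vma bounds and access checks are elided:

	static vm_fault_t do_user_fault(struct mm_struct *mm,
					unsigned long address, unsigned int flags)
	{
		struct vm_area_struct *vma = NULL;
		vm_fault_t fault;

		/* Fast path: no mmap_sem. Anything but VM_FAULT_RETRY is final. */
		fault = handle_speculative_fault(mm, address, flags, &vma);
		if (fault != VM_FAULT_RETRY)
			return fault;

		/* Slow path: the classic walk under the lock. */
		down_read(&mm->mmap_sem);
		if (!vma)
			vma = find_vma(mm, address);
		/* ... vma bounds and permission checks elided ... */
		fault = handle_mm_fault(vma, address, flags);
		up_read(&mm->mmap_sem);
		return fault;
	}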
@@ -4298,22 +4299,12 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 		return VM_FAULT_RETRY;
 	}
 
-	/*
-	 * Can't call vm_ops service has we don't know what they would do
-	 * with the VMA.
-	 * This include huge page from hugetlbfs.
-	 */
-	if (vmf.vma->vm_ops) {
-		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
-		return VM_FAULT_RETRY;
-	}
-
 	/*
 	 * __anon_vma_prepare() requires the mmap_sem to be held
 	 * because vm_next and vm_prev must be safe. This can't be guaranteed
 	 * in the speculative path.
 	 */
-	if (unlikely(!vmf.vma->anon_vma)) {
+	if (unlikely(vma_is_anonymous(vmf.vma) && !vmf.vma->anon_vma)) {
 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
 		return VM_FAULT_RETRY;
 	}
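The vma_is_anonymous() qualifier added above is what makes the vm_ops
bail-out removable: anonymous is literally defined as "no vm_ops", so once
vm_ops-backed vmas are allowed through, only the anonymous ones still need
an already-prepared anon_vma. For reference, from include/linux/mm.h
(unchanged by this patch):

	static inline bool vma_is_anonymous(struct vm_area_struct *vma)
	{
		return !vma->vm_ops;
	}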
@@ -4455,7 +4446,10 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	 * If there is no need to retry, don't return the vma to the caller.
 	 */
 	if (ret != VM_FAULT_RETRY) {
-		count_vm_event(SPECULATIVE_PGFAULT);
+		if (vma_is_anonymous(vmf.vma))
+			count_vm_event(SPECULATIVE_PGFAULT_ANON);
+		else
+			count_vm_event(SPECULATIVE_PGFAULT_FILE);
 		put_vma(vmf.vma);
 		*vma = NULL;
 	}
mm/shmem.c
@@ -2057,10 +2057,10 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 
 	sgp = SGP_CACHE;
 
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
+	if ((vmf->vma_flags & VM_NOHUGEPAGE) ||
 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
 		sgp = SGP_NOHUGE;
-	else if (vma->vm_flags & VM_HUGEPAGE)
+	else if (vmf->vma_flags & VM_HUGEPAGE)
 		sgp = SGP_HUGE;
 
 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
mm/vmstat.c
@@ -1301,7 +1301,8 @@ const char * const vmstat_text[] = {
 	"swap_ra_hit",
 #endif
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-	"speculative_pgfault"
+	"speculative_pgfault_anon",
+	"speculative_pgfault_file",
 #endif
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 };