hugetlb: kill hugetlbfs_pagecache_page()
Fold hugetlbfs_pagecache_page() into its sole caller to remove some duplicated code. No functional change intended.

Link: https://lkml.kernel.org/r/20220901120030.63318-8-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
12658abfc5
commit
29be84265f
15
mm/hugetlb.c
15
mm/hugetlb.c
@@ -5433,19 +5433,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Return the pagecache page at a given address within a VMA */
|
||||
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
|
||||
struct vm_area_struct *vma, unsigned long address)
|
||||
{
|
||||
struct address_space *mapping;
|
||||
pgoff_t idx;
|
||||
|
||||
mapping = vma->vm_file->f_mapping;
|
||||
idx = vma_hugecache_offset(h, vma, address);
|
||||
|
||||
return find_lock_page(mapping, idx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return whether there is a pagecache page to back given address within VMA.
|
||||
* Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
|
||||
@@ -5840,7 +5827,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
/* Just decrements count, does not deallocate */
|
||||
vma_end_reservation(h, vma, haddr);
|
||||
|
||||
pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr);
|
||||
pagecache_page = find_lock_page(mapping, idx);
|
||||
}
|
||||
|
||||
ptl = huge_pte_lock(h, mm, ptep);
|
||||
|
Loading…
Reference in New Issue
Block a user