80110bbfbb
syzbot detected a case where the page table counters were not properly
updated.
syzkaller login: ------------[ cut here ]------------
kernel BUG at mm/page_table_check.c:162!
invalid opcode: 0000 [#1] PREEMPT SMP KASAN
CPU: 0 PID: 3099 Comm: pasha Not tainted 5.16.0+ #48
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIO4
RIP: 0010:__page_table_check_zero+0x159/0x1a0
Call Trace:
free_pcp_prepare+0x3be/0xaa0
free_unref_page+0x1c/0x650
free_compound_page+0xec/0x130
free_transhuge_page+0x1be/0x260
__put_compound_page+0x90/0xd0
release_pages+0x54c/0x1060
__pagevec_release+0x7c/0x110
shmem_undo_range+0x85e/0x1250
...
The reproducer involved a huge page that was split because a uprobe event
temporarily replaced one of its subpages. When the huge page was later
collapsed again, the counters went out of sync, because the entries at the
PTE level were never accounted.
Make sure that when a PMD entry is cleared, the PTEs in the page table it
pointed to are checked and accounted before that page table is freed.
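As an illustration only (a sketch modeled on a THP collapse path, not the
verbatim patch; the function name below is made up), the new helper is meant
to run after the PMD entry is cleared and before the PTE page table it
pointed to is freed:

/*
 * Illustrative sketch of a PMD-collapse caller; the real callers live in
 * mm/ and run under the appropriate page table locks.
 */
static void collapse_pmd_sketch(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long haddr, pmd_t *pmdp)
{
	/* Clear the PMD entry that still points to a PTE page table. */
	pmd_t pmd = pmdp_collapse_flush(vma, haddr, pmdp);

	mm_dec_nr_ptes(mm);
	/* Account each PTE in the old table before the table is freed. */
	page_table_check_pte_clear_range(mm, haddr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}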
Link: https://lkml.kernel.org/r/20220131203249.2832273-5-pasha.tatashin@soleen.com
Fixes: df4e817b71 ("mm: page table check")
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Slaby <jirislaby@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Paul Turner <pjt@google.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/page_table_check.h (167 lines, 4.2 KiB, C):
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#ifndef __LINUX_PAGE_TABLE_CHECK_H
#define __LINUX_PAGE_TABLE_CHECK_H

#ifdef CONFIG_PAGE_TABLE_CHECK
#include <linux/jump_label.h>

extern struct static_key_true page_table_check_disabled;
extern struct page_ext_operations page_table_check_ops;

void __page_table_check_zero(struct page *page, unsigned int order);
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte);
void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud);
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd);
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd);

static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_zero(page, order);
}

static inline void page_table_check_free(struct page *page, unsigned int order)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_zero(page, order);
}

static inline void page_table_check_pte_clear(struct mm_struct *mm,
					      unsigned long addr, pte_t pte)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pte_clear(mm, addr, pte);
}

static inline void page_table_check_pmd_clear(struct mm_struct *mm,
					      unsigned long addr, pmd_t pmd)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pmd_clear(mm, addr, pmd);
}

static inline void page_table_check_pud_clear(struct mm_struct *mm,
					      unsigned long addr, pud_t pud)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pud_clear(mm, addr, pud);
}

static inline void page_table_check_pte_set(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    pte_t pte)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pte_set(mm, addr, ptep, pte);
}

static inline void page_table_check_pmd_set(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp,
					    pmd_t pmd)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pmd_set(mm, addr, pmdp, pmd);
}

static inline void page_table_check_pud_set(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp,
					    pud_t pud)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pud_set(mm, addr, pudp, pud);
}

static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
						     unsigned long addr,
						     pmd_t pmd)
{
	if (static_branch_likely(&page_table_check_disabled))
		return;

	__page_table_check_pte_clear_range(mm, addr, pmd);
}

#else

static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
}

static inline void page_table_check_free(struct page *page, unsigned int order)
{
}

static inline void page_table_check_pte_clear(struct mm_struct *mm,
					      unsigned long addr, pte_t pte)
{
}

static inline void page_table_check_pmd_clear(struct mm_struct *mm,
					      unsigned long addr, pmd_t pmd)
{
}

static inline void page_table_check_pud_clear(struct mm_struct *mm,
					      unsigned long addr, pud_t pud)
{
}

static inline void page_table_check_pte_set(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    pte_t pte)
{
}

static inline void page_table_check_pmd_set(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp,
					    pmd_t pmd)
{
}

static inline void page_table_check_pud_set(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp,
					    pud_t pud)
{
}

static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
						     unsigned long addr,
						     pmd_t pmd)
{
}

#endif /* CONFIG_PAGE_TABLE_CHECK */
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
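For context, this is how an architecture is expected to wire these wrappers
into its page table updates (a sketch assuming an x86-like set_pte()
primitive; set_pte_at_checked() is a hypothetical name, not a kernel API):

static inline void set_pte_at_checked(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t pte)
{
	/* Validate and account the new entry before it is installed. */
	page_table_check_pte_set(mm, addr, ptep, pte);
	set_pte(ptep, pte);
}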