android_kernel_xiaomi_sm8450/mm/pagewalk.c
Greg Kroah-Hartman e0f0b200a5 This is the 5.10.142 stable release

Merge 5.10.142 into android12-5.10-lts

Changes in 5.10.142
	drm/msm/dsi: fix the inconsistent indenting
	drm/msm/dp: delete DP_RECOVERED_CLOCK_OUT_EN to fix tps4
	drm/msm/dsi: Fix number of regulators for msm8996_dsi_cfg
	drm/msm/dsi: Fix number of regulators for SDM660
	platform/x86: pmc_atom: Fix SLP_TYPx bitfield mask
	iio: adc: mcp3911: make use of the sign bit
	bpf, cgroup: Fix kernel BUG in purge_effective_progs
	ieee802154/adf7242: defer destroy_workqueue call
	ALSA: hda: intel-nhlt: remove use of __func__ in dev_dbg
	ALSA: hda: intel-nhlt: Correct the handling of fmt_config flexible array
	wifi: cfg80211: debugfs: fix return type in ht40allow_map_read()
	Revert "xhci: turn off port power in shutdown"
	net: sched: tbf: don't call qdisc_put() while holding tree lock
	net/sched: fix netdevice reference leaks in attach_default_qdiscs()
	ethernet: rocker: fix sleep in atomic context bug in neigh_timer_handler
	kcm: fix strp_init() order and cleanup
	sch_cake: Return __NET_XMIT_STOLEN when consuming enqueued skb
	tcp: annotate data-race around challenge_timestamp
	Revert "sch_cake: Return __NET_XMIT_STOLEN when consuming enqueued skb"
	net/smc: Remove redundant refcount increase
	serial: fsl_lpuart: RS485 RTS polariy is inverse
	staging: rtl8712: fix use after free bugs
	powerpc: align syscall table for ppc32
	vt: Clear selection before changing the font
	tty: serial: lpuart: disable flow control while waiting for the transmit engine to complete
	Input: iforce - wake up after clearing IFORCE_XMIT_RUNNING flag
	iio: ad7292: Prevent regulator double disable
	iio: adc: mcp3911: use correct formula for AD conversion
	misc: fastrpc: fix memory corruption on probe
	misc: fastrpc: fix memory corruption on open
	USB: serial: ftdi_sio: add Omron CS1W-CIF31 device id
	binder: fix UAF of ref->proc caused by race condition
	drm/i915/reg: Fix spelling mistake "Unsupport" -> "Unsupported"
	clk: core: Honor CLK_OPS_PARENT_ENABLE for clk gate ops
	Revert "clk: core: Honor CLK_OPS_PARENT_ENABLE for clk gate ops"
	clk: core: Fix runtime PM sequence in clk_core_unprepare()
	Input: rk805-pwrkey - fix module autoloading
	clk: bcm: rpi: Fix error handling of raspberrypi_fw_get_rate
	clk: bcm: rpi: Use correct order for the parameters of devm_kcalloc()
	clk: bcm: rpi: Prevent out-of-bounds access
	clk: bcm: rpi: Add missing newline
	hwmon: (gpio-fan) Fix array out of bounds access
	gpio: pca953x: Add mutex_lock for regcache sync in PM
	KVM: x86: Mask off unsupported and unknown bits of IA32_ARCH_CAPABILITIES
	xen/grants: prevent integer overflow in gnttab_dma_alloc_pages()
	mm: pagewalk: Fix race between unmap and page walker
	xen-blkback: Advertise feature-persistent as user requested
	xen-blkfront: Advertise feature-persistent as user requested
	thunderbolt: Use the actual buffer in tb_async_error()
	media: mceusb: Use new usb_control_msg_*() routines
	xhci: Add grace period after xHC start to prevent premature runtime suspend.
	USB: serial: cp210x: add Decagon UCA device id
	USB: serial: option: add support for OPPO R11 diag port
	USB: serial: option: add Quectel EM060K modem
	USB: serial: option: add support for Cinterion MV32-WA/WB RmNet mode
	usb: typec: altmodes/displayport: correct pin assignment for UFP receptacles
	usb: dwc2: fix wrong order of phy_power_on and phy_init
	USB: cdc-acm: Add Icom PMR F3400 support (0c26:0020)
	usb-storage: Add ignore-residue quirk for NXP PN7462AU
	s390/hugetlb: fix prepare_hugepage_range() check for 2 GB hugepages
	s390: fix nospec table alignments
	USB: core: Prevent nested device-reset calls
	usb: gadget: mass_storage: Fix cdrom data transfers on MAC-OS
	driver core: Don't probe devices after bus_type.match() probe deferral
	wifi: mac80211: Don't finalize CSA in IBSS mode if state is disconnected
	wifi: mac80211: Fix UAF in ieee80211_scan_rx()
	ip: fix triggering of 'icmp redirect'
	net: Use u64_stats_fetch_begin_irq() for stats fetch.
	net: mac802154: Fix a condition in the receive path
	ALSA: hda/realtek: Add speaker AMP init for Samsung laptops with ALC298
	ALSA: seq: oss: Fix data-race for max_midi_devs access
	ALSA: seq: Fix data-race at module auto-loading
	drm/i915/glk: ECS Liva Q2 needs GLK HDMI port timing quirk
	btrfs: harden identification of a stale device
	mmc: core: Fix UHS-I SD 1.8V workaround branch
	usb: dwc3: fix PHY disable sequence
	usb: dwc3: qcom: fix use-after-free on runtime-PM wakeup
	usb: dwc3: disable USB core PHY management
	USB: serial: ch341: fix lost character on LCR updates
	USB: serial: ch341: fix disabled rx timer on older devices
	Linux 5.10.142

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I32d9b4c4c0e6c802744abb8b1c87ad794f4de0c8
2022-09-22 13:46:39 +02:00

// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * We want to know the real level where an entry is located ignoring any
 * folding of levels which may be happening. For example if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		pte = pte_offset_map(pmd, addr);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap(pte);
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap_unlock(pte, ptl);
	}

	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma) {
			split_huge_pmd(walk->vma, pmd, addr);
			if (pmd_trans_unstable(pmd))
				goto again;
		}

		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
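
/*
 * Illustrative example (hypothetical, not part of pagewalk.c): a ->pmd_entry()
 * handler using the walk->action protocol consumed above. A handler that has
 * dealt with a trans-huge pmd itself can set ACTION_CONTINUE so the walker
 * neither splits the pmd nor descends to the pte level.
 */
#if 0	/* example only, not compiled */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	if (pmd_trans_huge(*pmd)) {
		/* ... handle the huge mapping as a single entry ... */
		walk->action = ACTION_CONTINUE;	/* skip split and pte walk */
	}
	return 0;	/* 0: keep walking, <0: abort the walk */
}
#endif
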
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
		    ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative return value
 * means an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind the
	 * VM_PFNMAP range, so we don't walk over it as we do for normal vmas.
	 * However, some callers are interested in handling hole ranges and
	 * don't want to just ignore any single address range. Such users
	 * certainly define their ->pte_hole() callbacks, so let's delegate
	 * them to handle vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}
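
/*
 * Illustrative example (hypothetical, not part of pagewalk.c): a ->test_walk()
 * callback following the 0/1/negative contract above, restricting a walk to
 * anonymous vmas.
 */
#if 0	/* example only, not compiled */
static int example_test_walk(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	if (walk->vma->vm_file)
		return 1;	/* skip file-backed vmas, not an error */
	return 0;		/* walk anonymous vmas */
}
#endif
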
static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (ops->post_vma)
		ops->post_vma(walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operations to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		    unsigned long end, const struct mm_walk_ops *ops,
		    void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
			err = __walk_page_range(start, next, &walk);
		}
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
EXPORT_SYMBOL_GPL(walk_page_range);
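
/*
 * Illustrative example (hypothetical, not part of pagewalk.c): a minimal
 * walk_page_range() user that counts present ptes in a range via ->pte_entry
 * and @private, taking mmap_lock as the locking rules above require.
 */
#if 0	/* example only, not compiled */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_ptes_ops = {
	.pte_entry	= count_pte_entry,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &count_ptes_ops, &count);
	mmap_read_unlock(mm);
	return count;
}
#endif
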
/*
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	mmap_assert_write_locked(walk.mm);

	return walk_pgd_range(start, end, &walk);
}
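
/*
 * Illustrative example (hypothetical, not part of pagewalk.c): walking a
 * kernel address range through init_mm, as the ptdump code does. The
 * "mm: pagewalk: Fix race between unmap and page walker" change in this
 * release makes the write-locked mmap_lock a hard requirement here.
 */
#if 0	/* example only, not compiled */
static int walk_kernel_range(unsigned long start, unsigned long end,
			     const struct mm_walk_ops *ops, void *private)
{
	int err;

	mmap_write_lock(&init_mm);
	err = walk_page_range_novma(&init_mm, start, end, ops, NULL, private);
	mmap_write_unlock(&init_mm);
	return err;
}
#endif
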
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};
	int err;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops: operations to call during the walk
 * @private: private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
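
/*
 * Illustrative example (hypothetical, not part of pagewalk.c): a wrapper
 * showing the i_mmap_rwsem locking that walk_page_mapping() asserts above,
 * walking every mapping of the first @nr pages of a file.
 */
#if 0	/* example only, not compiled */
static int walk_file_mappings(struct address_space *mapping, pgoff_t nr,
			      const struct mm_walk_ops *ops, void *private)
{
	int err;

	i_mmap_lock_read(mapping);
	err = walk_page_mapping(mapping, 0, nr, ops, private);
	i_mmap_unlock_read(mapping);
	return err;
}
#endif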