Greg Kroah-Hartman 2df0fb4a4b This is the 5.10.50 stable release
-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmDu+1UACgkQONu9yGCS
 aT7jQRAAuLDi7ejk3JUameYFMzVXGAUE6yPs392/lWJzey7IBf+2uLqz4FzqqUHp
 U1GkEKJVaCacEfi0+rpi7BxNFljUdZdg/F/P68ARtAWPvwqAeJ4QIh5u3A682UUO
 1M5h6e5/oY9F4kQIb5Kot04avqOeR6lTqrkA8jeP5h43ngyLWuS2d+5oOGmbCukS
 UgEaCC6CiKjcN51UUTj/fXMQ0X4IDHP5pD8rWwH0IvK0i7gduvk744un8LVB6aW1
 rNV88C3BEFFtkPQh2XySnXM5Ok8kYlhFoTDsqlpeAX7pA8hiUPYBoRzTg0MJtPZn
 N1L/Yqhvxmn5xs9HAw7mDOo8E8NWXzsT5FvZVaBeiCgtdKmcPszylXqmSt1oiOb0
 /EmkCWmlbG/3qWql24+LU4XP36iVPx32HQxAgg2XbnlNU5o0E1y2F98p6p/3JSWX
 NAjHtmg/MxueFQ+w8bDzhO8YzYn1dIU3V3qaXRvtpODrmaSYW+bwCyPtSjXe3/vL
 604zb3dOg9+tD/gKqfRb/UPMu24nNll8M/gnSRci05/thmIxwtYudPwoLNSejDqr
 e+a8vejISfIyp41XrpYQbUeKs1WOA+A7vgx6CZrT791afiT+6UgC/ecQfg1NFxhs
 8ayWpocaIszxyXxVGro1rfwZeQmTlbTCZ5wVdpn9sDPZfI7epts=
 =FCrA
 -----END PGP SIGNATURE-----

Merge 5.10.50 into android12-5.10-lts

Changes in 5.10.50
	Bluetooth: hci_qca: fix potential GPF
	Bluetooth: btqca: Don't modify firmware contents in-place
	Bluetooth: Remove spurious error message
	ALSA: usb-audio: fix rate on Ozone Z90 USB headset
	ALSA: usb-audio: Fix OOB access at proc output
	ALSA: firewire-motu: fix stream format for MOTU 8pre FireWire
	ALSA: usb-audio: scarlett2: Fix wrong resume call
	ALSA: intel8x0: Fix breakage at ac97 clock measurement
	ALSA: hda/realtek: fix mute/micmute LEDs for HP ProBook 450 G8
	ALSA: hda/realtek: fix mute/micmute LEDs for HP ProBook 445 G8
	ALSA: hda/realtek: fix mute/micmute LEDs for HP ProBook 630 G8
	ALSA: hda/realtek: Add another ALC236 variant support
	ALSA: hda/realtek: fix mute/micmute LEDs for HP EliteBook x360 830 G8
	ALSA: hda/realtek: Improve fixup for HP Spectre x360 15-df0xxx
	ALSA: hda/realtek: Fix bass speaker DAC mapping for Asus UM431D
	ALSA: hda/realtek: Apply LED fixup for HP Dragonfly G1, too
	ALSA: hda/realtek: fix mute/micmute LEDs for HP EliteBook 830 G8 Notebook PC
	media: dvb-usb: fix wrong definition
	Input: usbtouchscreen - fix control-request directions
	net: can: ems_usb: fix use-after-free in ems_usb_disconnect()
	usb: gadget: eem: fix echo command packet response issue
	usb: renesas-xhci: Fix handling of unknown ROM state
	USB: cdc-acm: blacklist Heimann USB Appset device
	usb: dwc3: Fix debugfs creation flow
	usb: typec: Add the missed altmode_id_remove() in typec_register_altmode()
	xhci: solve a double free problem while doing s4
	gfs2: Fix underflow in gfs2_page_mkwrite
	gfs2: Fix error handling in init_statfs
	ntfs: fix validity check for file name attribute
	selftests/lkdtm: Avoid needing explicit sub-shell
	copy_page_to_iter(): fix ITER_DISCARD case
	iov_iter_fault_in_readable() should do nothing in xarray case
	Input: joydev - prevent use of not validated data in JSIOCSBTNMAP ioctl
	crypto: nx - Fix memcpy() over-reading in nonce
	crypto: ccp - Annotate SEV Firmware file names
	arm_pmu: Fix write counter incorrect in ARMv7 big-endian mode
	ARM: dts: ux500: Fix LED probing
	ARM: dts: at91: sama5d4: fix pinctrl muxing
	btrfs: send: fix invalid path for unlink operations after parent orphanization
	btrfs: compression: don't try to compress if we don't have enough pages
	btrfs: clear defrag status of a root if starting transaction fails
	ext4: cleanup in-core orphan list if ext4_truncate() failed to get a transaction handle
	ext4: fix kernel infoleak via ext4_extent_header
	ext4: fix overflow in ext4_iomap_alloc()
	ext4: return error code when ext4_fill_flex_info() fails
	ext4: correct the cache_nr in tracepoint ext4_es_shrink_exit
	ext4: remove check for zero nr_to_scan in ext4_es_scan()
	ext4: fix avefreec in find_group_orlov
	ext4: use ext4_grp_locked_error in mb_find_extent
	can: bcm: delay release of struct bcm_op after synchronize_rcu()
	can: gw: synchronize rcu operations before removing gw job entry
	can: isotp: isotp_release(): omit unintended hrtimer restart on socket release
	can: j1939: j1939_sk_init(): set SOCK_RCU_FREE to call sk_destruct() after RCU is done
	can: peak_pciefd: pucan_handle_status(): fix a potential starvation issue in TX path
	mac80211: remove iwlwifi specific workaround that broke sta NDP tx
	SUNRPC: Fix the batch tasks count wraparound.
	SUNRPC: Should wake up the privileged task firstly.
	bus: mhi: Wait for M2 state during system resume
	mm/gup: fix try_grab_compound_head() race with split_huge_page()
	perf/smmuv3: Don't trample existing events with global filter
	KVM: nVMX: Handle split-lock #AC exceptions that happen in L2
	KVM: PPC: Book3S HV: Workaround high stack usage with clang
	KVM: x86/mmu: Treat NX as used (not reserved) for all !TDP shadow MMUs
	KVM: x86/mmu: Use MMU's role to detect CR4.SMEP value in nested NPT walk
	s390/cio: dont call css_wait_for_slow_path() inside a lock
	s390: mm: Fix secure storage access exception handling
	f2fs: Prevent swap file in LFS mode
	clk: agilex/stratix10/n5x: fix how the bypass_reg is handled
	clk: agilex/stratix10: remove noc_clk
	clk: agilex/stratix10: fix bypass representation
	rtc: stm32: Fix unbalanced clk_disable_unprepare() on probe error path
	iio: frequency: adf4350: disable reg and clk on error in adf4350_probe()
	iio: light: tcs3472: do not free unallocated IRQ
	iio: ltr501: mark register holding upper 8 bits of ALS_DATA{0,1} and PS_DATA as volatile, too
	iio: ltr501: ltr559: fix initialization of LTR501_ALS_CONTR
	iio: ltr501: ltr501_read_ps(): add missing endianness conversion
	iio: accel: bma180: Fix BMA25x bandwidth register values
	serial: mvebu-uart: fix calculation of clock divisor
	serial: sh-sci: Stop dmaengine transfer in sci_stop_tx()
	serial_cs: Add Option International GSM-Ready 56K/ISDN modem
	serial_cs: remove wrong GLOBETROTTER.cis entry
	ath9k: Fix kernel NULL pointer dereference during ath_reset_internal()
	ssb: sdio: Don't overwrite const buffer if block_write fails
	rsi: Assign beacon rate settings to the correct rate_info descriptor field
	rsi: fix AP mode with WPA failure due to encrypted EAPOL
	tracing/histograms: Fix parsing of "sym-offset" modifier
	tracepoint: Add tracepoint_probe_register_may_exist() for BPF tracing
	seq_buf: Make trace_seq_putmem_hex() support data longer than 8
	powerpc/stacktrace: Fix spurious "stale" traces in raise_backtrace_ipi()
	loop: Fix missing discard support when using LOOP_CONFIGURE
	evm: Execute evm_inode_init_security() only when an HMAC key is loaded
	evm: Refuse EVM_ALLOW_METADATA_WRITES only if an HMAC key is loaded
	fuse: Fix crash in fuse_dentry_automount() error path
	fuse: Fix crash if superblock of submount gets killed early
	fuse: Fix infinite loop in sget_fc()
	fuse: ignore PG_workingset after stealing
	fuse: check connected before queueing on fpq->io
	fuse: reject internal errno
	thermal/cpufreq_cooling: Update offline CPUs per-cpu thermal_pressure
	spi: Make of_register_spi_device also set the fwnode
	Add a reference to ucounts for each cred
	staging: media: rkvdec: fix pm_runtime_get_sync() usage count
	media: marvel-ccic: fix some issues when getting pm_runtime
	media: mdk-mdp: fix pm_runtime_get_sync() usage count
	media: s5p: fix pm_runtime_get_sync() usage count
	media: am437x: fix pm_runtime_get_sync() usage count
	media: sh_vou: fix pm_runtime_get_sync() usage count
	media: mtk-vcodec: fix PM runtime get logic
	media: s5p-jpeg: fix pm_runtime_get_sync() usage count
	media: sunxi: fix pm_runtime_get_sync() usage count
	media: sti/bdisp: fix pm_runtime_get_sync() usage count
	media: exynos4-is: fix pm_runtime_get_sync() usage count
	media: exynos-gsc: fix pm_runtime_get_sync() usage count
	spi: spi-loopback-test: Fix 'tx_buf' might be 'rx_buf'
	spi: spi-topcliff-pch: Fix potential double free in pch_spi_process_messages()
	spi: omap-100k: Fix the length judgment problem
	regulator: uniphier: Add missing MODULE_DEVICE_TABLE
	sched/core: Initialize the idle task with preemption disabled
	hwrng: exynos - Fix runtime PM imbalance on error
	crypto: nx - add missing MODULE_DEVICE_TABLE
	media: sti: fix obj-$(config) targets
	media: cpia2: fix memory leak in cpia2_usb_probe
	media: cobalt: fix race condition in setting HPD
	media: hevc: Fix dependent slice segment flags
	media: pvrusb2: fix warning in pvr2_i2c_core_done
	media: imx: imx7_mipi_csis: Fix logging of only error event counters
	crypto: qat - check return code of qat_hal_rd_rel_reg()
	crypto: qat - remove unused macro in FW loader
	crypto: qce: skcipher: Fix incorrect sg count for dma transfers
	arm64: perf: Convert snprintf to sysfs_emit
	sched/fair: Fix ascii art by relpacing tabs
	media: i2c: ov2659: Use clk_{prepare_enable,disable_unprepare}() to set xvclk on/off
	media: bt878: do not schedule tasklet when it is not setup
	media: em28xx: Fix possible memory leak of em28xx struct
	media: hantro: Fix .buf_prepare
	media: cedrus: Fix .buf_prepare
	media: v4l2-core: Avoid the dangling pointer in v4l2_fh_release
	media: bt8xx: Fix a missing check bug in bt878_probe
	media: st-hva: Fix potential NULL pointer dereferences
	crypto: hisilicon/sec - fixup 3des minimum key size declaration
	Makefile: fix GDB warning with CONFIG_RELR
	media: dvd_usb: memory leak in cinergyt2_fe_attach
	memstick: rtsx_usb_ms: fix UAF
	mmc: sdhci-sprd: use sdhci_sprd_writew
	mmc: via-sdmmc: add a check against NULL pointer dereference
	spi: meson-spicc: fix a wrong goto jump for avoiding memory leak.
	spi: meson-spicc: fix memory leak in meson_spicc_probe
	crypto: shash - avoid comparing pointers to exported functions under CFI
	media: dvb_net: avoid speculation from net slot
	media: siano: fix device register error path
	media: imx-csi: Skip first few frames from a BT.656 source
	hwmon: (max31790) Report correct current pwm duty cycles
	hwmon: (max31790) Fix pwmX_enable attributes
	drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()
	KVM: PPC: Book3S HV: Fix TLB management on SMT8 POWER9 and POWER10 processors
	btrfs: fix error handling in __btrfs_update_delayed_inode
	btrfs: abort transaction if we fail to update the delayed inode
	btrfs: sysfs: fix format string for some discard stats
	btrfs: don't clear page extent mapped if we're not invalidating the full page
	btrfs: disable build on platforms having page size 256K
	locking/lockdep: Fix the dep path printing for backwards BFS
	lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
	KVM: s390: get rid of register asm usage
	regulator: mt6358: Fix vdram2 .vsel_mask
	regulator: da9052: Ensure enough delay time for .set_voltage_time_sel
	media: Fix Media Controller API config checks
	ACPI: video: use native backlight for GA401/GA502/GA503
	HID: do not use down_interruptible() when unbinding devices
	EDAC/ti: Add missing MODULE_DEVICE_TABLE
	ACPI: processor idle: Fix up C-state latency if not ordered
	hv_utils: Fix passing zero to 'PTR_ERR' warning
	lib: vsprintf: Fix handling of number field widths in vsscanf
	Input: goodix - platform/x86: touchscreen_dmi - Move upside down quirks to touchscreen_dmi.c
	platform/x86: touchscreen_dmi: Add an extra entry for the upside down Goodix touchscreen on Teclast X89 tablets
	platform/x86: touchscreen_dmi: Add info for the Goodix GT912 panel of TM800A550L tablets
	ACPI: EC: Make more Asus laptops use ECDT _GPE
	block_dump: remove block_dump feature in mark_inode_dirty()
	blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter
	blk-mq: clear stale request in tags->rq[] before freeing one request pool
	fs: dlm: cancel work sync othercon
	random32: Fix implicit truncation warning in prandom_seed_state()
	open: don't silently ignore unknown O-flags in openat2()
	drivers: hv: Fix missing error code in vmbus_connect()
	fs: dlm: fix memory leak when fenced
	ACPICA: Fix memory leak caused by _CID repair function
	ACPI: bus: Call kobject_put() in acpi_init() error path
	ACPI: resources: Add checks for ACPI IRQ override
	block: fix race between adding/removing rq qos and normal IO
	platform/x86: asus-nb-wmi: Revert "Drop duplicate DMI quirk structures"
	platform/x86: asus-nb-wmi: Revert "add support for ASUS ROG Zephyrus G14 and G15"
	platform/x86: toshiba_acpi: Fix missing error code in toshiba_acpi_setup_keyboard()
	nvme-pci: fix var. type for increasing cq_head
	nvmet-fc: do not check for invalid target port in nvmet_fc_handle_fcp_rqst()
	EDAC/Intel: Do not load EDAC driver when running as a guest
	PCI: hv: Add check for hyperv_initialized in init_hv_pci_drv()
	cifs: improve fallocate emulation
	ACPI: EC: trust DSDT GPE for certain HP laptop
	clocksource: Retry clock read if long delays detected
	clocksource: Check per-CPU clock synchronization when marked unstable
	tpm_tis_spi: add missing SPI device ID entries
	ACPI: tables: Add custom DSDT file as makefile prerequisite
	HID: wacom: Correct base usage for capacitive ExpressKey status bits
	cifs: fix missing spinlock around update to ses->status
	mailbox: qcom: Use PLATFORM_DEVID_AUTO to register platform device
	block: fix discard request merge
	kthread_worker: fix return value when kthread_mod_delayed_work() races with kthread_cancel_delayed_work_sync()
	ia64: mca_drv: fix incorrect array size calculation
	writeback, cgroup: increment isw_nr_in_flight before grabbing an inode
	spi: Allow to have all native CSs in use along with GPIOs
	spi: Avoid undefined behaviour when counting unused native CSs
	media: venus: Rework error fail recover logic
	media: s5p_cec: decrement usage count if disabled
	media: hantro: do a PM resume earlier
	crypto: ixp4xx - dma_unmap the correct address
	crypto: ixp4xx - update IV after requests
	crypto: ux500 - Fix error return code in hash_hw_final()
	sata_highbank: fix deferred probing
	pata_rb532_cf: fix deferred probing
	media: I2C: change 'RST' to "RSET" to fix multiple build errors
	sched/uclamp: Fix wrong implementation of cpu.uclamp.min
	sched/uclamp: Fix locking around cpu_util_update_eff()
	kbuild: Fix objtool dependency for 'OBJECT_FILES_NON_STANDARD_<obj> := n'
	pata_octeon_cf: avoid WARN_ON() in ata_host_activate()
	evm: fix writing <securityfs>/evm overflow
	x86/elf: Use _BITUL() macro in UAPI headers
	crypto: sa2ul - Fix leaks on failure paths with sa_dma_init()
	crypto: sa2ul - Fix pm_runtime enable in sa_ul_probe()
	crypto: ccp - Fix a resource leak in an error handling path
	media: rc: i2c: Fix an error message
	pata_ep93xx: fix deferred probing
	locking/lockdep: Reduce LOCKDEP dependency list
	media: rkvdec: Fix .buf_prepare
	media: exynos4-is: Fix a use after free in isp_video_release
	media: au0828: fix a NULL vs IS_ERR() check
	media: tc358743: Fix error return code in tc358743_probe_of()
	media: gspca/gl860: fix zero-length control requests
	m68k: atari: Fix ATARI_KBD_CORE kconfig unmet dependency warning
	media: siano: Fix out-of-bounds warnings in smscore_load_firmware_family2()
	regulator: fan53880: Fix vsel_mask setting for FAN53880_BUCK
	crypto: nitrox - fix unchecked variable in nitrox_register_interrupts
	crypto: omap-sham - Fix PM reference leak in omap sham ops
	crypto: x86/curve25519 - fix cpu feature checking logic in mod_exit
	crypto: sm2 - remove unnecessary reset operations
	crypto: sm2 - fix a memory leak in sm2
	mmc: usdhi6rol0: fix error return code in usdhi6_probe()
	arm64: consistently use reserved_pg_dir
	arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
	media: subdev: remove VIDIOC_DQEVENT_TIME32 handling
	media: s5p-g2d: Fix a memory leak on ctx->fh.m2m_ctx
	hwmon: (lm70) Use device_get_match_data()
	hwmon: (lm70) Revert "hwmon: (lm70) Add support for ACPI"
	hwmon: (max31722) Remove non-standard ACPI device IDs
	hwmon: (max31790) Fix fan speed reporting for fan7..12
	KVM: nVMX: Sync all PGDs on nested transition with shadow paging
	KVM: nVMX: Ensure 64-bit shift when checking VMFUNC bitmap
	KVM: nVMX: Don't clobber nested MMU's A/D status on EPTP switch
	KVM: x86/mmu: Fix return value in tdp_mmu_map_handle_target_level()
	perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number
	KVM: arm64: Don't zero the cycle count register when PMCR_EL0.P is set
	regulator: hi655x: Fix pass wrong pointer to config.driver_data
	btrfs: clear log tree recovering status if starting transaction fails
	x86/sev: Make sure IRQs are disabled while GHCB is active
	x86/sev: Split up runtime #VC handler for correct state tracking
	sched/rt: Fix RT utilization tracking during policy change
	sched/rt: Fix Deadline utilization tracking during policy change
	sched/uclamp: Fix uclamp_tg_restrict()
	lockdep: Fix wait-type for empty stack
	lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
	spi: spi-sun6i: Fix chipselect/clock bug
	crypto: nx - Fix RCU warning in nx842_OF_upd_status
	psi: Fix race between psi_trigger_create/destroy
	media: v4l2-async: Clean v4l2_async_notifier_add_fwnode_remote_subdev
	media: video-mux: Skip dangling endpoints
	PM / devfreq: Add missing error code in devfreq_add_device()
	ACPI: PM / fan: Put fan device IDs into separate header file
	block: avoid double io accounting for flush request
	nvme-pci: look for StorageD3Enable on companion ACPI device instead
	ACPI: sysfs: Fix a buffer overrun problem with description_show()
	mark pstore-blk as broken
	clocksource/drivers/timer-ti-dm: Save and restore timer TIOCP_CFG
	extcon: extcon-max8997: Fix IRQ freeing at error path
	ACPI: APEI: fix synchronous external aborts in user-mode
	blk-wbt: introduce a new disable state to prevent false positive by rwb_enabled()
	blk-wbt: make sure throttle is enabled properly
	ACPI: Use DEVICE_ATTR_<RW|RO|WO> macros
	ACPI: bgrt: Fix CFI violation
	cpufreq: Make cpufreq_online() call driver->offline() on errors
	blk-mq: update hctx->dispatch_busy in case of real scheduler
	ocfs2: fix snprintf() checking
	dax: fix ENOMEM handling in grab_mapping_entry()
	mm/debug_vm_pgtable/basic: add validation for dirtiness after write protect
	mm/debug_vm_pgtable/basic: iterate over entire protection_map[]
	mm/debug_vm_pgtable: ensure THP availability via has_transparent_hugepage()
	swap: fix do_swap_page() race with swapoff
	mm/shmem: fix shmem_swapin() race with swapoff
	mm: memcg/slab: properly set up gfp flags for objcg pointer array
	mm: page_alloc: refactor setup_per_zone_lowmem_reserve()
	mm/page_alloc: fix counting of managed_pages
	xfrm: xfrm_state_mtu should return at least 1280 for ipv6
	drm/bridge/sii8620: fix dependency on extcon
	drm/bridge: Fix the stop condition of drm_bridge_chain_pre_enable()
	drm/amd/dc: Fix a missing check bug in dm_dp_mst_detect()
	drm/ast: Fix missing conversions to managed API
	video: fbdev: imxfb: Fix an error message
	net: mvpp2: Put fwnode in error case during ->probe()
	net: pch_gbe: Propagate error from devm_gpio_request_one()
	pinctrl: renesas: r8a7796: Add missing bias for PRESET# pin
	pinctrl: renesas: r8a77990: JTAG pins do not have pull-down capabilities
	drm/vmwgfx: Mark a surface gpu-dirty after the SVGA3dCmdDXGenMips command
	drm/vmwgfx: Fix cpu updates of coherent multisample surfaces
	net: qrtr: ns: Fix error return code in qrtr_ns_init()
	clk: meson: g12a: fix gp0 and hifi ranges
	net: ftgmac100: add missing error return code in ftgmac100_probe()
	drm: rockchip: set alpha_en to 0 if it is not used
	drm/rockchip: cdn-dp-core: add missing clk_disable_unprepare() on error in cdn_dp_grf_write()
	drm/rockchip: dsi: move all lane config except LCDC mux to bind()
	drm/rockchip: lvds: Fix an error handling path
	drm/rockchip: cdn-dp: fix sign extension on an int multiply for a u64 result
	mptcp: fix pr_debug in mptcp_token_new_connect
	mptcp: generate subflow hmac after mptcp_finish_join()
	RDMA/srp: Fix a recently introduced memory leak
	RDMA/rtrs-clt: Check state of the rtrs_clt_sess before reading its stats
	RDMA/rtrs: Do not reset hb_missed_max after re-connection
	RDMA/rtrs-srv: Fix memory leak of unfreed rtrs_srv_stats object
	RDMA/rtrs-srv: Fix memory leak when having multiple sessions
	RDMA/rtrs-clt: Check if the queue_depth has changed during a reconnection
	RDMA/rtrs-clt: Fix memory leak of not-freed sess->stats and stats->pcpu_stats
	ehea: fix error return code in ehea_restart_qps()
	clk: tegra30: Use 300MHz for video decoder by default
	xfrm: remove the fragment check for ipv6 beet mode
	net/sched: act_vlan: Fix modify to allow 0
	RDMA/core: Sanitize WQ state received from the userspace
	drm/pl111: depend on CONFIG_VEXPRESS_CONFIG
	RDMA/rxe: Fix failure during driver load
	drm/pl111: Actually fix CONFIG_VEXPRESS_CONFIG depends
	drm/vc4: hdmi: Fix error path of hpd-gpios
	clk: vc5: fix output disabling when enabling a FOD
	drm: qxl: ensure surf.data is ininitialized
	tools/bpftool: Fix error return code in do_batch()
	ath10k: go to path err_unsupported when chip id is not supported
	ath10k: add missing error return code in ath10k_pci_probe()
	wireless: carl9170: fix LEDS build errors & warnings
	ieee802154: hwsim: Fix possible memory leak in hwsim_subscribe_all_others
	clk: imx8mq: remove SYS PLL 1/2 clock gates
	wcn36xx: Move hal_buf allocation to devm_kmalloc in probe
	ssb: Fix error return code in ssb_bus_scan()
	brcmfmac: fix setting of station info chains bitmask
	brcmfmac: correctly report average RSSI in station info
	brcmfmac: Fix a double-free in brcmf_sdio_bus_reset
	brcmsmac: mac80211_if: Fix a resource leak in an error handling path
	cw1200: Revert unnecessary patches that fix unreal use-after-free bugs
	ath11k: Fix an error handling path in ath11k_core_fetch_board_data_api_n()
	ath10k: Fix an error code in ath10k_add_interface()
	ath11k: send beacon template after vdev_start/restart during csa
	netlabel: Fix memory leak in netlbl_mgmt_add_common
	RDMA/mlx5: Don't add slave port to unaffiliated list
	netfilter: nft_exthdr: check for IPv6 packet before further processing
	netfilter: nft_osf: check for TCP packet before further processing
	netfilter: nft_tproxy: restrict support to TCP and UDP transport protocols
	RDMA/rxe: Fix qp reference counting for atomic ops
	selftests/bpf: Whitelist test_progs.h from .gitignore
	xsk: Fix missing validation for skb and unaligned mode
	xsk: Fix broken Tx ring validation
	bpf: Fix libelf endian handling in resolv_btfids
	RDMA/rtrs-srv: Set minimal max_send_wr and max_recv_wr
	samples/bpf: Fix Segmentation fault for xdp_redirect command
	samples/bpf: Fix the error return code of xdp_redirect's main()
	mt76: fix possible NULL pointer dereference in mt76_tx
	mt76: mt7615: fix NULL pointer dereference in tx_prepare_skb()
	net: ethernet: aeroflex: fix UAF in greth_of_remove
	net: ethernet: ezchip: fix UAF in nps_enet_remove
	net: ethernet: ezchip: fix error handling
	vrf: do not push non-ND strict packets with a source LLA through packet taps again
	net: sched: add barrier to ensure correct ordering for lockless qdisc
	tls: prevent oversized sendfile() hangs by ignoring MSG_MORE
	netfilter: nf_tables_offload: check FLOW_DISSECTOR_KEY_BASIC in VLAN transfer logic
	pkt_sched: sch_qfq: fix qfq_change_class() error path
	xfrm: Fix xfrm offload fallback fail case
	iwlwifi: increase PNVM load timeout
	rtw88: 8822c: fix lc calibration timing
	vxlan: add missing rcu_read_lock() in neigh_reduce()
	ip6_tunnel: fix GRE6 segmentation
	net/ipv4: swap flow ports when validating source
	net: ti: am65-cpsw-nuss: Fix crash when changing number of TX queues
	tc-testing: fix list handling
	ieee802154: hwsim: Fix memory leak in hwsim_add_one
	ieee802154: hwsim: avoid possible crash in hwsim_del_edge_nl()
	bpf: Fix null ptr deref with mixed tail calls and subprogs
	drm/msm: Fix error return code in msm_drm_init()
	drm/msm/dpu: Fix error return code in dpu_mdss_init()
	mac80211: remove iwlwifi specific workaround NDPs of null_response
	net: bcmgenet: Fix attaching to PYH failed on RPi 4B
	ipv6: exthdrs: do not blindly use init_net
	can: j1939: j1939_sk_setsockopt(): prevent allocation of j1939 filter for optlen == 0
	bpf: Do not change gso_size during bpf_skb_change_proto()
	i40e: Fix error handling in i40e_vsi_open
	i40e: Fix autoneg disabling for non-10GBaseT links
	i40e: Fix missing rtnl locking when setting up pf switch
	Revert "ibmvnic: remove duplicate napi_schedule call in open function"
	ibmvnic: set ltb->buff to NULL after freeing
	ibmvnic: free tx_pool if tso_pool alloc fails
	RDMA/cma: Protect RMW with qp_mutex
	net: macsec: fix the length used to copy the key for offloading
	net: phy: mscc: fix macsec key length
	net: atlantic: fix the macsec key length
	ipv6: fix out-of-bound access in ip6_parse_tlv()
	e1000e: Check the PCIm state
	net: dsa: sja1105: fix NULL pointer dereference in sja1105_reload_cbs()
	bpfilter: Specify the log level for the kmsg message
	RDMA/cma: Fix incorrect Packet Lifetime calculation
	gve: Fix swapped vars when fetching max queues
	Revert "be2net: disable bh with spin_lock in be_process_mcc"
	Bluetooth: mgmt: Fix slab-out-of-bounds in tlv_data_is_valid
	Bluetooth: Fix not sending Set Extended Scan Response
	Bluetooth: Fix Set Extended (Scan Response) Data
	Bluetooth: Fix handling of HCI_LE_Advertising_Set_Terminated event
	clk: actions: Fix UART clock dividers on Owl S500 SoC
	clk: actions: Fix SD clocks factor table on Owl S500 SoC
	clk: actions: Fix bisp_factor_table based clocks on Owl S500 SoC
	clk: actions: Fix AHPPREDIV-H-AHB clock chain on Owl S500 SoC
	clk: qcom: clk-alpha-pll: fix CAL_L write in alpha_pll_fabia_prepare
	clk: si5341: Wait for DEVICE_READY on startup
	clk: si5341: Avoid divide errors due to bogus register contents
	clk: si5341: Check for input clock presence and PLL lock on startup
	clk: si5341: Update initialization magic
	writeback: fix obtain a reference to a freeing memcg css
	net: lwtunnel: handle MTU calculation in forwading
	net: sched: fix warning in tcindex_alloc_perfect_hash
	net: tipc: fix FB_MTU eat two pages
	RDMA/mlx5: Don't access NULL-cleared mpi pointer
	RDMA/core: Always release restrack object
	MIPS: Fix PKMAP with 32-bit MIPS huge page support
	staging: fbtft: Rectify GPIO handling
	staging: fbtft: Don't spam logs when probe is deferred
	ASoC: rt5682: Disable irq on shutdown
	rcu: Invoke rcu_spawn_core_kthreads() from rcu_spawn_gp_kthread()
	serial: fsl_lpuart: don't modify arbitrary data on lpuart32
	serial: fsl_lpuart: remove RTSCTS handling from get_mctrl()
	serial: 8250_omap: fix a timeout loop condition
	tty: nozomi: Fix a resource leak in an error handling function
	mwifiex: re-fix for unaligned accesses
	iio: adis_buffer: do not return ints in irq handlers
	iio: adis16400: do not return ints in irq handlers
	iio: adis16475: do not return ints in irq handlers
	iio: accel: bma180: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: accel: bma220: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: accel: hid: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: accel: kxcjk-1013: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: accel: mxc4005: Fix overread of data and alignment issue.
	iio: accel: stk8312: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: accel: stk8ba50: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: adc: ti-ads1015: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: adc: vf610: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: gyro: bmg160: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: humidity: am2315: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: prox: srf08: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: prox: pulsed-light: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: prox: as3935: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: magn: hmc5843: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: magn: bmc150: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: light: isl29125: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: light: tcs3414: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: light: tcs3472: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: chemical: atlas: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: cros_ec_sensors: Fix alignment of buffer in iio_push_to_buffers_with_timestamp()
	iio: potentiostat: lmp91000: Fix alignment of buffer in iio_push_to_buffers_with_timestamp()
	ASoC: rk3328: fix missing clk_disable_unprepare() on error in rk3328_platform_probe()
	ASoC: hisilicon: fix missing clk_disable_unprepare() on error in hi6210_i2s_startup()
	backlight: lm3630a_bl: Put fwnode in error case during ->probe()
	ASoC: rsnd: tidyup loop on rsnd_adg_clk_query()
	Input: hil_kbd - fix error return code in hil_dev_connect()
	perf scripting python: Fix tuple_set_u64()
	mtd: partitions: redboot: seek fis-index-block in the right node
	mtd: rawnand: arasan: Ensure proper configuration for the asserted target
	staging: mmal-vchiq: Fix incorrect static vchiq_instance.
	char: pcmcia: error out if 'num_bytes_read' is greater than 4 in set_protocol()
	firmware: stratix10-svc: Fix a resource leak in an error handling path
	tty: nozomi: Fix the error handling path of 'nozomi_card_init()'
	leds: class: The -ENOTSUPP should never be seen by user space
	leds: lm3532: select regmap I2C API
	leds: lm36274: Put fwnode in error case during ->probe()
	leds: lm3692x: Put fwnode in any case during ->probe()
	leds: lm3697: Don't spam logs when probe is deferred
	leds: lp50xx: Put fwnode in error case during ->probe()
	scsi: FlashPoint: Rename si_flags field
	scsi: iscsi: Flush block work before unblock
	mfd: mp2629: Select MFD_CORE to fix build error
	mfd: rn5t618: Fix IRQ trigger by changing it to level mode
	fsi: core: Fix return of error values on failures
	fsi: scom: Reset the FSI2PIB engine for any error
	fsi: occ: Don't accept response from un-initialized OCC
	fsi/sbefifo: Clean up correct FIFO when receiving reset request from SBE
	fsi/sbefifo: Fix reset timeout
	visorbus: fix error return code in visorchipset_init()
	iommu/amd: Fix extended features logging
	s390/irq: select HAVE_IRQ_EXIT_ON_IRQ_STACK
	s390: enable HAVE_IOREMAP_PROT
	s390: appldata depends on PROC_SYSCTL
	selftests: splice: Adjust for handler fallback removal
	iommu/dma: Fix IOVA reserve dma ranges
	ASoC: max98373-sdw: use first_hw_init flag on resume
	ASoC: rt1308-sdw: use first_hw_init flag on resume
	ASoC: rt5682-sdw: use first_hw_init flag on resume
	ASoC: rt700-sdw: use first_hw_init flag on resume
	ASoC: rt711-sdw: use first_hw_init flag on resume
	ASoC: rt715-sdw: use first_hw_init flag on resume
	ASoC: rt5682: fix getting the wrong device id when the suspend_stress_test
	ASoC: rt5682-sdw: set regcache_cache_only false before reading RT5682_DEVICE_ID
	ASoC: mediatek: mtk-btcvsd: Fix an error handling path in 'mtk_btcvsd_snd_probe()'
	usb: gadget: f_fs: Fix setting of device and driver data cross-references
	usb: dwc2: Don't reset the core after setting turnaround time
	eeprom: idt_89hpesx: Put fwnode in matching case during ->probe()
	eeprom: idt_89hpesx: Restore printing the unsupported fwnode name
	thunderbolt: Bond lanes only when dual_link_port != NULL in alloc_dev_default()
	iio: adc: at91-sama5d2: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: adc: hx711: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: adc: mxs-lradc: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: adc: ti-ads8688: Fix alignment of buffer in iio_push_to_buffers_with_timestamp()
	iio: magn: rm3100: Fix alignment of buffer in iio_push_to_buffers_with_timestamp()
	iio: light: vcnl4000: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	ASoC: fsl_spdif: Fix error handler with pm_runtime_enable
	staging: gdm724x: check for buffer overflow in gdm_lte_multi_sdu_pkt()
	staging: gdm724x: check for overflow in gdm_lte_netif_rx()
	staging: rtl8712: fix error handling in r871xu_drv_init
	staging: rtl8712: fix memory leak in rtl871x_load_fw_cb
	coresight: core: Fix use of uninitialized pointer
	staging: mt7621-dts: fix pci address for PCI memory range
	serial: 8250: Actually allow UPF_MAGIC_MULTIPLIER baud rates
	iio: light: vcnl4035: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	iio: prox: isl29501: Fix buffer alignment in iio_push_to_buffers_with_timestamp()
	ASoC: cs42l42: Correct definition of CS42L42_ADC_PDN_MASK
	of: Fix truncation of memory sizes on 32-bit platforms
	mtd: rawnand: marvell: add missing clk_disable_unprepare() on error in marvell_nfc_resume()
	habanalabs: Fix an error handling path in 'hl_pci_probe()'
	scsi: mpt3sas: Fix error return value in _scsih_expander_add()
	soundwire: stream: Fix test for DP prepare complete
	phy: uniphier-pcie: Fix updating phy parameters
	phy: ti: dm816x: Fix the error handling path in 'dm816x_usb_phy_probe()
	extcon: sm5502: Drop invalid register write in sm5502_reg_data
	extcon: max8997: Add missing modalias string
	powerpc/powernv: Fix machine check reporting of async store errors
	ASoC: atmel-i2s: Fix usage of capture and playback at the same time
	configfs: fix memleak in configfs_release_bin_file
	ASoC: Intel: sof_sdw: add SOF_RT715_DAI_ID_FIX for AlderLake
	ASoC: fsl_spdif: Fix unexpected interrupt after suspend
	leds: as3645a: Fix error return code in as3645a_parse_node()
	leds: ktd2692: Fix an error handling path
	selftests/ftrace: fix event-no-pid on 1-core machine
	serial: 8250: 8250_omap: Disable RX interrupt after DMA enable
	serial: 8250: 8250_omap: Fix possible interrupt storm on K3 SoCs
	powerpc: Offline CPU in stop_this_cpu()
	powerpc/papr_scm: Properly handle UUID types and API
	powerpc/64s: Fix copy-paste data exposure into newly created tasks
	powerpc/papr_scm: Make 'perf_stats' invisible if perf-stats unavailable
	ALSA: firewire-lib: Fix 'amdtp_domain_start()' when no AMDTP_OUT_STREAM stream is found
	serial: mvebu-uart: do not allow changing baudrate when uartclk is not available
	serial: mvebu-uart: correctly calculate minimal possible baudrate
	arm64: dts: marvell: armada-37xx: Fix reg for standard variant of UART
	vfio/pci: Handle concurrent vma faults
	mm/pmem: avoid inserting hugepage PTE entry with fsdax if hugepage support is disabled
	mm/huge_memory.c: remove dedicated macro HPAGE_CACHE_INDEX_MASK
	mm/huge_memory.c: add missing read-only THP checking in transparent_hugepage_enabled()
	mm/huge_memory.c: don't discard hugepage if other processes are mapping it
	mm/hugetlb: use helper huge_page_order and pages_per_huge_page
	mm/hugetlb: remove redundant check in preparing and destroying gigantic page
	hugetlb: remove prep_compound_huge_page cleanup
	include/linux/huge_mm.h: remove extern keyword
	mm/z3fold: fix potential memory leak in z3fold_destroy_pool()
	mm/z3fold: use release_z3fold_page_locked() to release locked z3fold page
	lib/math/rational.c: fix divide by zero
	selftests/vm/pkeys: fix alloc_random_pkey() to make it really, really random
	selftests/vm/pkeys: handle negative sys_pkey_alloc() return code
	selftests/vm/pkeys: refill shadow register after implicit kernel write
	perf llvm: Return -ENOMEM when asprintf() fails
	csky: fix syscache.c fallthrough warning
	csky: syscache: Fixup duplicate cache flush
	exfat: handle wrong stream entry size in exfat_readdir()
	scsi: fc: Correct RHBA attributes length
	scsi: target: cxgbit: Unmap DMA buffer before calling target_execute_cmd()
	mailbox: qcom-ipcc: Fix IPCC mbox channel exhaustion
	fscrypt: don't ignore minor_hash when hash is 0
	fscrypt: fix derivation of SipHash keys on big endian CPUs
	tpm: Replace WARN_ONCE() with dev_err_once() in tpm_tis_status()
	erofs: fix error return code in erofs_read_superblock()
	block: return the correct bvec when checking for gaps
	io_uring: fix blocking inline submission
	mmc: block: Disable CMDQ on the ioctl path
	mmc: vub3000: fix control-request direction
	media: exynos4-is: remove a now unused integer
	scsi: core: Retry I/O for Notify (Enable Spinup) Required error
	crypto: qce - fix error return code in qce_skcipher_async_req_handle()
	s390: preempt: Fix preempt_count initialization
	cred: add missing return error code when set_cred_ucounts() failed
	iommu/dma: Fix compile warning in 32-bit builds
	powerpc/preempt: Don't touch the idle task's preempt_count during hotplug
	Linux 5.10.50

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iec4eab24ea8eb5a6d79739a1aec8432d93a8f82c
2021-07-14 17:35:23 +02:00

mm/gup.c — 3038 lines, 86 KiB, C

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/page_pinner.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static void hpage_pincount_add(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_sub(refs, compound_pincount_ptr(page));
}

/* Equivalent to calling put_page() @refs times. */
static void put_page_refs(struct page *page, int refs)
{
#ifdef CONFIG_DEBUG_VM
	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
		return;
#endif

	/*
	 * Calling put_page() for each ref is unnecessarily slow. Only the last
	 * ref needs a put_page().
	 */
	if (refs > 1)
		page_ref_sub(page, refs - 1);
	put_page(page);
}
/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);

	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the head page; but it
	 * could be that between the compound_head() lookup and the refcount
	 * increment, the compound page was split, in which case we'd end up
	 * holding a reference on a page that has nothing to do with the page
	 * we were given anymore.
	 * So now that the head page is stable, recheck that the pages still
	 * belong together.
	 */
	if (unlikely(compound_head(page) != head)) {
		put_page_refs(head, refs);
		return NULL;
	}

	return head;
}
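
/*
 * Editor's note (not part of mm/gup.c): one concrete interleaving of the race
 * the comment above guards against, with lockless GUP-fast on one CPU and a
 * THP split on another:
 *
 *	GUP-fast				split_huge_page()
 *	--------				-----------------
 *	head = compound_head(page);
 *						split completes; @page becomes
 *						its own head, refs are moved
 *	page_cache_add_speculative(head, refs);
 *	compound_head(page) != head
 *		-> back out via put_page_refs()
 *
 * This increment-then-recheck pattern appears to be the core of the
 * "mm/gup: fix try_grab_compound_head() race with split_huge_page()" fix
 * listed in the 5.10.50 changelog above.
 */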
/*
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by 1.
 *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
static __maybe_unused struct page *try_grab_compound_head(struct page *page,
							  int refs,
							  unsigned int flags)
{
	if (flags & FOLL_GET) {
		struct page *head = try_get_compound_head(page, refs);

		if (head)
			set_page_pinner(head, compound_order(head));
		return head;
	} else if (flags & FOLL_PIN) {
		int orig_refs = refs;

		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
		 * path, so fail and let the caller fall back to the slow path.
		 */
		if (unlikely(flags & FOLL_LONGTERM) &&
		    is_migrate_cma_page(page))
			return NULL;

		/*
		 * CAUTION: Don't use compound_head() on the page before this
		 * point, the result won't be stable.
		 */
		page = try_get_compound_head(page, refs);
		if (!page)
			return NULL;

		/*
		 * When pinning a compound page of order > 1 (which is what
		 * hpage_pincount_available() checks for), use an exact count to
		 * track it, via hpage_pincount_add/_sub().
		 *
		 * However, be sure to *also* increment the normal page refcount
		 * field at least once, so that the page really is pinned.
		 */
		if (hpage_pincount_available(page))
			hpage_pincount_add(page, refs);
		else
			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
				    orig_refs);

		return page;
	}

	WARN_ON_ONCE(1);
	return NULL;
}
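
/*
 * Editor's note (not part of mm/gup.c): the FOLL_PIN refcount arithmetic
 * above, worked for a base (order-0) page, where GUP_PIN_COUNTING_BIAS is
 * 1024. try_get_compound_head(page, refs) adds @refs, and the FOLL_PIN branch
 * then adds refs * (GUP_PIN_COUNTING_BIAS - 1), for a net of refs * 1024:
 *
 *	refcount before:	1
 *	try_grab_compound_head(page, 2, FOLL_PIN):
 *		+2		(speculative get)
 *		+2 * 1023	(pin bias top-up)
 *	refcount after:		2049 == 1 + 2 * GUP_PIN_COUNTING_BIAS
 *
 * put_compound_head(page, 2, FOLL_PIN) below multiplies @refs by the same
 * bias and returns the count to 1.
 */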
static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
				    refs);

		if (hpage_pincount_available(page))
			hpage_pincount_sub(page, refs);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (flags & FOLL_GET)
		reset_page_pinner(page, compound_order(page));
	put_page_refs(page, refs);
}
/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * @page:    pointer to page to be grabbed
 * @flags:   gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by 1.
 *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));

	if (flags & FOLL_GET) {
		bool ret = try_get_page(page);

		if (ret) {
			page = compound_head(page);
			set_page_pinner(page, compound_order(page));
		}

		return ret;
	} else if (flags & FOLL_PIN) {
		int refs = 1;

		page = compound_head(page);

		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
			return false;

		if (hpage_pincount_available(page))
			hpage_pincount_add(page, 1);
		else
			refs = GUP_PIN_COUNTING_BIAS;

		/*
		 * Similar to try_grab_compound_head(): even if using the
		 * hpage_pincount_add/_sub() routines, be sure to
		 * *also* increment the normal page refcount field at least
		 * once, so that the page really is pinned.
		 */
		page_ref_add(page, refs);

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
	}

	return true;
}
/**
 * unpin_user_page() - release a dma-pinned page
 * @page:            pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
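
/*
 * Editor's sketch (not part of mm/gup.c): the pin/unpin pairing the comment
 * above requires, as a hypothetical kernel-side caller. pin_user_pages_fast()
 * and unpin_user_page() are the real APIs; the function name and the DMA step
 * are made up for illustration.
 */
#if 0	/* illustration only */
static int example_pin_then_unpin(unsigned long uaddr)
{
	struct page *page;
	int pinned;

	/* Pin one user page for writing; no mmap_lock needed for _fast(). */
	pinned = pin_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
	if (pinned != 1)
		return pinned < 0 ? pinned : -EFAULT;

	/* ... DMA into, or direct access to, the pinned page goes here ... */

	unpin_user_page(page);	/* never plain put_page() for a pinned page */
	return 0;
}
#endif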
/*
 * put_user_page() - release a page obtained using get_user_pages() or
 *                   follow_page(FOLL_GET)
 * @page:            pointer to page to be released
 *
 * Pages that were obtained via get_user_pages()/follow_page(FOLL_GET) must be
 * released via put_user_page.
 * note: If it's not a page from GUP or follow_page(FOLL_GET), it's harmless.
 */
void put_user_page(struct page *page)
{
	struct page *head = compound_head(page);

	reset_page_pinner(head, compound_order(head));
	put_page(page);
}
EXPORT_SYMBOL(put_user_page);
/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!PageDirty(page))
			set_page_dirty_lock(page);
		unpin_user_page(page);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
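
/*
 * Editor's sketch (not part of mm/gup.c): how a driver that DMAs *into* user
 * pages might use the helper above. pin_user_pages_fast() and
 * unpin_user_pages_dirty_lock() are the real APIs; the function name, @nr
 * plumbing, and the device step are placeholders.
 */
#if 0	/* illustration only */
static int example_receive_into_user(unsigned long uaddr, int nr)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}

	/* ... the device writes into the pinned pages here ... */

	/* The pages were modified, so dirty them while unpinning. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	kfree(pages);
	return 0;
}
#endif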
/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;
	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */
	for (index = 0; index < npages; index++)
		unpin_user_page(pages[index]);
}
EXPORT_SYMBOL(unpin_user_pages);
#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
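
/*
 * Editor's note (not part of mm/gup.c): the helper above, as a truth table.
 * A FOLL_FORCE write through a read-only pte is only allowed once the COW
 * cycle has happened (FOLL_COW is set by faultin_page() after a write fault)
 * and the pte is dirty:
 *
 *	pte_write  FOLL_FORCE  FOLL_COW  pte_dirty  ->  writable?
 *	    1           x          x         x            yes
 *	    0           1          1         1            yes
 *	    0       (any other combination)               no
 */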
static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case). Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_lock is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
		int ret;

		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else if (flags & FOLL_SPLIT) {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		} else {	/* flags & FOLL_SPLIT_PMD */
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}
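
/*
 * Editor's note (not part of mm/gup.c): the ctx->page_mask assignment above
 * is how a THP lookup tells the caller "this mapping covers HPAGE_PMD_NR
 * small pages". __get_user_pages() (further down in this file, past the
 * truncation point of this excerpt) uses it to batch-fill its output array
 * instead of re-walking the page tables once per PAGE_SIZE, roughly:
 *
 *	page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
 */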
static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}
/**
* follow_page_mask - look up a page descriptor from a user-virtual address
* @vma: vm_area_struct mapping @address
* @address: virtual address to look up
* @flags: flags modifying lookup behaviour
* @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
* pointer to output page_mask
*
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
*
* When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
* the device's dev_pagemap metadata to avoid repeating expensive lookups.
*
* On output, the @ctx->page_mask is set according to the size of the page.
*
* Return: the mapped (struct page *), %NULL if no mapping exists, or
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*/
static struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct follow_page_context *ctx)
{
pgd_t *pgd;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
ctx->page_mask = 0;
/* make this handle hugepd */
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
return page;
}
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
return no_page_table(vma, flags);
if (pgd_huge(*pgd)) {
page = follow_huge_pgd(mm, address, pgd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
page = follow_huge_pd(vma, address,
__hugepd(pgd_val(*pgd)), flags,
PGDIR_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
}
return follow_p4d_mask(vma, address, pgd, flags, ctx);
}
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags)
{
struct follow_page_context ctx = { NULL };
struct page *page;
page = follow_page_mask(vma, address, foll_flags, &ctx);
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
return page;
}
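/*
 * A minimal usage sketch for follow_page() (hypothetical caller, not code
 * from this file; declarations trimmed). The mmap_lock must be held across
 * the call, and FOLL_GET is needed if the page is used after the lock is
 * dropped:
 *
 *	mmap_read_lock(mm);
 *	page = follow_page(vma, address, FOLL_GET);
 *	mmap_read_unlock(mm);
 *	if (page && !IS_ERR(page)) {
 *		... access the page contents ...
 *		put_page(page);
 *	}
 */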
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret = -EFAULT;
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return -EFAULT;
if (address > TASK_SIZE)
pgd = pgd_offset_k(address);
else
pgd = pgd_offset_gate(mm, address);
if (pgd_none(*pgd))
return -EFAULT;
p4d = p4d_offset(pgd, address);
if (p4d_none(*p4d))
return -EFAULT;
pud = pud_offset(p4d, address);
if (pud_none(*pud))
return -EFAULT;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return -EFAULT;
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, address);
if (pte_none(*pte))
goto unmap;
*vma = get_gate_vma(mm);
if (!page)
goto out;
*page = vm_normal_page(*vma, address, *pte);
if (!*page) {
if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
goto unmap;
*page = pte_page(*pte);
}
if (unlikely(!try_grab_page(*page, gup_flags))) {
ret = -ENOMEM;
goto unmap;
}
out:
ret = 0;
unmap:
pte_unmap(pte);
return ret;
}
/*
* mmap_lock must be held on entry. If @locked != NULL and *@flags
* does not include FOLL_NOWAIT, the mmap_lock may be released. If it
* is, *@locked will be set to 0 and -EBUSY returned.
*/
static int faultin_page(struct vm_area_struct *vma,
unsigned long address, unsigned int *flags, int *locked)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
if (locked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (*flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
if (*flags & FOLL_TRIED) {
/*
* Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
* can co-exist
*/
fault_flags |= FAULT_FLAG_TRIED;
}
ret = handle_mm_fault(vma, address, fault_flags, NULL);
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, *flags);
if (err)
return err;
BUG();
}
if (ret & VM_FAULT_RETRY) {
if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
*locked = 0;
return -EBUSY;
}
/*
* The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
* necessary, even if maybe_mkwrite decided not to set pte_write. We
* can thus safely do subsequent page lookups as if they were reads.
* But only do so when looping for pte_write is futile: in some cases
* userspace may also want to write to the user page we got, which a
* read fault here might prevent (a readonly page might get reCOWed by
* a userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
*flags |= FOLL_COW;
return 0;
}
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
vm_flags_t vm_flags = vma->vm_flags;
int write = (gup_flags & FOLL_WRITE);
int foreign = (gup_flags & FOLL_REMOTE);
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
return -EFAULT;
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
/*
* We used to let the write,force case do COW in a
* VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
* set a breakpoint in a read-only mapping of an
* executable, without corrupting the file (yet only
* when that file had been opened for writing!).
* Anon pages in shared mappings are surprising: now
* just reject it.
*/
if (!is_cow_mapping(vm_flags))
return -EFAULT;
}
} else if (!(vm_flags & VM_READ)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
/*
* Is there actually any vma we can reach here which does not
* have VM_MAYREAD set?
*/
if (!(vm_flags & VM_MAYREAD))
return -EFAULT;
}
/*
* gups are always data accesses, not instruction
* fetches, so execute=false here
*/
if (!arch_vma_access_permitted(vma, write, false, foreign))
return -EFAULT;
return 0;
}
/**
* __get_user_pages() - pin user pages in memory
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @locked: whether we're still with the mmap_lock held
*
* Returns either number of pages pinned (which may be less than the
* number requested), or an error. Details about the return value:
*
* -- If nr_pages is 0, returns 0.
* -- If nr_pages is >0, but no pages were pinned, returns -errno.
* -- If nr_pages is >0, and some pages were pinned, returns the number of
* pages pinned. Again, this may be less than nr_pages.
* -- 0 return value is possible when the fault would need to be retried.
*
* The caller is responsible for releasing returned @pages, via put_page().
*
* @vmas are valid only as long as mmap_lock is held.
*
* Must be called with mmap_lock held. It may be released. See below.
*
* __get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* __get_user_pages returns, and there may even be a completely different
* page there in some cases (e.g. if mmapped pagecache has been invalidated
* and subsequently refaulted). However, it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
* the page is written to, set_page_dirty (or set_page_dirty_lock, as
* appropriate) must be called after the page is finished with, and
* before put_page is called.
*
* If @locked != NULL, *@locked will be set to 0 when mmap_lock is
* released by an up_read(). That can happen if @gup_flags does not
* have FOLL_NOWAIT.
*
* A caller using such a combination of @locked and @gup_flags
* must therefore hold the mmap_lock for reading only, and recognize
* when it's been released. Otherwise, it must be held for either
* reading or writing and will not be released.
*
* In most cases, get_user_pages or get_user_pages_fast should be used
* instead of __get_user_pages. __get_user_pages should be used only if
* you need some special @gup_flags.
*/
static long __get_user_pages(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
long ret = 0, i = 0;
struct vm_area_struct *vma = NULL;
struct follow_page_context ctx = { NULL };
if (!nr_pages)
return 0;
start = untagged_addr(start);
VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
/*
* If FOLL_FORCE is set then do not force a full fault as the hinting
* fault information is unrelated to the reference behaviour of a task
* using the address space
*/
if (!(gup_flags & FOLL_FORCE))
gup_flags |= FOLL_NUMA;
do {
struct page *page;
unsigned int foll_flags = gup_flags;
unsigned int page_increm;
/* first iteration or crossing a vma boundary */
if (!vma || start >= vma->vm_end) {
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
ret = get_gate_page(mm, start & PAGE_MASK,
gup_flags, &vma,
pages ? &pages[i] : NULL);
if (ret)
goto out;
ctx.page_mask = 0;
goto next_page;
}
if (!vma || check_vma_flags(vma, gup_flags)) {
ret = -EFAULT;
goto out;
}
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
gup_flags, locked);
if (locked && *locked == 0) {
/*
* We've got a VM_FAULT_RETRY
* and we've lost mmap_lock.
* We must stop here.
*/
BUG_ON(gup_flags & FOLL_NOWAIT);
BUG_ON(ret != 0);
goto out;
}
continue;
}
}
retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
* potentially allocating memory.
*/
if (fatal_signal_pending(current)) {
ret = -EINTR;
goto out;
}
cond_resched();
page = follow_page_mask(vma, start, foll_flags, &ctx);
if (!page) {
ret = faultin_page(vma, start, &foll_flags, locked);
switch (ret) {
case 0:
goto retry;
case -EBUSY:
ret = 0;
fallthrough;
case -EFAULT:
case -ENOMEM:
case -EHWPOISON:
goto out;
case -ENOENT:
goto next_page;
}
BUG();
} else if (PTR_ERR(page) == -EEXIST) {
/*
* Proper page table entry exists, but no corresponding
* struct page.
*/
goto next_page;
} else if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
flush_dcache_page(page);
ctx.page_mask = 0;
}
next_page:
if (vmas) {
vmas[i] = vma;
ctx.page_mask = 0;
}
page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
if (page_increm > nr_pages)
page_increm = nr_pages;
i += page_increm;
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages);
out:
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
return i ? i : ret;
}
static bool vma_permits_fault(struct vm_area_struct *vma,
unsigned int fault_flags)
{
bool write = !!(fault_flags & FAULT_FLAG_WRITE);
bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
if (!(vm_flags & vma->vm_flags))
return false;
/*
* The architecture might have a hardware protection
* mechanism other than read/write that can deny access.
*
* gup always represents data access, not instruction
* fetches, so execute=false here:
*/
if (!arch_vma_access_permitted(vma, write, false, foreign))
return false;
return true;
}
/**
* fixup_user_fault() - manually resolve a user page fault
* @mm: mm_struct of target mm
* @address: user address
* @fault_flags: flags to pass down to handle_mm_fault()
* @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
* does not allow retry. If NULL, the caller must guarantee
* that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
*
* This is meant to be called in the specific scenario where, for locking
* reasons, we try to access user memory in atomic context (within a
* pagefault_disable() section), that access fails with -EFAULT, and we want
* to resolve the user fault before trying again.
*
* Typically this is meant to be used by the futex code.
*
* The main difference with get_user_pages() is that this function will
* unconditionally call handle_mm_fault() which will in turn perform all the
* necessary SW fixup of the dirty and young bits in the PTE, while
* get_user_pages() only guarantees to update these in the struct page.
*
* This is important for some architectures where those bits also gate the
* access permission to the page because they are maintained in software. On
* such architectures, gup() will not be enough to make a subsequent access
* succeed.
*
* This function will not return with an unlocked mmap_lock. So it does not
* have the same semantics w.r.t. the @mm->mmap_lock as filemap_fault() does.
*/
int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked)
{
struct vm_area_struct *vma;
vm_fault_t ret, major = 0;
address = untagged_addr(address);
if (unlocked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
retry:
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
return -EFAULT;
if (!vma_permits_fault(vma, fault_flags))
return -EFAULT;
if ((fault_flags & FAULT_FLAG_KILLABLE) &&
fatal_signal_pending(current))
return -EINTR;
ret = handle_mm_fault(vma, address, fault_flags, NULL);
major |= ret & VM_FAULT_MAJOR;
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, 0);
if (err)
return err;
BUG();
}
if (ret & VM_FAULT_RETRY) {
mmap_read_lock(mm);
*unlocked = true;
fault_flags |= FAULT_FLAG_TRIED;
goto retry;
}
return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
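/*
 * A sketch of the scenario described above, loosely modeled on the futex
 * code (hypothetical caller; uaddr is a user pointer and declarations are
 * trimmed). The atomic attempt comes first; only on failure do we drop
 * into this sleeping fixup path and then retry:
 *
 *	pagefault_disable();
 *	ret = __put_user(val, uaddr);
 *	pagefault_enable();
 *	if (ret) {
 *		mmap_read_lock(mm);
 *		ret = fixup_user_fault(mm, (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE, NULL);
 *		mmap_read_unlock(mm);
 *		if (!ret)
 *			... retry the atomic access ...
 *	}
 */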
/*
* Please note that this function, unlike __get_user_pages, will not
* return 0 for nr_pages > 0 without FOLL_NOWAIT.
*/
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
int *locked,
unsigned int flags)
{
long ret, pages_done;
bool lock_dropped;
if (locked) {
/* if VM_FAULT_RETRY can be returned, vmas become invalid */
BUG_ON(vmas);
/* check caller initialized locked */
BUG_ON(*locked != 1);
}
if (flags & FOLL_PIN)
atomic_set(&mm->has_pinned, 1);
/*
* FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
* is to set FOLL_GET if the caller wants pages[] filled in (but has
* carelessly failed to specify FOLL_GET), so keep doing that, but only
* for FOLL_GET, not for the newer FOLL_PIN.
*
* FOLL_PIN always expects pages to be non-null, but no need to assert
* that here, as any failures will be obvious enough.
*/
if (pages && !(flags & FOLL_PIN))
flags |= FOLL_GET;
pages_done = 0;
lock_dropped = false;
for (;;) {
ret = __get_user_pages(mm, start, nr_pages, flags, pages,
vmas, locked);
if (!locked)
/* VM_FAULT_RETRY couldn't trigger, bypass */
return ret;
/* VM_FAULT_RETRY cannot return errors */
if (!*locked) {
BUG_ON(ret < 0);
BUG_ON(ret >= nr_pages);
}
if (ret > 0) {
nr_pages -= ret;
pages_done += ret;
if (!nr_pages)
break;
}
if (*locked) {
/*
* VM_FAULT_RETRY didn't trigger or it was a
* FOLL_NOWAIT.
*/
if (!pages_done)
pages_done = ret;
break;
}
/*
* VM_FAULT_RETRY triggered, so seek to the faulting offset.
* For the prefault case (!pages) we only update counts.
*/
if (likely(pages))
pages += ret;
start += ret << PAGE_SHIFT;
lock_dropped = true;
retry:
/*
* Repeat on the address that fired VM_FAULT_RETRY
* with both FAULT_FLAG_ALLOW_RETRY and
* FAULT_FLAG_TRIED. Note that GUP can be interrupted
* by fatal signals, so we need to check it before we
* start trying again otherwise it can loop forever.
*/
if (fatal_signal_pending(current)) {
if (!pages_done)
pages_done = -EINTR;
break;
}
ret = mmap_read_lock_killable(mm);
if (ret) {
BUG_ON(ret > 0);
if (!pages_done)
pages_done = ret;
break;
}
*locked = 1;
ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
pages, NULL, locked);
if (!*locked) {
/* Continue to retry until we succeeded */
BUG_ON(ret != 0);
goto retry;
}
if (ret != 1) {
BUG_ON(ret > 1);
if (!pages_done)
pages_done = ret;
break;
}
nr_pages--;
pages_done++;
if (!nr_pages)
break;
if (likely(pages))
pages++;
start += PAGE_SIZE;
}
if (lock_dropped && *locked) {
/*
* We must let the caller know we temporarily dropped the lock
* and so the critical section protected by it was lost.
*/
mmap_read_unlock(mm);
*locked = 0;
}
return pages_done;
}
/**
* populate_vma_page_range() - populate a range of pages in the vma.
* @vma: target vma
* @start: start address
* @end: end address
* @locked: whether the mmap_lock is still held
*
* This takes care of mlocking the pages too if VM_LOCKED is set.
*
* Return either number of pages pinned in the vma, or a negative error
* code on error.
*
* vma->vm_mm->mmap_lock must be held.
*
* If @locked is NULL, it may be held for read or write and will
* be unperturbed.
*
* If @locked is non-NULL, it must be held for read only and may be
* released. If it's released, *@locked will be set to 0.
*/
long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *locked)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
VM_BUG_ON(start & ~PAGE_MASK);
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma);
mmap_assert_locked(mm);
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
if (vma->vm_flags & VM_LOCKONFAULT)
gup_flags &= ~FOLL_POPULATE;
/*
* We want to touch writable mappings with a write fault in order
* to break COW, except for shared mappings because these don't COW
* and we would not want to dirty them for nothing.
*/
if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
gup_flags |= FOLL_WRITE;
/*
* We want mlock to succeed for regions that have any permissions
* other than PROT_NONE.
*/
if (vma_is_accessible(vma))
gup_flags |= FOLL_FORCE;
/*
* We made sure addr is within a VMA, so the following will
* not result in a stack expansion that recurses back here.
*/
return __get_user_pages(mm, start, nr_pages, gup_flags,
NULL, NULL, locked);
}
/*
* __mm_populate - populate and/or mlock pages within a range of address space.
*
* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
* flags. VMAs must be already marked with the desired vm_flags, and
* mmap_lock must not be held.
*/
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
struct mm_struct *mm = current->mm;
unsigned long end, nstart, nend;
struct vm_area_struct *vma = NULL;
int locked = 0;
long ret = 0;
end = start + len;
for (nstart = start; nstart < end; nstart = nend) {
/*
* We want to fault in pages for [nstart; end) address range.
* Find first corresponding VMA.
*/
if (!locked) {
locked = 1;
mmap_read_lock(mm);
vma = find_vma(mm, nstart);
} else if (nstart >= vma->vm_end)
vma = vma->vm_next;
if (!vma || vma->vm_start >= end)
break;
/*
* Set [nstart; nend) to intersection of desired address
* range with the first VMA. Also, skip undesirable VMA types.
*/
nend = min(end, vma->vm_end);
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
continue;
if (nstart < vma->vm_start)
nstart = vma->vm_start;
/*
* Now fault in a range of pages. populate_vma_page_range()
* double checks the vma flags, so that it won't mlock pages
* if the vma was already munlocked.
*/
ret = populate_vma_page_range(vma, nstart, nend, &locked);
if (ret < 0) {
if (ignore_errors) {
ret = 0;
continue; /* continue at next VMA */
}
break;
}
nend = nstart + ret * PAGE_SIZE;
ret = 0;
}
if (locked)
mmap_read_unlock(mm);
return ret; /* 0 or negative error code */
}
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
unsigned long nr_pages, struct page **pages,
struct vm_area_struct **vmas, int *locked,
unsigned int foll_flags)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
int i;
/* calculate required read or write permissions.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
vm_flags = (foll_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= (foll_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
if (!vma)
goto finish_or_fault;
/* protect what we can, including chardevs */
if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
goto finish_or_fault;
if (pages) {
pages[i] = virt_to_page(start);
if (pages[i])
get_page(pages[i]);
}
if (vmas)
vmas[i] = vma;
start = (start + PAGE_SIZE) & PAGE_MASK;
}
return i;
finish_or_fault:
return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
*
* Returns struct page pointer of user page pinned for dump,
* to be freed afterwards by put_page().
*
* Returns NULL on any kind of failure - a hole must then be inserted into
* the corefile, to preserve alignment with its headers; and also returns
* NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
* allowing a hole to be left in the corefile to save disk space.
*
* Called without mmap_lock (takes and releases the mmap_lock by itself).
*/
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
struct mm_struct *mm = current->mm;
struct page *page;
int locked = 1;
int ret;
if (mmap_read_lock_killable(mm))
return NULL;
ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
FOLL_FORCE | FOLL_DUMP | FOLL_GET);
if (locked)
mmap_read_unlock(mm);
return (ret == 1) ? page : NULL;
}
#endif /* CONFIG_ELF_CORE */
#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
long i;
struct vm_area_struct *vma_prev = NULL;
for (i = 0; i < nr_pages; i++) {
struct vm_area_struct *vma = vmas[i];
if (vma == vma_prev)
continue;
vma_prev = vma;
if (vma_is_fsdax(vma))
return true;
}
return false;
}
#ifdef CONFIG_CMA
static long check_and_migrate_cma_pages(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
unsigned int gup_flags)
{
unsigned long i, isolation_error_count;
bool drain_allow;
LIST_HEAD(cma_page_list);
long ret = nr_pages;
struct page *prev_head, *head;
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
};
check_again:
prev_head = NULL;
isolation_error_count = 0;
drain_allow = true;
for (i = 0; i < nr_pages; i++) {
head = compound_head(pages[i]);
if (head == prev_head)
continue;
prev_head = head;
/*
* Since we are going to pin these entries, if we get a page from
* the CMA zone we might as well migrate it out of CMA first, if
* possible.
*/
if (is_migrate_cma_page(head)) {
if (PageHuge(head)) {
if (!isolate_huge_page(head, &cma_page_list))
isolation_error_count++;
} else {
if (!PageLRU(head) && drain_allow) {
lru_add_drain_all();
drain_allow = false;
}
if (isolate_lru_page(head)) {
isolation_error_count++;
continue;
}
list_add_tail(&head->lru, &cma_page_list);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON +
page_is_file_lru(head),
thp_nr_pages(head));
}
}
}
/*
* If the list is empty and there were no isolation errors, then all
* pages are already in the correct zone.
*/
if (list_empty(&cma_page_list) && !isolation_error_count)
return ret;
if (!list_empty(&cma_page_list)) {
/*
* drop the above get_user_pages reference.
*/
if (gup_flags & FOLL_PIN)
unpin_user_pages(pages, nr_pages);
else
for (i = 0; i < nr_pages; i++)
put_page(pages[i]);
ret = migrate_pages(&cma_page_list, alloc_migration_target,
NULL, (unsigned long)&mtc, MIGRATE_SYNC,
MR_CONTIG_RANGE);
if (ret) {
if (!list_empty(&cma_page_list))
putback_movable_pages(&cma_page_list);
return ret > 0 ? -ENOMEM : ret;
}
/* We unpinned pages before migration, pin them again */
ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
NULL, gup_flags);
if (ret <= 0)
return ret;
nr_pages = ret;
}
/*
* check again because pages were unpinned, and we also might have
* had isolation errors and need more pages to migrate.
*/
goto check_again;
}
#else
static long check_and_migrate_cma_pages(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
unsigned int gup_flags)
{
return nr_pages;
}
#endif /* CONFIG_CMA */
/*
* __gup_longterm_locked() is a wrapper for __get_user_pages_locked, which
* allows us to process the FOLL_LONGTERM flag.
*/
static long __gup_longterm_locked(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
unsigned int gup_flags)
{
struct vm_area_struct **vmas_tmp = vmas;
unsigned long flags = 0;
long rc, i;
if (gup_flags & FOLL_LONGTERM) {
if (!pages)
return -EINVAL;
if (!vmas_tmp) {
vmas_tmp = kcalloc(nr_pages,
sizeof(struct vm_area_struct *),
GFP_KERNEL);
if (!vmas_tmp)
return -ENOMEM;
}
flags = memalloc_nocma_save();
}
rc = __get_user_pages_locked(mm, start, nr_pages, pages,
vmas_tmp, NULL, gup_flags);
if (gup_flags & FOLL_LONGTERM) {
if (rc < 0)
goto out;
if (check_dax_vmas(vmas_tmp, rc)) {
if (gup_flags & FOLL_PIN)
unpin_user_pages(pages, rc);
else
for (i = 0; i < rc; i++)
put_page(pages[i]);
rc = -EOPNOTSUPP;
goto out;
}
rc = check_and_migrate_cma_pages(mm, start, rc, pages,
vmas_tmp, gup_flags);
out:
memalloc_nocma_restore(flags);
}
if (vmas_tmp != vmas)
kfree(vmas_tmp);
return rc;
}
#else /* !CONFIG_FS_DAX && !CONFIG_CMA */
static __always_inline long __gup_longterm_locked(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
unsigned int flags)
{
return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
NULL, flags);
}
#endif /* CONFIG_FS_DAX || CONFIG_CMA */
static bool is_valid_gup_flags(unsigned int gup_flags)
{
/*
* FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
* never directly by the caller, so enforce that with an assertion:
*/
if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
return false;
/*
* FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
* that is, FOLL_LONGTERM is a specific, more restrictive case of
* FOLL_PIN.
*/
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return false;
return true;
}
#ifdef CONFIG_MMU
static long __get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
/*
* Parts of FOLL_LONGTERM behavior are incompatible with
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
* vmas. However, this only comes up if locked is set, and there are
* callers that do request FOLL_LONGTERM, but do not set locked. So,
* allow what we can.
*/
if (gup_flags & FOLL_LONGTERM) {
if (WARN_ON_ONCE(locked))
return -EINVAL;
/*
* This will check the vmas (even if our vmas arg is NULL)
* and return -EOPNOTSUPP if DAX isn't allowed in this case:
*/
return __gup_longterm_locked(mm, start, nr_pages, pages,
vmas, gup_flags | FOLL_TOUCH |
FOLL_REMOTE);
}
return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
locked,
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
/**
* get_user_pages_remote() - pin user pages in memory
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @locked: pointer to lock flag indicating whether lock is held and
* subsequently whether VM_FAULT_RETRY functionality can be
* utilised. Lock must initially be held.
*
* Returns either number of pages pinned (which may be less than the
* number requested), or an error. Details about the return value:
*
* -- If nr_pages is 0, returns 0.
* -- If nr_pages is >0, but no pages were pinned, returns -errno.
* -- If nr_pages is >0, and some pages were pinned, returns the number of
* pages pinned. Again, this may be less than nr_pages.
*
* The caller is responsible for releasing returned @pages, via put_page().
*
* @vmas are valid only as long as mmap_lock is held.
*
* Must be called with mmap_lock held for read or write.
*
* get_user_pages_remote walks a process's page tables and takes a reference
* to each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* get_user_pages_remote returns, and there may even be a completely different
* page there in some cases (e.g. if mmapped pagecache has been invalidated
* and subsequently refaulted). However, it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
* is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
* be called after the page is finished with, and before put_page is called.
*
* get_user_pages_remote is typically used for fewer-copy IO operations,
* to get a handle on the memory by some means other than accesses
* via the user virtual addresses. The pages may be submitted for
* DMA to devices or accessed via their kernel linear mapping (via the
* kmap APIs). Care should be taken to use the correct cache flushing APIs.
*
* See also get_user_pages_fast, for performance critical applications.
*
* get_user_pages_remote should be phased out in favor of
* get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
* should use get_user_pages_remote because it cannot pass
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
if (!is_valid_gup_flags(gup_flags))
return -EINVAL;
return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
pages, vmas, locked);
}
EXPORT_SYMBOL(get_user_pages_remote);
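/*
 * A sketch of remote access (hypothetical caller; the pattern loosely
 * follows what access_remote_vm() does). Here @mm was taken with
 * get_task_mm() and is released by the caller afterwards:
 *
 *	mmap_read_lock(mm);
 *	got = get_user_pages_remote(mm, addr, 1, FOLL_FORCE, &page,
 *				    NULL, NULL);
 *	mmap_read_unlock(mm);
 *	if (got == 1) {
 *		kaddr = kmap(page);
 *		... copy to/from kaddr ...
 *		kunmap(page);
 *		put_page(page);
 *	}
 */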
#else /* CONFIG_MMU */
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
return 0;
}
static long __get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
return 0;
}
#endif /* !CONFIG_MMU */
/**
* get_user_pages() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
*
* This is the same as get_user_pages_remote(), just with a less-flexible
* calling convention where we assume that the mm being operated on belongs to
* the current task, and doesn't allow passing of a locked parameter. We also
* obviously don't pass FOLL_REMOTE in here.
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
if (!is_valid_gup_flags(gup_flags))
return -EINVAL;
return __gup_longterm_locked(current->mm, start, nr_pages,
pages, vmas, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
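/*
 * A minimal sketch (hypothetical caller; NR and the declarations are made
 * up for illustration) following the rules above: pages that were written
 * to must be marked dirty before the reference is dropped:
 *
 *	mmap_read_lock(current->mm);
 *	got = get_user_pages(start, NR, FOLL_WRITE, pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	for (i = 0; i < got; i++) {
 *		... write into pages[i] through kmap() ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */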
/**
* get_user_pages_locked() is suitable to replace the form:
*
* mmap_read_lock(mm);
* do_something()
* get_user_pages(..., pages, NULL);
* mmap_read_unlock(mm);
*
* to:
*
* int locked = 1;
* mmap_read_lock(mm);
* do_something()
* get_user_pages_locked(..., pages, &locked);
* if (locked)
* mmap_read_unlock(mm);
*
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @locked: pointer to lock flag indicating whether lock is held and
* subsequently whether VM_FAULT_RETRY functionality can be
* utilised. Lock must initially be held.
*
* We can leverage the VM_FAULT_RETRY functionality in the page fault
* paths better by using either get_user_pages_locked() or
* get_user_pages_unlocked().
*
*/
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked)
{
/*
* FIXME: Current FOLL_LONGTERM behavior is incompatible with
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
* vmas. As there are no users of this flag in this call we simply
* disallow this option for now.
*/
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return -EINVAL;
/*
* FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
* never directly by the caller, so enforce that:
*/
if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
return -EINVAL;
return __get_user_pages_locked(current->mm, start, nr_pages,
pages, NULL, locked,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
/*
* get_user_pages_unlocked() is suitable to replace the form:
*
* mmap_read_lock(mm);
* get_user_pages(..., pages, NULL);
* mmap_read_unlock(mm);
*
* with:
*
* get_user_pages_unlocked(..., pages);
*
* It is functionally equivalent to get_user_pages_fast so
* get_user_pages_fast should be used instead if specific gup_flags
* (e.g. FOLL_FORCE) are not required.
*/
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
{
struct mm_struct *mm = current->mm;
int locked = 1;
long ret;
/*
* FIXME: Current FOLL_LONGTERM behavior is incompatible with
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
* vmas. As there are no users of this flag in this call we simply
* disallow this option for now.
*/
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return -EINVAL;
mmap_read_lock(mm);
ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
&locked, gup_flags | FOLL_TOUCH);
if (locked)
mmap_read_unlock(mm);
return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
/*
* Fast GUP
*
* get_user_pages_fast attempts to pin user pages by walking the page
* tables directly and avoids taking locks. Thus the walker needs to be
* protected from page table pages being freed from under it, and should
* block any THP splits.
*
* One way to achieve this is to have the walker disable interrupts, and
* rely on IPIs from the TLB flushing code blocking before the page table
* pages are freed. This is unsuitable for architectures that do not need
* to broadcast an IPI when invalidating TLBs.
*
* Another way to achieve this is to batch up page-table pages belonging
* to more than one mm_user, then schedule an rcu_sched callback to free
* those pages. Disabling interrupts will allow the fast_gup walker to both block
* the rcu_sched callback, and an IPI that we broadcast for splitting THPs
* (which is a relatively rare event). The code below adopts this strategy.
*
* Before activating this code, please be aware that the following assumptions
* are currently made:
*
* *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is
* used to free pages containing page tables, or TLB flushing requires an
* IPI broadcast.
*
* *) ptes can be read atomically by the architecture.
*
* *) access_ok is sufficient to validate userspace address ranges.
*
* The last two assumptions can be relaxed by the addition of helper functions.
*
* This code is based heavily on the PowerPC implementation by Nick Piggin.
*/
#ifdef CONFIG_HAVE_FAST_GUP
#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
/*
* WARNING: only to be used in the get_user_pages_fast() implementation.
*
* With get_user_pages_fast(), we walk down the pagetables without taking any
* locks. For this we would like to load the pointers atomically, but sometimes
* that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What
* we do have is the guarantee that a PTE will only either go from not present
* to present, or present to not present or both -- it will not switch to a
* completely different present page without a TLB flush in between; something
* that we are blocking by holding interrupts off.
*
* Setting ptes from not present to present goes:
*
* ptep->pte_high = h;
* smp_wmb();
* ptep->pte_low = l;
*
* And present to not present goes:
*
* ptep->pte_low = 0;
* smp_wmb();
* ptep->pte_high = 0;
*
* We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
* We load pte_high *after* loading pte_low, which ensures we don't see an older
* value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
* picked up a changed pte high. We might have gotten rubbish values from
* pte_low and pte_high, but we are guaranteed that pte_low will not have the
* present bit set *unless* it is 'l'. Because get_user_pages_fast() only
* operates on present ptes we're safe.
*/
static inline pte_t gup_get_pte(pte_t *ptep)
{
pte_t pte;
do {
pte.pte_low = ptep->pte_low;
smp_rmb();
pte.pte_high = ptep->pte_high;
smp_rmb();
} while (unlikely(pte.pte_low != ptep->pte_low));
return pte;
}
#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
/*
* We require that the PTE can be read atomically.
*/
static inline pte_t gup_get_pte(pte_t *ptep)
{
return ptep_get(ptep);
}
#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
unsigned int flags,
struct page **pages)
{
while ((*nr) - nr_start) {
struct page *page = pages[--(*nr)];
ClearPageReferenced(page);
if (flags & FOLL_PIN)
unpin_user_page(page);
else
put_page(page);
}
}
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
unsigned int flags, struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
int nr_start = *nr, ret = 0;
pte_t *ptep, *ptem;
ptem = ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *head, *page;
/*
* Similar to the PMD case below, NUMA hinting must take the slow
* path using the pte_protnone check.
*/
if (pte_protnone(pte))
goto pte_unmap;
if (!pte_access_permitted(pte, flags & FOLL_WRITE))
goto pte_unmap;
if (pte_devmap(pte)) {
if (unlikely(flags & FOLL_LONGTERM))
goto pte_unmap;
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, flags, pages);
goto pte_unmap;
}
} else if (pte_special(pte))
goto pte_unmap;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = try_grab_compound_head(page, 1, flags);
if (!head)
goto pte_unmap;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_compound_head(head, 1, flags);
goto pte_unmap;
}
VM_BUG_ON_PAGE(compound_head(page) != head, page);
/*
* We need to make the page accessible if and only if we are
* going to access its content (the FOLL_PIN case). Please
* see Documentation/core-api/pin_user_pages.rst for
* details.
*/
if (flags & FOLL_PIN) {
ret = arch_make_page_accessible(page);
if (ret) {
unpin_user_page(page);
goto pte_unmap;
}
}
SetPageReferenced(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
ret = 1;
pte_unmap:
if (pgmap)
put_dev_pagemap(pgmap);
pte_unmap(ptem);
return ret;
}
#else
/*
* If we can't determine whether or not a pte is special, then fail immediately
* for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
* to be special.
*
* For a futex to be placed on a THP tail page, get_futex_key requires a
* get_user_pages_fast_only implementation that can pin pages. Thus it's still
* useful to have gup_huge_pmd even if we can't operate on ptes.
*/
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
unsigned int flags, struct page **pages, int *nr)
{
return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
int nr_start = *nr;
struct dev_pagemap *pgmap = NULL;
do {
struct page *page = pfn_to_page(pfn);
pgmap = get_dev_pagemap(pfn, pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, flags, pages);
return 0;
}
SetPageReferenced(page);
pages[*nr] = page;
if (unlikely(!try_grab_page(page, flags))) {
undo_dev_pagemap(nr, nr_start, flags, pages);
return 0;
}
(*nr)++;
pfn++;
} while (addr += PAGE_SIZE, addr != end);
if (pgmap)
put_dev_pagemap(pgmap);
return 1;
}
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
return 0;
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
undo_dev_pagemap(nr, nr_start, flags, pages);
return 0;
}
return 1;
}
static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
return 0;
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
undo_dev_pagemap(nr, nr_start, flags, pages);
return 0;
}
return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
#endif
static int record_subpages(struct page *page, unsigned long addr,
unsigned long end, struct page **pages)
{
int nr;
for (nr = 0; addr != end; addr += PAGE_SIZE)
pages[nr++] = page++;
return nr;
}
#ifdef CONFIG_ARCH_HAS_HUGEPD
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
unsigned long sz)
{
unsigned long __boundary = (addr + sz) & ~(sz-1);
return (__boundary - 1 < end - 1) ? __boundary : end;
}
static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
unsigned long pte_end;
struct page *head, *page;
pte_t pte;
int refs;
pte_end = (addr + sz) & ~(sz-1);
if (pte_end < end)
end = pte_end;
pte = huge_ptep_get(ptep);
if (!pte_access_permitted(pte, flags & FOLL_WRITE))
return 0;
/* hugepages are never "special" */
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
head = pte_page(pte);
page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
head = try_grab_compound_head(head, refs, flags);
if (!head)
return 0;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_compound_head(head, refs, flags);
return 0;
}
*nr += refs;
SetPageReferenced(head);
return 1;
}
static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
unsigned int pdshift, unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
pte_t *ptep;
unsigned long sz = 1UL << hugepd_shift(hugepd);
unsigned long next;
ptep = hugepte_offset(hugepd, addr, pdshift);
do {
next = hugepte_addr_end(addr, end, sz);
if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
return 0;
} while (ptep++, addr = next, addr != end);
return 1;
}
#else
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
unsigned int pdshift, unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
return 0;
}
#endif /* CONFIG_ARCH_HAS_HUGEPD */
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
return 0;
if (pmd_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
pages, nr);
}
page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
head = try_grab_compound_head(pmd_page(orig), refs, flags);
if (!head)
return 0;
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
put_compound_head(head, refs, flags);
return 0;
}
*nr += refs;
SetPageReferenced(head);
return 1;
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
if (!pud_access_permitted(orig, flags & FOLL_WRITE))
return 0;
if (pud_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
return __gup_device_huge_pud(orig, pudp, addr, end, flags,
pages, nr);
}
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
head = try_grab_compound_head(pud_page(orig), refs, flags);
if (!head)
return 0;
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
put_compound_head(head, refs, flags);
return 0;
}
*nr += refs;
SetPageReferenced(head);
return 1;
}
static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
int refs;
struct page *head, *page;
if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
return 0;
BUILD_BUG_ON(pgd_devmap(orig));
page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
head = try_grab_compound_head(pgd_page(orig), refs, flags);
if (!head)
return 0;
if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
put_compound_head(head, refs, flags);
return 0;
}
*nr += refs;
SetPageReferenced(head);
return 1;
}
static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
unsigned int flags, struct page **pages, int *nr)
{
unsigned long next;
pmd_t *pmdp;
pmdp = pmd_offset_lockless(pudp, pud, addr);
do {
pmd_t pmd = READ_ONCE(*pmdp);
next = pmd_addr_end(addr, end);
if (!pmd_present(pmd))
return 0;
if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
pmd_devmap(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
if (pmd_protnone(pmd))
return 0;
if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
pages, nr))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
/*
* architectures may use a different format for the hugetlbfs
* pmd than for the THP pmd
*/
if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
PMD_SHIFT, next, flags, pages, nr))
return 0;
} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
return 1;
}
static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
unsigned int flags, struct page **pages, int *nr)
{
unsigned long next;
pud_t *pudp;
pudp = pud_offset_lockless(p4dp, p4d, addr);
do {
pud_t pud = READ_ONCE(*pudp);
next = pud_addr_end(addr, end);
if (unlikely(!pud_present(pud)))
return 0;
if (unlikely(pud_huge(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, flags,
pages, nr))
return 0;
} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
PUD_SHIFT, next, flags, pages, nr))
return 0;
} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
return 0;
} while (pudp++, addr = next, addr != end);
return 1;
}
static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
unsigned int flags, struct page **pages, int *nr)
{
unsigned long next;
p4d_t *p4dp;
p4dp = p4d_offset_lockless(pgdp, pgd, addr);
do {
p4d_t p4d = READ_ONCE(*p4dp);
next = p4d_addr_end(addr, end);
if (p4d_none(p4d))
return 0;
BUILD_BUG_ON(p4d_huge(p4d));
if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
P4D_SHIFT, next, flags, pages, nr))
return 0;
} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
return 0;
} while (p4dp++, addr = next, addr != end);
return 1;
}
static void gup_pgd_range(unsigned long addr, unsigned long end,
unsigned int flags, struct page **pages, int *nr)
{
unsigned long next;
pgd_t *pgdp;
pgdp = pgd_offset(current->mm, addr);
do {
pgd_t pgd = READ_ONCE(*pgdp);
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
return;
if (unlikely(pgd_huge(pgd))) {
if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
pages, nr))
return;
} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
PGDIR_SHIFT, next, flags, pages, nr))
return;
} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
return;
} while (pgdp++, addr = next, addr != end);
}
#else
static inline void gup_pgd_range(unsigned long addr, unsigned long end,
unsigned int flags, struct page **pages, int *nr)
{
}
#endif /* CONFIG_HAVE_FAST_GUP */
#ifndef gup_fast_permitted
/*
* Check if it's allowed to use get_user_pages_fast_only() for the range, or
* we need to fall back to the slow version:
*/
static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
return true;
}
#endif
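/*
 * An architecture that must keep part of the address space out of the
 * lockless walk overrides the helper above in its headers. A sketch of
 * such an override (for illustration only; see the x86 headers for a
 * real example):
 *
 *	static inline bool gup_fast_permitted(unsigned long start,
 *					      unsigned long end)
 *	{
 *		return end <= TASK_SIZE_MAX;
 *	}
 *	#define gup_fast_permitted gup_fast_permitted
 */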
static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
{
int ret;
/*
* FIXME: FOLL_LONGTERM does not work with
* get_user_pages_unlocked() (see comments in that function)
*/
if (gup_flags & FOLL_LONGTERM) {
mmap_read_lock(current->mm);
ret = __gup_longterm_locked(current->mm,
start, nr_pages,
pages, NULL, gup_flags);
mmap_read_unlock(current->mm);
} else {
ret = get_user_pages_unlocked(start, nr_pages,
pages, gup_flags);
}
return ret;
}
static unsigned long lockless_pages_from_mm(unsigned long start,
unsigned long end,
unsigned int gup_flags,
struct page **pages)
{
unsigned long flags;
int nr_pinned = 0;
unsigned seq;
if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
!gup_fast_permitted(start, end))
return 0;
if (gup_flags & FOLL_PIN) {
seq = raw_read_seqcount(&current->mm->write_protect_seq);
if (seq & 1)
return 0;
}
/*
* Disable interrupts. The nested form is used in order to allow full,
* general-purpose use of this routine.
*
* With interrupts disabled, we block page table pages from being freed
* from under us. See struct mmu_table_batch comments in
* include/asm-generic/tlb.h for more details.
*
* We do not adopt an rcu_read_lock() here as we also want to block IPIs
* that come from THPs splitting.
*/
local_irq_save(flags);
gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
local_irq_restore(flags);
/*
* When pinning pages for DMA there could be a concurrent write protect
* from fork() via copy_page_range(); in this case, always fail fast GUP.
*/
if (gup_flags & FOLL_PIN) {
if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
unpin_user_pages(pages, nr_pinned);
return 0;
}
}
return nr_pinned;
}
static int internal_get_user_pages_fast(unsigned long start,
unsigned long nr_pages,
unsigned int gup_flags,
struct page **pages)
{
unsigned long len, end;
unsigned long nr_pinned;
int ret;
if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
FOLL_FORCE | FOLL_PIN | FOLL_GET |
FOLL_FAST_ONLY)))
return -EINVAL;
if (gup_flags & FOLL_PIN)
atomic_set(&current->mm->has_pinned, 1);
if (!(gup_flags & FOLL_FAST_ONLY))
might_lock_read(&current->mm->mmap_lock);
start = untagged_addr(start) & PAGE_MASK;
len = nr_pages << PAGE_SHIFT;
if (check_add_overflow(start, len, &end))
return 0;
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;
nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
return nr_pinned;
/* Slow path: try to get the remaining pages with get_user_pages */
start += nr_pinned << PAGE_SHIFT;
pages += nr_pinned;
ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
pages);
if (ret < 0) {
/*
* The caller has to unpin the pages we already pinned, so
* returning -errno is not an option.
*/
if (nr_pinned)
return nr_pinned;
return ret;
}
return ret + nr_pinned;
}
/**
* get_user_pages_fast_only() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
* number of pages pinned, or 0 if no pages were pinned.
*
* If the architecture does not support this function, it simply returns
* with no pages pinned.
*
* Careful, careful! COW breaking can go either way, so a non-write
* access can get ambiguous page results. If you call this function without
* 'write' set, you'd better be sure that you're ok with that ambiguity.
*/
int get_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
{
int nr_pinned;
/*
* Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
* because gup fast is always a "pin with a +1 page refcount" request.
*
* FOLL_FAST_ONLY is required in order to match the API description of
* this routine: no fall back to regular ("slow") GUP.
*/
gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
pages);
/*
* As specified in the API description above, this routine is not
* allowed to return negative values. However, the common core
* routine internal_get_user_pages_fast() *can* return -errno.
* Therefore, correct for that here:
*/
if (nr_pinned < 0)
nr_pinned = 0;
return nr_pinned;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
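/*
 * A usage sketch (hypothetical caller; declarations trimmed): since this
 * variant never sleeps and never takes mmap_lock, a failure simply means
 * the caller must fall back to a sleeping path later:
 *
 *	if (get_user_pages_fast_only(addr, 1, FOLL_WRITE, &page) == 1) {
 *		... page is held with a +1 refcount ...
 *		put_page(page);
 *	} else {
 *		... defer to a context where regular GUP is allowed ...
 *	}
 */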
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_lock.
* If not successful, it will fall back to taking the lock and
* calling get_user_pages().
*
* Returns number of pages pinned. This may be fewer than the number requested.
* If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
* -errno.
*/
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
{
if (!is_valid_gup_flags(gup_flags))
return -EINVAL;
/*
* The caller may or may not have explicitly set FOLL_GET; either way is
* OK. However, internally (within mm/gup.c), gup fast variants must set
* FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
* request.
*/
gup_flags |= FOLL_GET;
return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
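/*
 * A minimal sketch of a short-term, read-only pin (hypothetical caller;
 * declarations trimmed). No mmap_lock is taken by the caller:
 *
 *	got = get_user_pages_fast(start, 2, 0, pages);
 *	if (got > 0) {
 *		... access the pinned pages ...
 *		while (got--)
 *			put_page(pages[got]);
 *	}
 */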
/**
* pin_user_pages_fast() - pin user pages in memory without taking locks
*
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
* get_user_pages_fast() for documentation on the function arguments, because
* the arguments here are identical.
*
* FOLL_PIN means that the pages must be released via unpin_user_page(). Please
* see Documentation/core-api/pin_user_pages.rst for further details.
*/
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
{
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE(gup_flags & FOLL_GET))
return -EINVAL;
gup_flags |= FOLL_PIN;
return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(pin_user_pages_fast);
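/*
 * A sketch of a DMA-style pin (hypothetical caller; NR is made up for
 * illustration). FOLL_PIN pages must be released with unpin_user_page()
 * or unpin_user_pages(), never with put_page():
 *
 *	got = pin_user_pages_fast(start, NR, FOLL_WRITE | FOLL_LONGTERM,
 *				  pages);
 *	if (got > 0) {
 *		... set up and run the DMA ...
 *		unpin_user_pages(pages, got);
 *	}
 */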
/*
* This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
* is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
*
* The API rules are the same, too: no negative values may be returned.
*/
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
{
int nr_pinned;
/*
* FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
* rules require returning 0, rather than -errno:
*/
if (WARN_ON_ONCE(gup_flags & FOLL_GET))
return 0;
/*
* FOLL_FAST_ONLY is required in order to match the API description of
 * this routine: no fallback to regular ("slow") GUP.
*/
gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
pages);
/*
* This routine is not allowed to return negative values. However,
* internal_get_user_pages_fast() *can* return -errno. Therefore,
* correct for that here:
*/
if (nr_pinned < 0)
nr_pinned = 0;
return nr_pinned;
}
EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
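
/*
 * Illustrative sketch (an editorial addition, not part of mm/gup.c): the
 * same no-sleep, no-fallback situation as the get_user_pages_fast_only()
 * sketch above, but with FOLL_PIN semantics, so release goes through
 * unpin_user_pages(). The helper name is hypothetical.
 */
static int example_pin_fast_only(unsigned long addr, int nr,
				 struct page **pages)
{
	/* Returns 0, never -errno, when nothing could be pinned fast. */
	int pinned = pin_user_pages_fast_only(addr, nr, FOLL_WRITE, pages);

	if (pinned != nr) {
		unpin_user_pages(pages, pinned);
		return -EAGAIN;	/* caller can retry on a sleepable path */
	}

	/* ... use the pages ... */

	unpin_user_pages(pages, nr);
	return 0;
}
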
/**
* pin_user_pages_remote() - pin pages of a remote process
*
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @locked: pointer to lock flag indicating whether lock is held and
* subsequently whether VM_FAULT_RETRY functionality can be
* utilised. Lock must initially be held.
*
* Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
* get_user_pages_remote() for documentation on the function arguments, because
* the arguments here are identical.
*
* FOLL_PIN means that the pages must be released via unpin_user_page(). Please
* see Documentation/core-api/pin_user_pages.rst for details.
*/
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE(gup_flags & FOLL_GET))
return -EINVAL;
gup_flags |= FOLL_PIN;
return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
pages, vmas, locked);
}
EXPORT_SYMBOL(pin_user_pages_remote);
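
/*
 * Illustrative sketch (an editorial addition, not part of mm/gup.c): a
 * hypothetical caller pinning pages in another process's address space.
 * As documented above, mmap_lock must be held on entry; on return, *locked
 * reports whether GUP dropped the lock, in which case it must not be
 * unlocked again here.
 */
static long example_pin_remote(struct mm_struct *mm, unsigned long addr,
			       unsigned long nr, struct page **pages)
{
	int locked = 1;
	long pinned;

	mmap_read_lock(mm);
	pinned = pin_user_pages_remote(mm, addr, nr, FOLL_WRITE, pages,
				       NULL, &locked);
	if (locked)
		mmap_read_unlock(mm);

	/* On success, release with unpin_user_pages(pages, pinned). */
	return pinned;
}
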
/**
* pin_user_pages() - pin user pages in memory for use by other devices
*
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
*
* Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
* FOLL_PIN is set.
*
* FOLL_PIN means that the pages must be released via unpin_user_page(). Please
* see Documentation/core-api/pin_user_pages.rst for details.
*/
long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE(gup_flags & FOLL_GET))
return -EINVAL;
gup_flags |= FOLL_PIN;
return __gup_longterm_locked(current->mm, start, nr_pages,
pages, vmas, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages);
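
/*
 * Illustrative sketch (an editorial addition, not part of mm/gup.c): like
 * get_user_pages(), pin_user_pages() must be called with mmap_lock held.
 * The helper is hypothetical; it pins a range for write, then marks the
 * pages dirty and unpins them in one step.
 */
static long example_pin_current(unsigned long addr, unsigned long nr,
				struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	pinned = pin_user_pages(addr, nr, FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);

	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	/* ... write into the pinned pages ... */

	/* Set dirty and drop the FOLL_PIN references together: */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}
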
/*
* pin_user_pages_unlocked() is the FOLL_PIN variant of
* get_user_pages_unlocked(). Behavior is the same, except that this one sets
* FOLL_PIN and rejects FOLL_GET.
*/
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
{
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE(gup_flags & FOLL_GET))
return -EINVAL;
gup_flags |= FOLL_PIN;
return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages_unlocked);
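
/*
 * Illustrative sketch (an editorial addition, not part of mm/gup.c): the
 * unlocked variant takes and drops mmap_lock internally, so a hypothetical
 * caller needs no locking of its own. Note the argument order: @pages
 * precedes @gup_flags here, unlike most of the other variants.
 */
static long example_pin_unlocked(unsigned long addr, unsigned long nr,
				 struct page **pages)
{
	long pinned = pin_user_pages_unlocked(addr, nr, pages, FOLL_WRITE);

	if (pinned > 0)
		unpin_user_pages(pages, pinned);	/* done with the pages */
	return pinned;
}
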
/*
* pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
* Behavior is the same, except that this one sets FOLL_PIN and rejects
* FOLL_GET.
*/
long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked)
{
/*
* FIXME: Current FOLL_LONGTERM behavior is incompatible with
* FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
 * vmas. As there are no users of this flag in this call path, we simply
 * disallow this option for now.
*/
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return -EINVAL;
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE(gup_flags & FOLL_GET))
return -EINVAL;
gup_flags |= FOLL_PIN;
return __get_user_pages_locked(current->mm, start, nr_pages,
pages, NULL, locked,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(pin_user_pages_locked);
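
/*
 * Illustrative sketch (an editorial addition, not part of mm/gup.c): with
 * the _locked variant, a hypothetical caller holds mmap_lock and lets GUP
 * handle fault retries via @locked. If *locked is 0 on return, GUP already
 * dropped the lock, so it must not be unlocked again.
 */
static long example_pin_locked(unsigned long addr, unsigned long nr,
			       struct page **pages)
{
	int locked = 1;
	long pinned;

	mmap_read_lock(current->mm);
	pinned = pin_user_pages_locked(addr, nr, FOLL_WRITE, pages, &locked);
	if (locked)
		mmap_read_unlock(current->mm);

	/* On success, release with unpin_user_pages(pages, pinned). */
	return pinned;
}
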