android_kernel_asus_sm8350/mm/mmap.c
Srinivasarao Pathipati b696332499 Merge android11-5.4.219+ (0ce03d1) into msm-5.4
* refs/heads/tmp-0ce03d1:
  Revert "wait: Fix __wait_event_hrtimeout for RT/DL tasks"
  Reverts below USB and netfilter patches
  BACKPORT: Kconfig.debug: provide a little extra FRAME_WARN leeway when KASAN is enabled
  UPSTREAM: bpf: Ensure correct locking around vulnerable function find_vpid()
  UPSTREAM: HID: roccat: Fix use-after-free in roccat_read()
  ANDROID: arm64: mm: perform clean & invalidation in __dma_map_area
  UPSTREAM: mmc: hsq: Fix data stomping during mmc recovery
  UPSTREAM: pinctrl: sunxi: Fix name for A100 R_PIO
  BACKPORT: mmc: core: Fix UHS-I SD 1.8V workaround branch
  UPSTREAM: Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm regression
  UPSTREAM: wifi: mac80211_hwsim: set virtio device ready in probe()
  BACKPORT: f2fs: don't use casefolded comparison for "." and ".."
  UPSTREAM: Revert "mm/cma.c: remove redundant cma_mutex lock"
  UPSTREAM: usb: dwc3: Try usb-role-switch first in dwc3_drd_init
  BACKPORT: usb: typec: ucsi: Fix reuse of completion structure
  BACKPORT: tipc: fix incorrect order of state message data sanity check
  UPSTREAM: net: fix up skbs delta_truesize in UDP GRO frag_list
  UPSTREAM: cgroup-v1: Correct privileges check in release_agent writes
  UPSTREAM: mm: don't try to NUMA-migrate COW pages that have other uses
  UPSTREAM: usb: raw-gadget: fix handling of dual-direction-capable endpoints
  UPSTREAM: selinux: check return value of sel_make_avc_files
  UPSTREAM: usb: musb: select GENERIC_PHY instead of depending on it
  BACKPORT: driver core: Fix error return code in really_probe()
  UPSTREAM: fscrypt: fix derivation of SipHash keys on big endian CPUs
  BACKPORT: fscrypt: rename FS_KEY_DERIVATION_NONCE_SIZE
  UPSTREAM: socionext: account for napi_gro_receive never returning GRO_DROP
  UPSTREAM: net: socionext: netsec: fix xdp stats accounting
  BACKPORT: fs: align IOCB_* flags with RWF_* flags
  UPSTREAM: efi: capsule-loader: Fix use-after-free in efi_capsule_write
  BACKPORT: ARM: 9039/1: assembler: generalize byte swapping macro into rev_l
  BACKPORT: ARM: 9035/1: uncompress: Add be32tocpu macro
  UPSTREAM: drm/meson: Fix overflow implicit truncation warnings
  UPSTREAM: irqchip/tegra: Fix overflow implicit truncation warnings
  UPSTREAM: video: fbdev: pxa3xx-gcu: Fix integer overflow in pxa3xx_gcu_write
  UPSTREAM: mm/mremap: hold the rmap lock in write mode when moving page table entries.
  FROMLIST: binder: fix UAF of alloc->vma in race with munmap()
  UPSTREAM: mm: Fix TLB flush for not-first PFNMAP mappings in unmap_region()
  UPSTREAM: mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
  UPSTREAM: af_key: Do not call xfrm_probe_algs in parallel
  UPSTREAM: wifi: cfg80211: fix u8 overflow in cfg80211_update_notlisted_nontrans()
  UPSTREAM: wifi: cfg80211/mac80211: reject bad MBSSID elements
  UPSTREAM: wifi: cfg80211: ensure length byte is present before access
  UPSTREAM: wifi: cfg80211: fix BSS refcounting bugs
  UPSTREAM: wifi: cfg80211: avoid nontransmitted BSS list corruption
  UPSTREAM: wifi: mac80211_hwsim: avoid mac80211 warning on bad rate
  UPSTREAM: wifi: cfg80211: update hidden BSSes to avoid WARN_ON
  UPSTREAM: mac80211: mlme: find auth challenge directly
  UPSTREAM: wifi: mac80211: don't parse mbssid in assoc response
  ANDROID: GKI: db845c: Update symbols list and ABI
  UPSTREAM: wifi: mac80211: fix MBSSID parsing use-after-free
  ANDROID: Drop explicit 'CONFIG_INIT_STACK_ALL_ZERO=y' from gki_defconfig
  UPSTREAM: hardening: Remove Clang's enable flag for -ftrivial-auto-var-init=zero
  UPSTREAM: hardening: Avoid harmless Clang option under CONFIG_INIT_STACK_ALL_ZERO
  UPSTREAM: hardening: Clarify Kconfig text for auto-var-init
  ANDROID: GKI: Update FCNT KMI symbol list
  Linux 5.4.219
  wifi: mac80211: fix MBSSID parsing use-after-free
  wifi: mac80211: don't parse mbssid in assoc response
  mac80211: mlme: find auth challenge directly
  Revert "fs: check FMODE_LSEEK to control internal pipe splicing"
  Linux 5.4.218
  Input: xpad - fix wireless 360 controller breaking after suspend
  Input: xpad - add supported devices as contributed on github
  wifi: cfg80211: update hidden BSSes to avoid WARN_ON
  wifi: mac80211_hwsim: avoid mac80211 warning on bad rate
  wifi: cfg80211: avoid nontransmitted BSS list corruption
  wifi: cfg80211: fix BSS refcounting bugs
  wifi: cfg80211: ensure length byte is present before access
  wifi: cfg80211/mac80211: reject bad MBSSID elements
  wifi: cfg80211: fix u8 overflow in cfg80211_update_notlisted_nontrans()
  random: use expired timer rather than wq for mixing fast pool
  random: avoid reading two cache lines on irq randomness
  random: restore O_NONBLOCK support
  USB: serial: qcserial: add new usb-id for Dell branded EM7455
  scsi: stex: Properly zero out the passthrough command structure
  efi: Correct Macmini DMI match in uefi cert quirk
  ALSA: hda: Fix position reporting on Poulsbo
  random: clamp credited irq bits to maximum mixed
  ceph: don't truncate file in atomic_open
  nilfs2: replace WARN_ONs by nilfs_error for checkpoint acquisition failure
  nilfs2: fix leak of nilfs_root in case of writer thread creation failure
  nilfs2: fix NULL pointer dereference at nilfs_bmap_lookup_at_level()
  rpmsg: qcom: glink: replace strncpy() with strscpy_pad()
  mmc: core: Terminate infinite loop in SD-UHS voltage switch
  mmc: core: Replace with already defined values for readability
  USB: serial: ftdi_sio: fix 300 bps rate for SIO
  usb: mon: make mmapped memory read only
  arch: um: Mark the stack non-executable to fix a binutils warning
  um: Cleanup compiler warning in arch/x86/um/tls_32.c
  um: Cleanup syscall_handler_t cast in syscalls_32.h
  net/ieee802154: fix uninit value bug in dgram_sendmsg
  scsi: qedf: Fix a UAF bug in __qedf_probe()
  ARM: dts: fix Moxa SDIO 'compatible', remove 'sdhci' misnomer
  dmaengine: xilinx_dma: Report error in case of dma_set_mask_and_coherent API failure
  dmaengine: xilinx_dma: cleanup for fetching xlnx,num-fstores property
  firmware: arm_scmi: Add SCMI PM driver remove routine
  fs: fix UAF/GPF bug in nilfs_mdt_destroy
  perf tools: Fixup get_current_dir_name() compilation
  mm: pagewalk: Fix race between unmap and page walker
  ANDROID: Fix kenelci build-break for !CONFIG_PERF_EVENTS
  BACKPORT: HID: steam: Prevent NULL pointer dereference in steam_{recv,send}_report
  Linux 5.4.217
  docs: update mediator information in CoC docs
  Makefile.extrawarn: Move -Wcast-function-type-strict to W=1
  Revert "drm/amdgpu: use dirty framebuffer helper"
  xfs: remove unused variable 'done'
  xfs: fix uninitialized variable in xfs_attr3_leaf_inactive
  xfs: streamline xfs_attr3_leaf_inactive
  xfs: move incore structures out of xfs_da_format.h
  xfs: fix memory corruption during remote attr value buffer invalidation
  xfs: refactor remote attr value buffer invalidation
  xfs: fix IOCB_NOWAIT handling in xfs_file_dio_aio_read
  xfs: fix s_maxbytes computation on 32-bit kernels
  xfs: truncate should remove all blocks, not just to the end of the page cache
  xfs: introduce XFS_MAX_FILEOFF
  xfs: fix misuse of the XFS_ATTR_INCOMPLETE flag
  x86/speculation: Add RSB VM Exit protections
  x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
  x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
  x86/speculation: Disable RRSBA behavior
  x86/bugs: Add Cannon lake to RETBleed affected CPU list
  x86/cpu/amd: Enumerate BTC_NO
  x86/common: Stamp out the stepping madness
  x86/speculation: Fill RSB on vmexit for IBRS
  KVM: VMX: Fix IBRS handling after vmexit
  KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
  KVM: VMX: Convert launched argument to flags
  KVM: VMX: Flatten __vmx_vcpu_run()
  KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw
  KVM/VMX: Use TEST %REG,%REG instead of CMP $0,%REG in vmenter.S
  x86/speculation: Remove x86_spec_ctrl_mask
  x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
  x86/speculation: Fix SPEC_CTRL write on SMT state change
  x86/speculation: Fix firmware entry SPEC_CTRL handling
  x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
  x86/speculation: Change FILL_RETURN_BUFFER to work with objtool
  intel_idle: Disable IBRS during long idle
  x86/bugs: Report Intel retbleed vulnerability
  x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
  x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
  x86/bugs: Optimize SPEC_CTRL MSR writes
  x86/entry: Add kernel IBRS implementation
  x86/entry: Remove skip_r11rcx
  x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
  x86/bugs: Add AMD retbleed= boot parameter
  x86/bugs: Report AMD retbleed vulnerability
  x86/cpufeatures: Move RETPOLINE flags to word 11
  x86/kvm/vmx: Make noinstr clean
  x86/cpu: Add a steppings field to struct x86_cpu_id
  x86/cpu: Add consistent CPU match macros
  x86/devicetable: Move x86 specific macro out of generic code
  Revert "x86/cpu: Add a steppings field to struct x86_cpu_id"
  Revert "x86/speculation: Add RSB VM Exit protections"
  Linux 5.4.216
  clk: iproc: Do not rely on node name for correct PLL setup
  clk: imx: imx6sx: remove the SET_RATE_PARENT flag for QSPI clocks
  selftests: Fix the if conditions of in test_extra_filter()
  nvme: Fix IOC_PR_CLEAR and IOC_PR_RELEASE ioctls for nvme devices
  nvme: add new line after variable declatation
  usbnet: Fix memory leak in usbnet_disconnect()
  Input: melfas_mip4 - fix return value check in mip4_probe()
  Revert "drm: bridge: analogix/dp: add panel prepare/unprepare in suspend/resume time"
  soc: sunxi: sram: Fix debugfs info for A64 SRAM C
  soc: sunxi: sram: Fix probe function ordering issues
  soc: sunxi_sram: Make use of the helper function devm_platform_ioremap_resource()
  soc: sunxi: sram: Prevent the driver from being unbound
  soc: sunxi: sram: Actually claim SRAM regions
  ARM: dts: am33xx: Fix MMCHS0 dma properties
  ARM: dts: Move am33xx and am43xx mmc nodes to sdhci-omap driver
  media: dvb_vb2: fix possible out of bound access
  mm: fix madivse_pageout mishandling on non-LRU page
  mm/migrate_device.c: flush TLB while holding PTL
  mm: prevent page_frag_alloc() from corrupting the memory
  mm/page_alloc: fix race condition between build_all_zonelists and page allocation
  mmc: moxart: fix 4-bit bus width and remove 8-bit bus width
  libata: add ATA_HORKAGE_NOLPM for Pioneer BDR-207M and BDR-205
  Revert "net: mvpp2: debugfs: fix memory leak when using debugfs_lookup()"
  ntfs: fix BUG_ON in ntfs_lookup_inode_by_name()
  ARM: dts: integrator: Tag PCI host with device_type
  clk: ingenic-tcu: Properly enable registers before accessing timers
  net: usb: qmi_wwan: Add new usb-id for Dell branded EM7455
  uas: ignore UAS for Thinkplus chips
  usb-storage: Add Hiksemi USB3-FW to IGNORE_UAS
  uas: add no-uas quirk for Hiksemi usb_disk
  ANDROID: ABI: Update allowed list for QCOM
  UPSTREAM: wifi: mac80211_hwsim: use 32-bit skb cookie
  UPSTREAM: wifi: mac80211_hwsim: add back erroneously removed cast
  UPSTREAM: wifi: mac80211_hwsim: fix race condition in pending packet
  Linux 5.4.215
  ext4: make directory inode spreading reflect flexbg size
  xfs: fix use-after-free when aborting corrupt attr inactivation
  xfs: fix an ABBA deadlock in xfs_rename
  xfs: don't commit sunit/swidth updates to disk if that would cause repair failures
  xfs: split the sunit parameter update into two parts
  xfs: refactor agfl length computation function
  xfs: use bitops interface for buf log item AIL flag check
  xfs: stabilize insert range start boundary to avoid COW writeback race
  xfs: fix some memory leaks in log recovery
  xfs: always log corruption errors
  xfs: constify the buffer pointer arguments to error functions
  xfs: convert EIO to EFSCORRUPTED when log contents are invalid
  xfs: Fix deadlock between AGI and AGF when target_ip exists in xfs_rename()
  xfs: attach dquots and reserve quota blocks during unwritten conversion
  xfs: range check ri_cnt when recovering log items
  xfs: add missing assert in xfs_fsmap_owner_from_rmap
  xfs: slightly tweak an assert in xfs_fs_map_blocks
  xfs: replace -EIO with -EFSCORRUPTED for corrupt metadata
  ext4: fix bug in extents parsing when eh_entries == 0 and eh_depth > 0
  workqueue: don't skip lockdep work dependency in cancel_work_sync()
  drm/rockchip: Fix return type of cdn_dp_connector_mode_valid
  drm/amd/display: Limit user regamma to a valid value
  drm/amdgpu: use dirty framebuffer helper
  Drivers: hv: Never allocate anything besides framebuffer from framebuffer memory region
  cifs: always initialize struct msghdr smb_msg completely
  usb: xhci-mtk: fix issue of out-of-bounds array access
  s390/dasd: fix Oops in dasd_alias_get_start_dev due to missing pavgroup
  serial: tegra-tcu: Use uart_xmit_advance(), fixes icount.tx accounting
  serial: tegra: Use uart_xmit_advance(), fixes icount.tx accounting
  serial: Create uart_xmit_advance()
  net: sched: fix possible refcount leak in tc_new_tfilter()
  net: sunhme: Fix packet reception for len < RX_COPY_THRESHOLD
  perf kcore_copy: Do not check /proc/modules is unchanged
  perf jit: Include program header in ELF files
  can: gs_usb: gs_can_open(): fix race dev->can.state condition
  netfilter: ebtables: fix memory leak when blob is malformed
  net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo child qdiscs
  net/sched: taprio: avoid disabling offload when it was never enabled
  of: mdio: Add of_node_put() when breaking out of for_each_xx
  i40e: Fix set max_tx_rate when it is lower than 1 Mbps
  i40e: Fix VF set max MTU size
  iavf: Fix set max MTU size with port VLAN and jumbo frames
  iavf: Fix bad page state
  MIPS: Loongson32: Fix PHY-mode being left unspecified
  MIPS: lantiq: export clk_get_io() for lantiq_wdt.ko
  net: team: Unsync device addresses on ndo_stop
  ipvlan: Fix out-of-bound bugs caused by unset skb->mac_header
  iavf: Fix cached head and tail value for iavf_get_tx_pending
  netfilter: nfnetlink_osf: fix possible bogus match in nf_osf_find()
  netfilter: nf_conntrack_irc: Tighten matching on DCC message
  netfilter: nf_conntrack_sip: fix ct_sip_walk_headers
  arm64: dts: rockchip: Remove 'enable-active-low' from rk3399-puma
  arm64: dts: rockchip: Set RK3399-Gru PCLK_EDP to 24 MHz
  arm64: dts: rockchip: Pull up wlan wake# on Gru-Bob
  mm/slub: fix to return errno if kmalloc() fails
  efi: libstub: check Shim mode using MokSBStateRT
  ALSA: hda/realtek: Enable 4-speaker output Dell Precision 5530 laptop
  ALSA: hda/realtek: Add quirk for ASUS GA503R laptop
  ALSA: hda/realtek: Add pincfg for ASUS G533Z HP jack
  ALSA: hda/realtek: Add pincfg for ASUS G513 HP jack
  ALSA: hda/realtek: Re-arrange quirk table entries
  ALSA: hda/realtek: Add quirk for Huawei WRT-WX9
  ALSA: hda: add Intel 5 Series / 3400 PCI DID
  ALSA: hda/tegra: set depop delay for tegra
  USB: serial: option: add Quectel RM520N
  USB: serial: option: add Quectel BG95 0x0203 composition
  USB: core: Fix RST error in hub.c
  Revert "usb: gadget: udc-xilinx: replace memcpy with memcpy_toio"
  Revert "usb: add quirks for Lenovo OneLink+ Dock"
  usb: cdns3: fix issue with rearming ISO OUT endpoint
  usb: gadget: udc-xilinx: replace memcpy with memcpy_toio
  usb: add quirks for Lenovo OneLink+ Dock
  tty: serial: atmel: Preserve previous USART mode if RS485 disabled
  serial: atmel: remove redundant assignment in rs485_config
  tty/serial: atmel: RS485 & ISO7816: wait for TXRDY before sending data
  wifi: mac80211: Fix UAF in ieee80211_scan_rx()
  usb: xhci-mtk: relax TT periodic bandwidth allocation
  usb: xhci-mtk: allow multiple Start-Split in a microframe
  usb: xhci-mtk: add some schedule error number
  usb: xhci-mtk: add a function to (un)load bandwidth info
  usb: xhci-mtk: use @sch_tt to check whether need do TT schedule
  usb: xhci-mtk: add only one extra CS for FS/LS INTR
  usb: xhci-mtk: get the microframe boundary for ESIT
  usb: dwc3: gadget: Avoid duplicate requests to enable Run/Stop
  usb: dwc3: gadget: Don't modify GEVNTCOUNT in pullup()
  usb: dwc3: gadget: Refactor pullup()
  usb: dwc3: gadget: Prevent repeat pullup()
  usb: dwc3: Issue core soft reset before enabling run/stop
  usb: dwc3: gadget: Avoid starting DWC3 gadget during UDC unbind
  ALSA: hda/sigmatel: Fix unused variable warning for beep power change
  cgroup: Add missing cpus_read_lock() to cgroup_attach_task_all()
  video: fbdev: pxa3xx-gcu: Fix integer overflow in pxa3xx_gcu_write
  mksysmap: Fix the mismatch of 'L0' symbols in System.map
  MIPS: OCTEON: irq: Fix octeon_irq_force_ciu_mapping()
  afs: Return -EAGAIN, not -EREMOTEIO, when a file already locked
  net: usb: qmi_wwan: add Quectel RM520N
  ALSA: hda/tegra: Align BDL entry to 4KB boundary
  ALSA: hda/sigmatel: Keep power up while beep is enabled
  rxrpc: Fix calc of resend age
  rxrpc: Fix local destruction being repeated
  regulator: pfuze100: Fix the global-out-of-bounds access in pfuze100_regulator_probe()
  ASoC: nau8824: Fix semaphore unbalance at error paths
  iomap: iomap that extends beyond EOF should be marked dirty
  MAINTAINERS: add Chandan as xfs maintainer for 5.4.y
  cifs: don't send down the destination address to sendmsg for a SOCK_STREAM
  cifs: revalidate mapping when doing direct writes
  tracing: hold caller_addr to hardirq_{enable,disable}_ip
  task_stack, x86/cea: Force-inline stack helpers
  ALSA: pcm: oss: Fix race at SNDCTL_DSP_SYNC
  parisc: ccio-dma: Add missing iounmap in error path in ccio_probe()
  drm/meson: Fix OSD1 RGB to YCbCr coefficient
  drm/meson: Correct OSD1 global alpha value
  gpio: mpc8xxx: Fix support for IRQ_TYPE_LEVEL_LOW flow_type in mpc85xx
  NFSv4: Turn off open-by-filehandle and NFS re-export for NFSv4.0
  of: fdt: fix off-by-one error in unflatten_dt_nodes()
  Revert "USB: core: Prevent nested device-reset calls"
  Revert "io_uring: disable polling pollfree files"
  Revert "netfilter: conntrack: NF_CONNTRACK_PROCFS should no longer default to y"
  Revert "sched/deadline: Fix priority inheritance with multiple scheduling classes"
  Revert "kernel/sched: Remove dl_boosted flag comment"
  Revert "mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse"
  Revert "fs: check FMODE_LSEEK to control internal pipe splicing"
  Linux 5.4.214
  tracefs: Only clobber mode/uid/gid on remount if asked
  soc: fsl: select FSL_GUTS driver for DPIO
  net: dp83822: disable rx error interrupt
  mm: Fix TLB flush for not-first PFNMAP mappings in unmap_region()
  usb: storage: Add ASUS <0x0b05:0x1932> to IGNORE_UAS
  platform/x86: acer-wmi: Acer Aspire One AOD270/Packard Bell Dot keymap fixes
  perf/arm_pmu_platform: fix tests for platform_get_irq() failure
  nvmet-tcp: fix unhandled tcp states in nvmet_tcp_state_change()
  Input: iforce - add support for Boeder Force Feedback Wheel
  ieee802154: cc2520: add rc code in cc2520_tx()
  tg3: Disable tg3 device on system reboot to avoid triggering AER
  hid: intel-ish-hid: ishtp: Fix ishtp client sending disordered message
  HID: ishtp-hid-clientHID: ishtp-hid-client: Fix comment typo
  drm/msm/rd: Fix FIFO-full deadlock
  Linux 5.4.213
  MIPS: loongson32: ls1c: Fix hang during startup
  x86/nospec: Fix i386 RSB stuffing
  sch_sfb: Also store skb len before calling child enqueue
  tcp: fix early ETIMEDOUT after spurious non-SACK RTO
  nvme-tcp: fix UAF when detecting digest errors
  RDMA/mlx5: Set local port to one when accessing counters
  ipv6: sr: fix out-of-bounds read when setting HMAC data.
  RDMA/siw: Pass a pointer to virt_to_page()
  i40e: Fix kernel crash during module removal
  tipc: fix shift wrapping bug in map_get()
  sch_sfb: Don't assume the skb is still around after enqueueing to child
  afs: Use the operation issue time instead of the reply time for callbacks
  rxrpc: Fix an insufficiently large sglist in rxkad_verify_packet_2()
  netfilter: nf_conntrack_irc: Fix forged IP logic
  netfilter: br_netfilter: Drop dst references before setting.
  RDMA/hns: Fix supported page size
  soc: brcmstb: pm-arm: Fix refcount leak and __iomem leak bugs
  RDMA/cma: Fix arguments order in net device validation
  regulator: core: Clean up on enable failure
  ARM: dts: imx6qdl-kontron-samx6i: remove duplicated node
  smb3: missing inode locks in punch hole
  cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
  cgroup: Elide write-locking threadgroup_rwsem when updating csses on an empty subtree
  cgroup: Optimize single thread migration
  scsi: lpfc: Add missing destroy_workqueue() in error path
  scsi: mpt3sas: Fix use-after-free warning
  nvmet: fix a use-after-free
  debugfs: add debugfs_lookup_and_remove()
  kprobes: Prohibit probes in gate area
  ALSA: usb-audio: Fix an out-of-bounds bug in __snd_usb_parse_audio_interface()
  ALSA: aloop: Fix random zeros in capture data when using jiffies timer
  ALSA: emu10k1: Fix out of bounds access in snd_emu10k1_pcm_channel_alloc()
  drm/amdgpu: mmVM_L2_CNTL3 register not initialized correctly
  fbdev: chipsfb: Add missing pci_disable_device() in chipsfb_pci_init()
  arm64: cacheinfo: Fix incorrect assignment of signed error value to unsigned fw_level
  parisc: Add runtime check to prevent PA2.0 kernels on PA1.x machines
  parisc: ccio-dma: Handle kmalloc failure in ccio_init_resources()
  drm/radeon: add a force flush to delay work when radeon
  drm/amdgpu: Check num_gfx_rings for gfx v9_0 rb setup.
  drm/gem: Fix GEM handle release errors
  scsi: megaraid_sas: Fix double kfree()
  USB: serial: ch341: fix disabled rx timer on older devices
  USB: serial: ch341: fix lost character on LCR updates
  usb: dwc3: disable USB core PHY management
  usb: dwc3: fix PHY disable sequence
  btrfs: harden identification of a stale device
  drm/i915/glk: ECS Liva Q2 needs GLK HDMI port timing quirk
  ALSA: seq: Fix data-race at module auto-loading
  ALSA: seq: oss: Fix data-race for max_midi_devs access
  net: mac802154: Fix a condition in the receive path
  ip: fix triggering of 'icmp redirect'
  wifi: mac80211: Don't finalize CSA in IBSS mode if state is disconnected
  driver core: Don't probe devices after bus_type.match() probe deferral
  usb: gadget: mass_storage: Fix cdrom data transfers on MAC-OS
  USB: core: Prevent nested device-reset calls
  s390: fix nospec table alignments
  s390/hugetlb: fix prepare_hugepage_range() check for 2 GB hugepages
  usb-storage: Add ignore-residue quirk for NXP PN7462AU
  USB: cdc-acm: Add Icom PMR F3400 support (0c26:0020)
  usb: dwc2: fix wrong order of phy_power_on and phy_init
  usb: typec: altmodes/displayport: correct pin assignment for UFP receptacles
  USB: serial: option: add support for Cinterion MV32-WA/WB RmNet mode
  USB: serial: option: add Quectel EM060K modem
  USB: serial: option: add support for OPPO R11 diag port
  USB: serial: cp210x: add Decagon UCA device id
  xhci: Add grace period after xHC start to prevent premature runtime suspend.
  thunderbolt: Use the actual buffer in tb_async_error()
  gpio: pca953x: Add mutex_lock for regcache sync in PM
  hwmon: (gpio-fan) Fix array out of bounds access
  clk: bcm: rpi: Fix error handling of raspberrypi_fw_get_rate
  Input: rk805-pwrkey - fix module autoloading
  clk: core: Fix runtime PM sequence in clk_core_unprepare()
  Revert "clk: core: Honor CLK_OPS_PARENT_ENABLE for clk gate ops"
  clk: core: Honor CLK_OPS_PARENT_ENABLE for clk gate ops
  drm/i915/reg: Fix spelling mistake "Unsupport" -> "Unsupported"
  usb: dwc3: qcom: fix use-after-free on runtime-PM wakeup
  binder: fix UAF of ref->proc caused by race condition
  USB: serial: ftdi_sio: add Omron CS1W-CIF31 device id
  misc: fastrpc: fix memory corruption on open
  misc: fastrpc: fix memory corruption on probe
  iio: adc: mcp3911: use correct formula for AD conversion
  Input: iforce - wake up after clearing IFORCE_XMIT_RUNNING flag
  tty: serial: lpuart: disable flow control while waiting for the transmit engine to complete
  vt: Clear selection before changing the font
  powerpc: align syscall table for ppc32
  staging: rtl8712: fix use after free bugs
  serial: fsl_lpuart: RS485 RTS polariy is inverse
  net/smc: Remove redundant refcount increase
  Revert "sch_cake: Return __NET_XMIT_STOLEN when consuming enqueued skb"
  tcp: annotate data-race around challenge_timestamp
  sch_cake: Return __NET_XMIT_STOLEN when consuming enqueued skb
  kcm: fix strp_init() order and cleanup
  ethernet: rocker: fix sleep in atomic context bug in neigh_timer_handler
  net: sched: tbf: don't call qdisc_put() while holding tree lock
  Revert "xhci: turn off port power in shutdown"
  wifi: cfg80211: debugfs: fix return type in ht40allow_map_read()
  ieee802154/adf7242: defer destroy_workqueue call
  iio: adc: mcp3911: make use of the sign bit
  platform/x86: pmc_atom: Fix SLP_TYPx bitfield mask
  drm/msm/dsi: Fix number of regulators for msm8996_dsi_cfg
  drm/msm/dsi: fix the inconsistent indenting
  net: dp83822: disable false carrier interrupt
  Revert "mm: kmemleak: take a full lowmem check in kmemleak_*_phys()"
  fs: only do a memory barrier for the first set_buffer_uptodate()
  net: mvpp2: debugfs: fix memory leak when using debugfs_lookup()
  wifi: iwlegacy: 4965: corrected fix for potential off-by-one overflow in il4965_rs_fill_link_cmd()
  efi: capsule-loader: Fix use-after-free in efi_capsule_write
  Linux 5.4.212
  net: neigh: don't call kfree_skb() under spin_lock_irqsave()
  net/af_packet: check len when min_header_len equals to 0
  io_uring: disable polling pollfree files
  kprobes: don't call disarm_kprobe() for disabled kprobes
  lib/vdso: Mark do_hres() and do_coarse() as __always_inline
  lib/vdso: Let do_coarse() return 0 to simplify the callsite
  btrfs: tree-checker: check for overlapping extent items
  netfilter: conntrack: NF_CONNTRACK_PROCFS should no longer default to y
  drm/amd/display: Fix pixel clock programming
  s390/hypfs: avoid error message under KVM
  neigh: fix possible DoS due to net iface start/stop loop
  drm/amd/display: clear optc underflow before turn off odm clock
  drm/amd/display: Avoid MPC infinite loop
  btrfs: unify lookup return value when dir entry is missing
  btrfs: do not pin logs too early during renames
  btrfs: introduce btrfs_lookup_match_dir
  mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
  bpf: Don't redirect packets with invalid pkt_len
  ftrace: Fix NULL pointer dereference in is_ftrace_trampoline when ftrace is dead
  fbdev: fb_pm2fb: Avoid potential divide by zero error
  HID: hidraw: fix memory leak in hidraw_release()
  media: pvrusb2: fix memory leak in pvr_probe
  udmabuf: Set the DMA mask for the udmabuf device (v2)
  HID: steam: Prevent NULL pointer dereference in steam_{recv,send}_report
  Bluetooth: L2CAP: Fix build errors in some archs
  kbuild: Fix include path in scripts/Makefile.modpost
  x86/bugs: Add "unknown" reporting for MMIO Stale Data
  s390/mm: do not trigger write fault when vma does not allow VM_WRITE
  mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
  scsi: storvsc: Remove WQ_MEM_RECLAIM from storvsc_error_wq
  perf/x86/intel/uncore: Fix broken read_counter() for SNB IMC PMU
  md: call __md_stop_writes in md_stop
  mm/hugetlb: fix hugetlb not supporting softdirty tracking
  ACPI: processor: Remove freq Qos request for all CPUs
  s390: fix double free of GS and RI CBs on fork() failure
  asm-generic: sections: refactor memory_intersects
  loop: Check for overflow while configuring loop
  x86/unwind/orc: Unwind ftrace trampolines with correct ORC entry
  btrfs: check if root is readonly while setting security xattr
  btrfs: add info when mount fails due to stale replace target
  btrfs: replace: drop assert for suspended replace
  btrfs: fix silent failure when deleting root reference
  ixgbe: stop resetting SYSTIME in ixgbe_ptp_start_cyclecounter
  net: Fix a data-race around sysctl_somaxconn.
  net: Fix a data-race around netdev_budget_usecs.
  net: Fix a data-race around netdev_budget.
  net: Fix a data-race around sysctl_net_busy_read.
  net: Fix a data-race around sysctl_net_busy_poll.
  net: Fix a data-race around sysctl_tstamp_allow_data.
  ratelimit: Fix data-races in ___ratelimit().
  net: Fix data-races around netdev_tstamp_prequeue.
  net: Fix data-races around weight_p and dev_weight_[rt]x_bias.
  netfilter: nft_tunnel: restrict it to netdev family
  netfilter: nft_osf: restrict osf to ipv4, ipv6 and inet families
  netfilter: nft_payload: do not truncate csum_offset and csum_type
  netfilter: nft_payload: report ERANGE for too long offset and length
  bnxt_en: fix NQ resource accounting during vf creation on 57500 chips
  netfilter: ebtables: reject blobs that don't provide all entry points
  net: ipvtap - add __init/__exit annotations to module init/exit funcs
  bonding: 802.3ad: fix no transmission of LACPDUs
  net: moxa: get rid of asymmetry in DMA mapping/unmapping
  net/mlx5e: Properly disable vlan strip on non-UL reps
  rose: check NULL rose_loopback_neigh->loopback
  SUNRPC: RPC level errors should set task->tk_rpc_status
  af_key: Do not call xfrm_probe_algs in parallel
  xfrm: fix refcount leak in __xfrm_policy_check()
  kernel/sched: Remove dl_boosted flag comment
  sched/deadline: Fix priority inheritance with multiple scheduling classes
  sched/deadline: Fix stale throttling on de-/boosted tasks
  sched/deadline: Unthrottle PI boosted threads while enqueuing
  pinctrl: amd: Don't save/restore interrupt status and wake status bits
  Revert "selftests/bpf: Fix test_align verifier log patterns"
  Revert "selftests/bpf: Fix "dubious pointer arithmetic" test"
  usb: cdns3: Fix issue for clear halt endpoint
  kernel/sys_ni: add compat entry for fadvise64_64
  parisc: Fix exception handler for fldw and fstw instructions
  audit: fix potential double free on error path from fsnotify_add_inode_mark
  Revert "USB: HCD: Fix URB giveback issue in tasklet function"
  Linux 5.4.211
  btrfs: raid56: don't trust any cached sector in __raid56_parity_recover()
  btrfs: only write the sectors in the vertical stripe which has data stripes
  can: j1939: j1939_session_destroy(): fix memory leak of skbs
  can: j1939: j1939_sk_queue_activate_next_locked(): replace WARN_ON_ONCE with netdev_warn_once()
  tracing/probes: Have kprobes and uprobes use $COMM too
  MIPS: tlbex: Explicitly compare _PAGE_NO_EXEC against 0
  video: fbdev: i740fb: Check the argument of i740_calc_vclk()
  powerpc/64: Init jump labels before parse_early_param()
  smb3: check xattr value length earlier
  f2fs: fix to avoid use f2fs_bug_on() in f2fs_new_node_page()
  ALSA: timer: Use deferred fasync helper
  ALSA: core: Add async signal helpers
  powerpc/32: Don't always pass -mcpu=powerpc to the compiler
  watchdog: export lockup_detector_reconfigure
  RISC-V: Add fast call path of crash_kexec()
  riscv: mmap with PROT_WRITE but no PROT_READ is invalid
  mips: cavium-octeon: Fix missing of_node_put() in octeon2_usb_clocks_start
  vfio: Clear the caps->buf to NULL after free
  tty: serial: Fix refcount leak bug in ucc_uart.c
  lib/list_debug.c: Detect uninitialized lists
  ext4: avoid resizing to a partial cluster size
  ext4: avoid remove directory when directory is corrupted
  drivers:md:fix a potential use-after-free bug
  nvmet-tcp: fix lockdep complaint on nvmet_tcp_wq flush during queue teardown
  dmaengine: sprd: Cleanup in .remove() after pm_runtime_get_sync() failed
  selftests/kprobe: Do not test for GRP/ without event failures
  um: add "noreboot" command line option for PANIC_TIMEOUT=-1 setups
  PCI/ACPI: Guard ARM64-specific mcfg_quirks
  cxl: Fix a memory leak in an error handling path
  gadgetfs: ep_io - wait until IRQ finishes
  scsi: lpfc: Prevent buffer overflow crashes in debugfs with malformed user input
  clk: qcom: ipq8074: dont disable gcc_sleep_clk_src
  vboxguest: Do not use devm for irq
  usb: renesas: Fix refcount leak bug
  usb: host: ohci-ppc-of: Fix refcount leak bug
  drm/meson: Fix overflow implicit truncation warnings
  irqchip/tegra: Fix overflow implicit truncation warnings
  usb: gadget: uvc: call uvc uvcg_warn on completed status instead of uvcg_info
  usb: cdns3 fix use-after-free at workaround 2
  PCI: Add ACS quirk for Broadcom BCM5750x NICs
  drm/meson: Fix refcount bugs in meson_vpu_has_available_connectors()
  locking/atomic: Make test_and_*_bit() ordered on failure
  gcc-plugins: Undefine LATENT_ENTROPY_PLUGIN when plugin disabled for a file
  igb: Add lock to avoid data race
  fec: Fix timer capture timing in `fec_ptp_enable_pps()`
  i40e: Fix to stop tx_timeout recovery if GLOBR fails
  ice: Ignore EEXIST when setting promisc mode
  net: dsa: microchip: ksz9477: fix fdb_dump last invalid entry
  net: moxa: pass pdev instead of ndev to DMA functions
  net: dsa: mv88e6060: prevent crash on an unused port
  powerpc/pci: Fix get_phb_number() locking
  netfilter: nf_tables: really skip inactive sets when allocating name
  clk: rockchip: add sclk_mac_lbtest to rk3188_critical_clocks
  iavf: Fix adminq error handling
  nios2: add force_successful_syscall_return()
  nios2: restarts apply only to the first sigframe we build...
  nios2: fix syscall restart checks
  nios2: traced syscall does need to check the syscall number
  nios2: don't leave NULLs in sys_call_table[]
  nios2: page fault et.al. are *not* restartable syscalls...
  tee: add overflow check in register_shm_helper()
  dpaa2-eth: trace the allocated address instead of page struct
  atm: idt77252: fix use-after-free bugs caused by tst_timer
  xen/xenbus: fix return type in xenbus_file_read()
  nfp: ethtool: fix the display error of `ethtool -m DEVNAME`
  NTB: ntb_tool: uninitialized heap data in tool_fn_write()
  tools build: Switch to new openssl API for test-libcrypto
  tools/vm/slabinfo: use alphabetic order when two values are equal
  dt-bindings: arm: qcom: fix MSM8916 MTP compatibles
  vsock: Set socket state back to SS_UNCONNECTED in vsock_connect_timeout()
  vsock: Fix memory leak in vsock_connect()
  plip: avoid rcu debug splat
  geneve: do not use RT_TOS for IPv6 flowlabel
  ACPI: property: Return type of acpi_add_nondev_subnodes() should be bool
  pinctrl: sunxi: Add I/O bias setting for H6 R-PIO
  pinctrl: qcom: msm8916: Allow CAMSS GP clocks to be muxed
  pinctrl: nomadik: Fix refcount leak in nmk_pinctrl_dt_subnode_to_map
  net: bgmac: Fix a BUG triggered by wrong bytes_compl
  devlink: Fix use-after-free after a failed reload
  SUNRPC: Reinitialise the backchannel request buffers before reuse
  sunrpc: fix expiry of auth creds
  can: mcp251x: Fix race condition on receive interrupt
  NFSv4/pnfs: Fix a use-after-free bug in open
  NFSv4.1: RECLAIM_COMPLETE must handle EACCES
  NFSv4: Fix races in the legacy idmapper upcall
  NFSv4.1: Handle NFS4ERR_DELAY replies to OP_SEQUENCE correctly
  NFSv4.1: Don't decrease the value of seq_nr_highest_sent
  Documentation: ACPI: EINJ: Fix obsolete example
  apparmor: Fix memleak in aa_simple_write_to_buffer()
  apparmor: fix reference count leak in aa_pivotroot()
  apparmor: fix overlapping attachment computation
  apparmor: fix aa_label_asxprint return check
  apparmor: Fix failed mount permission check error message
  apparmor: fix absroot causing audited secids to begin with =
  apparmor: fix quiet_denied for file rules
  can: ems_usb: fix clang's -Wunaligned-access warning
  tracing: Have filter accept "common_cpu" to be consistent
  btrfs: fix lost error handling when looking up extended ref on log replay
  mmc: pxamci: Fix an error handling path in pxamci_probe()
  mmc: pxamci: Fix another error handling path in pxamci_probe()
  ata: libata-eh: Add missing command name
  rds: add missing barrier to release_refill
  ALSA: info: Fix llseek return value when using callback
  net_sched: cls_route: disallow handle of 0
  net/9p: Initialize the iounit field during fid creation
  Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm regression
  Revert "net: usb: ax88179_178a needs FLAG_SEND_ZLP"
  scsi: sg: Allow waiting for commands to complete on removed device
  tcp: fix over estimation in sk_forced_mem_schedule()
  KVM: x86: Avoid theoretical NULL pointer dereference in kvm_irq_delivery_to_apic_fast()
  KVM: x86: Check lapic_in_kernel() before attempting to set a SynIC irq
  KVM: Add infrastructure and macro to mark VM as bugged
  btrfs: reject log replay if there is unsupported RO compat flag
  net_sched: cls_route: remove from list when handle is 0
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  firmware: arm_scpi: Ensure scpi_info is not assigned if the probe fails
  timekeeping: contribute wall clock to rng on time change
  ACPI: CPPC: Do not prevent CPPC from working in the future
  dm writecache: set a default MAX_WRITEBACK_JOBS
  dm thin: fix use-after-free crash in dm_sm_register_threshold_callback
  dm raid: fix address sanitizer warning in raid_status
  dm raid: fix address sanitizer warning in raid_resume
  intel_th: pci: Add Meteor Lake-P support
  intel_th: pci: Add Raptor Lake-S PCH support
  intel_th: pci: Add Raptor Lake-S CPU support
  ext4: correct the misjudgment in ext4_iget_extra_inode
  ext4: correct max_inline_xattr_value_size computing
  ext4: fix extent status tree race in writeback error recovery path
  ext4: update s_overhead_clusters in the superblock during an on-line resize
  ext4: fix use-after-free in ext4_xattr_set_entry
  ext4: make sure ext4_append() always allocates new block
  ext4: add EXT4_INODE_HAS_XATTR_SPACE macro in xattr.h
  btrfs: reset block group chunk force if we have to wait
  tpm: eventlog: Fix section mismatch for DEBUG_SECTION_MISMATCH
  kexec, KEYS, s390: Make use of built-in and secondary keyring for signature verification
  spmi: trace: fix stack-out-of-bound access in SPMI tracing functions
  x86/olpc: fix 'logical not is only applied to the left hand side'
  scsi: qla2xxx: Fix erroneous mailbox timeout after PCI error injection
  scsi: qla2xxx: Turn off multi-queue for 8G adapters
  scsi: qla2xxx: Fix discovery issues in FC-AL topology
  scsi: zfcp: Fix missing auto port scan and thus missing target ports
  video: fbdev: s3fb: Check the size of screen before memset_io()
  video: fbdev: arkfb: Check the size of screen before memset_io()
  video: fbdev: vt8623fb: Check the size of screen before memset_io()
  tools/thermal: Fix possible path truncations
  video: fbdev: arkfb: Fix a divide-by-zero bug in ark_set_pixclock()
  x86/numa: Use cpumask_available instead of hardcoded NULL check
  scripts/faddr2line: Fix vmlinux detection on arm64
  genelf: Use HAVE_LIBCRYPTO_SUPPORT, not the never defined HAVE_LIBCRYPTO
  powerpc/pci: Fix PHB numbering when using opal-phbid
  kprobes: Forbid probing on trampoline and BPF code areas
  perf symbol: Fail to read phdr workaround
  powerpc/cell/axon_msi: Fix refcount leak in setup_msi_msg_address
  powerpc/xive: Fix refcount leak in xive_get_max_prio
  powerpc/spufs: Fix refcount leak in spufs_init_isolated_loader
  powerpc/pci: Prefer PCI domain assignment via DT 'linux,pci-domain' and alias
  powerpc/32: Do not allow selection of e5500 or e6500 CPUs on PPC32
  video: fbdev: sis: fix typos in SiS_GetModeID()
  video: fbdev: amba-clcd: Fix refcount leak bugs
  watchdog: armada_37xx_wdt: check the return value of devm_ioremap() in armada_37xx_wdt_probe()
  ASoC: audio-graph-card: Add of_node_put() in fail path
  fuse: Remove the control interface for virtio-fs
  ASoC: qcom: q6dsp: Fix an off-by-one in q6adm_alloc_copp()
  s390/zcore: fix race when reading from hardware system area
  iommu/arm-smmu: qcom_iommu: Add of_node_put() when breaking out of loop
  mfd: max77620: Fix refcount leak in max77620_initialise_fps
  mfd: t7l66xb: Drop platform disable callback
  kfifo: fix kfifo_to_user() return type
  rpmsg: qcom_smd: Fix refcount leak in qcom_smd_parse_edge
  iommu/exynos: Handle failed IOMMU device registration properly
  tty: n_gsm: fix missing corner cases in gsmld_poll()
  tty: n_gsm: fix DM command
  tty: n_gsm: fix wrong T1 retry count handling
  vfio/ccw: Do not change FSM state in subchannel event
  remoteproc: qcom: wcnss: Fix handling of IRQs
  tty: n_gsm: fix race condition in gsmld_write()
  tty: n_gsm: fix packet re-transmission without open control channel
  tty: n_gsm: fix non flow control frames during mux flow off
  profiling: fix shift too large makes kernel panic
  ASoC: codecs: wcd9335: move gains from SX_TLV to S8_TLV
  ASoC: codecs: msm8916-wcd-digital: move gains from SX_TLV to S8_TLV
  serial: 8250_dw: Store LSR into lsr_saved_flags in dw8250_tx_wait_empty()
  ASoC: mediatek: mt8173-rt5650: Fix refcount leak in mt8173_rt5650_dev_probe
  ASoC: codecs: da7210: add check for i2c_add_driver
  ASoC: mt6797-mt6351: Fix refcount leak in mt6797_mt6351_dev_probe
  ASoC: mediatek: mt8173: Fix refcount leak in mt8173_rt5650_rt5676_dev_probe
  opp: Fix error check in dev_pm_opp_attach_genpd()
  jbd2: fix assertion 'jh->b_frozen_data == NULL' failure when journal aborted
  ext4: recover csum seed of tmp_inode after migrating to extents
  jbd2: fix outstanding credits assert in jbd2_journal_commit_transaction()
  null_blk: fix ida error handling in null_add_dev()
  RDMA/rxe: Fix error unwind in rxe_create_qp()
  mm/mmap.c: fix missing call to vm_unacct_memory in mmap_region
  platform/olpc: Fix uninitialized data in debugfs write
  USB: serial: fix tty-port initialized comments
  PCI: tegra194: Fix link up retry sequence
  PCI: tegra194: Fix Root Port interrupt handling
  HID: alps: Declare U1_UNICORN_LEGACY support
  mmc: cavium-thunderx: Add of_node_put() when breaking out of loop
  mmc: cavium-octeon: Add of_node_put() when breaking out of loop
  gpio: gpiolib-of: Fix refcount bugs in of_mm_gpiochip_add_data()
  RDMA/hfi1: fix potential memory leak in setup_base_ctxt()
  RDMA/siw: Fix duplicated reported IW_CM_EVENT_CONNECT_REPLY event
  RDMA/hns: Fix incorrect clearing of interrupt status register
  usb: gadget: udc: amd5536 depends on HAS_DMA
  scsi: smartpqi: Fix DMA direction for RAID requests
  mmc: sdhci-of-at91: fix set_uhs_signaling rewriting of MC1R
  memstick/ms_block: Fix a memory leak
  memstick/ms_block: Fix some incorrect memory allocation
  mmc: sdhci-of-esdhc: Fix refcount leak in esdhc_signal_voltage_switch
  staging: rtl8192u: Fix sleep in atomic context bug in dm_fsync_timer_callback
  intel_th: msu: Fix vmalloced buffers
  intel_th: msu-sink: Potential dereference of null pointer
  intel_th: Fix a resource leak in an error handling path
  soundwire: bus_type: fix remove and shutdown support
  clk: qcom: camcc-sdm845: Fix topology around titan_top power domain
  clk: qcom: ipq8074: set BRANCH_HALT_DELAY flag for UBI clocks
  clk: qcom: ipq8074: fix NSS port frequency tables
  usb: host: xhci: use snprintf() in xhci_decode_trb()
  clk: qcom: clk-krait: unlock spin after mux completion
  driver core: fix potential deadlock in __driver_attach
  misc: rtsx: Fix an error handling path in rtsx_pci_probe()
  clk: mediatek: reset: Fix written reset bit offset
  usb: xhci: tegra: Fix error check
  usb: ohci-nxp: Fix refcount leak in ohci_hcd_nxp_probe
  usb: host: Fix refcount leak in ehci_hcd_ppc_of_probe
  fpga: altera-pr-ip: fix unsigned comparison with less than zero
  mtd: st_spi_fsm: Add a clk_disable_unprepare() in .probe()'s error path
  mtd: partitions: Fix refcount leak in parse_redboot_of
  mtd: sm_ftl: Fix deadlock caused by cancel_work_sync in sm_release
  HID: cp2112: prevent a buffer overflow in cp2112_xfer()
  mtd: rawnand: meson: Fix a potential double free issue
  mtd: maps: Fix refcount leak in ap_flash_init
  mtd: maps: Fix refcount leak in of_flash_probe_versatile
  clk: renesas: r9a06g032: Fix UART clkgrp bitsel
  dccp: put dccp_qpolicy_full() and dccp_qpolicy_push() in the same lock
  net: rose: fix netdev reference changes
  netdevsim: Avoid allocation warnings triggered from user space
  iavf: Fix max_rate limiting
  crypto: inside-secure - Add missing MODULE_DEVICE_TABLE for of
  net/mlx5e: Fix the value of MLX5E_MAX_RQ_NUM_MTTS
  wifi: libertas: Fix possible refcount leak in if_usb_probe()
  wifi: iwlwifi: mvm: fix double list_add at iwl_mvm_mac_wake_tx_queue
  wifi: wil6210: debugfs: fix uninitialized variable use in `wil_write_file_wmi()`
  i2c: mux-gpmux: Add of_node_put() when breaking out of loop
  i2c: cadence: Support PEC for SMBus block read
  Bluetooth: hci_intel: Add check for platform_driver_register
  can: pch_can: pch_can_error(): initialize errc before using it
  can: error: specify the values of data[5..7] of CAN error frames
  can: usb_8dev: do not report txerr and rxerr during bus-off
  can: kvaser_usb_leaf: do not report txerr and rxerr during bus-off
  can: kvaser_usb_hydra: do not report txerr and rxerr during bus-off
  can: sun4i_can: do not report txerr and rxerr during bus-off
  can: hi311x: do not report txerr and rxerr during bus-off
  can: sja1000: do not report txerr and rxerr during bus-off
  can: rcar_can: do not report txerr and rxerr during bus-off
  can: pch_can: do not report txerr and rxerr during bus-off
  selftests/bpf: fix a test for snprintf() overflow
  wifi: p54: add missing parentheses in p54_flush()
  wifi: p54: Fix an error handling path in p54spi_probe()
  wifi: wil6210: debugfs: fix info leak in wil_write_file_wmi()
  fs: check FMODE_LSEEK to control internal pipe splicing
  selftests: timers: clocksource-switch: fix passing errors from child
  selftests: timers: valid-adjtimex: build fix for newer toolchains
  libbpf: Fix the name of a reused map
  tcp: make retransmitted SKB fit into the send window
  drm/exynos/exynos7_drm_decon: free resources when clk_set_parent() failed.
  mediatek: mt76: mac80211: Fix missing of_node_put() in mt76_led_init()
  media: platform: mtk-mdp: Fix mdp_ipi_comm structure alignment
  crypto: hisilicon - Kunpeng916 crypto driver don't sleep when in softirq
  drm/msm/mdp5: Fix global state lock backoff
  drm: bridge: sii8620: fix possible off-by-one
  drm/mediatek: dpi: Only enable dpi after the bridge is enabled
  drm/mediatek: dpi: Remove output format of YUV
  drm/rockchip: Fix an error handling path rockchip_dp_probe()
  drm/rockchip: vop: Don't crash for invalid duplicate_state()
  crypto: arm64/gcm - Select AEAD for GHASH_ARM64_CE
  drm/vc4: dsi: Correct DSI divider calculations
  drm/vc4: plane: Fix margin calculations for the right/bottom edges
  drm/vc4: plane: Remove subpixel positioning check
  media: hdpvr: fix error value returns in hdpvr_read
  drm/mcde: Fix refcount leak in mcde_dsi_bind
  drm: bridge: adv7511: Add check for mipi_dsi_driver_register
  wifi: iwlegacy: 4965: fix potential off-by-one overflow in il4965_rs_fill_link_cmd()
  ath9k: fix use-after-free in ath9k_hif_usb_rx_cb
  media: tw686x: Register the irq at the end of probe
  i2c: Fix a potential use after free
  drm: adv7511: override i2c address of cec before accessing it
  drm/mediatek: Add pull-down MIPI operation in mtk_dsi_poweroff function
  drm/radeon: fix potential buffer overflow in ni_set_mc_special_registers()
  drm/mipi-dbi: align max_chunk to 2 in spi_transfer
  wifi: rtlwifi: fix error codes in rtl_debugfs_set_write_h2c()
  ath10k: do not enforce interrupt trigger type
  dm: return early from dm_pr_call() if DM device is suspended
  thermal/tools/tmon: Include pthread and time headers in tmon.h
  nohz/full, sched/rt: Fix missed tick-reenabling bug in dequeue_task_rt()
  regulator: of: Fix refcount leak bug in of_get_regulation_constraints()
  blk-mq: don't create hctx debugfs dir until q->debugfs_dir is created
  erofs: avoid consecutive detection for Highmem memory
  arm64: dts: mt7622: fix BPI-R64 WPS button
  bus: hisi_lpc: fix missing platform_device_put() in hisi_lpc_acpi_probe()
  ARM: dts: qcom: pm8841: add required thermal-sensor-cells
  soc: qcom: aoss: Fix refcount leak in qmp_cooling_devices_register
  cpufreq: zynq: Fix refcount leak in zynq_get_revision
  ARM: OMAP2+: Fix refcount leak in omap3xxx_prm_late_init
  ARM: OMAP2+: Fix refcount leak in omapdss_init_of
  ARM: dts: qcom: mdm9615: add missing PMIC GPIO reg
  soc: fsl: guts: machine variable might be unset
  ARM: dts: ast2600-evb: fix board compatible
  ARM: dts: ast2500-evb: fix board compatible
  x86/pmem: Fix platform-device leak in error path
  ARM: bcm: Fix refcount leak in bcm_kona_smc_init
  meson-mx-socinfo: Fix refcount leak in meson_mx_socinfo_init
  ARM: findbit: fix overflowing offset
  spi: spi-rspi: Fix PIO fallback on RZ platforms
  selinux: Add boundary check in put_entry()
  PM: hibernate: defer device probing when resuming from hibernation
  ARM: shmobile: rcar-gen2: Increase refcount for new reference
  arm64: dts: allwinner: a64: orangepi-win: Fix LED node name
  arm64: dts: qcom: ipq8074: fix NAND node name
  ACPI: LPSS: Fix missing check in register_device_clock()
  ACPI: PM: save NVS memory for Lenovo G40-45
  ACPI: EC: Remove duplicate ThinkPad X1 Carbon 6th entry from DMI quirks
  ARM: OMAP2+: display: Fix refcount leak bug
  spi: synquacer: Add missing clk_disable_unprepare()
  ARM: dts: imx6ul: fix qspi node compatible
  ARM: dts: imx6ul: fix lcdif node compatible
  ARM: dts: imx6ul: fix csi node compatible
  ARM: dts: imx6ul: change operating-points to uint32-matrix
  ARM: dts: imx6ul: add missing properties for sram
  wait: Fix __wait_event_hrtimeout for RT/DL tasks
  genirq: Don't return error on missing optional irq_request_resources()
  ext2: Add more validity checks for inode counts
  arm64: fix oops in concurrently setting insn_emulation sysctls
  arm64: Do not forget syscall when starting a new thread.
  x86: Handle idle=nomwait cmdline properly for x86_idle
  epoll: autoremove wakers even more aggressively
  netfilter: nf_tables: fix null deref due to zeroed list head
  netfilter: nf_tables: do not allow RULE_ID to refer to another chain
  netfilter: nf_tables: do not allow SET_ID to refer to another table
  arm64: dts: uniphier: Fix USB interrupts for PXs3 SoC
  ARM: dts: uniphier: Fix USB interrupts for PXs2 SoC
  USB: HCD: Fix URB giveback issue in tasklet function
  coresight: Clear the connection field properly
  MIPS: cpuinfo: Fix a warning for CONFIG_CPUMASK_OFFSTACK
  powerpc/powernv: Avoid crashing if rng is NULL
  powerpc/ptdump: Fix display of RW pages on FSL_BOOK3E
  powerpc/fsl-pci: Fix Class Code of PCIe Root Port
  PCI: Add defines for normal and subtractive PCI bridges
  ia64, processor: fix -Wincompatible-pointer-types in ia64_get_irr()
  md-raid10: fix KASAN warning
  serial: mvebu-uart: uart2 error bits clearing
  fuse: limit nsec
  iio: light: isl29028: Fix the warning in isl29028_remove()
  drm/amdgpu: Check BO's requested pinning domains against its preferred_domains
  drm/nouveau: fix another off-by-one in nvbios_addr
  drm/gem: Properly annotate WW context on drm_gem_lock_reservations() error
  parisc: io_pgetevents_time64() needs compat syscall in 32-bit compat mode
  parisc: Fix device names in /proc/iomem
  ovl: drop WARN_ON() dentry is NULL in ovl_encode_fh()
  usbnet: Fix linkwatch use-after-free on disconnect
  fbcon: Fix boundary checks for fbcon=vc:n1-n2 parameters
  thermal: sysfs: Fix cooling_device_stats_setup() error code path
  fs: Add missing umask strip in vfs_tmpfile
  vfs: Check the truncate maximum size in inode_newsize_ok()
  tty: vt: initialize unicode screen buffer
  ALSA: hda/realtek: Add quirk for another Asus K42JZ model
  ALSA: hda/cirrus - support for iMac 12,1 model
  ALSA: hda/conexant: Add quirk for LENOVO 20149 Notebook model
  mm/mremap: hold the rmap lock in write mode when moving page table entries.
  KVM: x86: Set error code to segment selector on LLDT/LTR non-canonical #GP
  KVM: x86: Mark TSS busy during LTR emulation _after_ all fault checks
  KVM: nVMX: Let userspace set nVMX MSR to any _host_ supported value
  KVM: SVM: Don't BUG if userspace injects an interrupt with GIF=0
  KVM: nVMX: Snapshot pre-VM-Enter DEBUGCTL for !nested_run_pending case
  KVM: nVMX: Snapshot pre-VM-Enter BNDCFGS for !nested_run_pending case
  HID: wacom: Don't register pad_input for touch switch
  HID: wacom: Only report rotation for art pen
  add barriers to buffer_uptodate and set_buffer_uptodate
  wifi: mac80211_hwsim: use 32-bit skb cookie
  wifi: mac80211_hwsim: add back erroneously removed cast
  wifi: mac80211_hwsim: fix race condition in pending packet
  igc: Remove _I_PHY_ID checking
  ALSA: bcd2000: Fix a UAF bug on the error path of probing
  scsi: Revert "scsi: qla2xxx: Fix disk failure to rediscover"
  x86: link vdso and boot with -z noexecstack --no-warn-rwx-segments
  Makefile: link with -z noexecstack --no-warn-rwx-segments

 Conflicts:
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/arm/qcom.yaml
	Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
	drivers/mmc/core/sd.c
	drivers/net/usb/ax88179_178a.c
	drivers/rpmsg/qcom_glink_native.c
	drivers/usb/dwc3/core.c
	drivers/usb/typec/ucsi/ucsi.c
	net/core/dev.c
	net/wireless/scan.c

Change-Id: Id1996866ef5d9b7c097c39a5bdb00db413763104
Signed-off-by: Srinivasarao Pathipati <quic_c_spathi@quicinc.com>
2023-01-27 00:57:29 +05:30

// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code        <alan@lxorguk.ukuu.org.uk>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include "internal.h"
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif
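/*
 * Added descriptive note (not upstream text): when this flag is set, e.g. via
 * the "ignore_rlimit_data" boot parameter, exceeding RLIMIT_DATA with data
 * mappings only triggers a one-time warning instead of failing the mapping.
 */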
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware. The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 */
pgprot_t protection_map[16] __ro_after_init = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
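/*
 * Added descriptive note (not upstream text): vm_get_page_prot() below indexes
 * this table with the low four vm_flags bits (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED),
 * so e.g. a private read-only mapping (VM_READ == 0x1) selects __P001, while the
 * VM_SHARED bit (0x8) selects the __Sxxx half of the table.
 */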
#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
        return prot;
}
#endif

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
                                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
                        pgprot_val(arch_vm_get_page_prot(vm_flags)));

        return arch_filter_pgprot(ret);
}
EXPORT_SYMBOL(vm_get_page_prot);

static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
        return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
        unsigned long vm_flags = vma->vm_flags;
        pgprot_t vm_page_prot;

        vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
        if (vma_wants_writenotify(vma, vm_page_prot)) {
                vm_flags &= ~VM_SHARED;
                vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
        }
        /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
        WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
/*
* Requires inode->i_mapping->i_mmap_rwsem
*/
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
struct file *file, struct address_space *mapping)
{
if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&file_inode(file)->i_writecount);
if (vma->vm_flags & VM_SHARED)
mapping_unmap_writable(mapping);
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
}
/*
* Unlink a file-based vm structure from its interval tree, to hide
* vma from rmap and vmtruncate before freeing its page tables.
*/
void unlink_file_vma(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
if (file) {
struct address_space *mapping = file->f_mapping;
i_mmap_lock_write(mapping);
__remove_shared_vm_struct(vma, file, mapping);
i_mmap_unlock_write(mapping);
}
}
static void __free_vma(struct vm_area_struct *vma)
{
if (vma->vm_file)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
vm_area_free(vma);
}
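/*
 * Drop a reference to a vma. With speculative page faults enabled the vma is
 * reference counted and only freed once the last reference goes away;
 * otherwise it is freed immediately.
 */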
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
void put_vma(struct vm_area_struct *vma)
{
if (atomic_dec_and_test(&vma->vm_ref_count))
__free_vma(vma);
}
#else
static inline void put_vma(struct vm_area_struct *vma)
{
__free_vma(vma);
}
#endif
/*
* Close a vm structure and free it, returning the next.
*/
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *next = vma->vm_next;
might_sleep();
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
put_vma(vma);
return next;
}
static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long retval;
unsigned long newbrk, oldbrk, origbrk;
struct mm_struct *mm = current->mm;
struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
bool downgraded = false;
LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
origbrk = mm->brk;
#ifdef CONFIG_COMPAT_BRK
/*
* CONFIG_COMPAT_BRK can still be overridden by setting
* randomize_va_space to 2, which will still cause mm->start_brk
* to be arbitrarily shifted
*/
if (current->brk_randomized)
min_brk = mm->start_brk;
else
min_brk = mm->end_data;
#else
min_brk = mm->start_brk;
#endif
if (brk < min_brk)
goto out;
/*
* Check against rlimit here. If this check is done later after the test
* of oldbrk with newbrk then it can escape the test and let the data
* segment grow beyond its set limit in the case where the limit is
* not page aligned -Ram Gupta
*/
if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
mm->end_data, mm->start_data))
goto out;
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
if (oldbrk == newbrk) {
mm->brk = brk;
goto success;
}
/*
* Always allow shrinking brk.
* __do_munmap() may downgrade mmap_sem to read.
*/
if (brk <= mm->brk) {
int ret;
/*
* mm->brk must be protected by write mmap_sem, so update it
* before downgrading mmap_sem. When __do_munmap() fails,
* mm->brk will be restored from origbrk.
*/
mm->brk = brk;
ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
if (ret < 0) {
mm->brk = origbrk;
goto out;
} else if (ret == 1) {
downgraded = true;
}
goto success;
}
/* Check against existing mmap mappings. */
next = find_vma(mm, oldbrk);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
goto out;
mm->brk = brk;
success:
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
if (downgraded)
up_read(&mm->mmap_sem);
else
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
out:
retval = origbrk;
up_write(&mm->mmap_sem);
return retval;
}
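/*
 * Free gap between this vma and its predecessor (or the start of the address
 * space), taking stack guard gaps into account; used to maintain the
 * rb_subtree_gap values in the augmented rbtree below.
 */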
static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
{
unsigned long gap, prev_end;
/*
* Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
* allow two stack_guard_gaps between them here, and when choosing
* an unmapped area; whereas when expanding we only require one.
* That's a little inconsistent, but keeps the code here simpler.
*/
gap = vm_start_gap(vma);
if (vma->vm_prev) {
prev_end = vm_end_gap(vma->vm_prev);
if (gap > prev_end)
gap -= prev_end;
else
gap = 0;
}
return gap;
}
#ifdef CONFIG_DEBUG_VM_RB
static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
unsigned long max = vma_compute_gap(vma), subtree_gap;
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
if (subtree_gap > max)
max = subtree_gap;
}
if (vma->vm_rb.rb_right) {
subtree_gap = rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
if (subtree_gap > max)
max = subtree_gap;
}
return max;
}
static int browse_rb(struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
int i = 0, j, bug = 0;
struct rb_node *nd, *pn = NULL;
unsigned long prev = 0, pend = 0;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
if (vma->vm_start < prev) {
pr_emerg("vm_start %lx < prev %lx\n",
vma->vm_start, prev);
bug = 1;
}
if (vma->vm_start < pend) {
pr_emerg("vm_start %lx < pend %lx\n",
vma->vm_start, pend);
bug = 1;
}
if (vma->vm_start > vma->vm_end) {
pr_emerg("vm_start %lx > vm_end %lx\n",
vma->vm_start, vma->vm_end);
bug = 1;
}
spin_lock(&mm->page_table_lock);
if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
pr_emerg("free gap %lx, correct %lx\n",
vma->rb_subtree_gap,
vma_compute_subtree_gap(vma));
bug = 1;
}
spin_unlock(&mm->page_table_lock);
i++;
pn = nd;
prev = vma->vm_start;
pend = vma->vm_end;
}
j = 0;
for (nd = pn; nd; nd = rb_prev(nd))
j++;
if (i != j) {
pr_emerg("backwards %d, forwards %d\n", j, i);
bug = 1;
}
return bug ? -1 : i;
}
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
struct rb_node *nd;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
VM_BUG_ON_VMA(vma != ignore &&
vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
vma);
}
}
static void validate_mm(struct mm_struct *mm)
{
int bug = 0;
int i = 0;
unsigned long highest_address = 0;
struct vm_area_struct *vma = mm->mmap;
while (vma) {
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
if (anon_vma) {
anon_vma_lock_read(anon_vma);
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_verify(avc);
anon_vma_unlock_read(anon_vma);
}
highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
if (i != mm->map_count) {
pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
bug = 1;
}
if (highest_address != mm->highest_vm_end) {
pr_emerg("mm->highest_vm_end %lx, found %lx\n",
mm->highest_vm_end, highest_address);
bug = 1;
}
i = browse_rb(mm);
if (i != mm->map_count) {
if (i != -1)
pr_emerg("map_count %d rb %d\n", mm->map_count, i);
bug = 1;
}
VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif
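/*
 * Generate the augmented rbtree callbacks that keep rb_subtree_gap equal to
 * the largest vma_compute_gap() value in each subtree.
 */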
RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
struct vm_area_struct, vm_rb,
unsigned long, rb_subtree_gap, vma_compute_gap)
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
#define mm_rb_write_lock(mm) write_lock(&(mm)->mm_rb_lock)
#define mm_rb_write_unlock(mm) write_unlock(&(mm)->mm_rb_lock)
#else
#define mm_rb_write_lock(mm) do { } while (0)
#define mm_rb_write_unlock(mm) do { } while (0)
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
/*
* Update augmented rbtree rb_subtree_gap values after vma->vm_start or
* vma->vm_prev->vm_end values changed, without modifying the vma's position
* in the rbtree.
*/
static void vma_gap_update(struct vm_area_struct *vma)
{
/*
* As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
* a callback function that does exactly what we want.
*/
vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
static inline void vma_rb_insert(struct vm_area_struct *vma,
struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
/* All rb_subtree_gap values must be consistent prior to insertion */
validate_mm_rb(root, NULL);
rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
/*
* Note rb_erase_augmented is a fairly large inline function,
* so make sure we instantiate it only once with our desired
* augmented rbtree callbacks.
*/
mm_rb_write_lock(mm);
rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
mm_rb_write_unlock(mm); /* wmb */
/*
* Ensure the removal is complete before clearing the node.
* Matched by vma_has_changed()/handle_speculative_fault().
*/
RB_CLEAR_NODE(&vma->vm_rb);
}
static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
struct mm_struct *mm,
struct vm_area_struct *ignore)
{
/*
* All rb_subtree_gap values must be consistent prior to erase,
* with the possible exception of the "next" vma being erased if
* next->vm_start was reduced.
*/
validate_mm_rb(&mm->mm_rb, ignore);
__vma_rb_erase(vma, mm);
}
static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
struct mm_struct *mm)
{
/*
* All rb_subtree_gap values must be consistent prior to erase,
* with the possible exception of the vma being erased.
*/
validate_mm_rb(&mm->mm_rb, vma);
__vma_rb_erase(vma, mm);
}
/*
* vma has some anon_vma assigned, and is already inserted on that
* anon_vma's interval trees.
*
* Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
* vma must be removed from the anon_vma's interval trees using
* anon_vma_interval_tree_pre_update_vma().
*
* After the update, the vma will be reinserted using
* anon_vma_interval_tree_post_update_vma().
*
* The entire update must be protected by exclusive mmap_sem and by
* the root anon_vma's mutex.
*/
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc;
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}
static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc;
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
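/*
 * Find the rbtree insertion point for a new vma covering [addr, end).
 * On success fills in *pprev, *rb_link and *rb_parent and returns 0;
 * returns -ENOMEM if the range overlaps an existing vma.
 */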
static int find_vma_links(struct mm_struct *mm, unsigned long addr,
unsigned long end, struct vm_area_struct **pprev,
struct rb_node ***rb_link, struct rb_node **rb_parent)
{
struct rb_node **__rb_link, *__rb_parent, *rb_prev;
__rb_link = &mm->mm_rb.rb_node;
rb_prev = __rb_parent = NULL;
while (*__rb_link) {
struct vm_area_struct *vma_tmp;
__rb_parent = *__rb_link;
vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
if (vma_tmp->vm_end > addr) {
/* Fail if an existing vma overlaps the area */
if (vma_tmp->vm_start < end)
return -ENOMEM;
__rb_link = &__rb_parent->rb_left;
} else {
rb_prev = __rb_parent;
__rb_link = &__rb_parent->rb_right;
}
}
*pprev = NULL;
if (rb_prev)
*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
*rb_link = __rb_link;
*rb_parent = __rb_parent;
return 0;
}
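/* Count the pages in [addr, end) that are already covered by existing vmas. */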
static unsigned long count_vma_pages_range(struct mm_struct *mm,
unsigned long addr, unsigned long end)
{
unsigned long nr_pages = 0;
struct vm_area_struct *vma;
/* Find the first overlapping mapping */
vma = find_vma_intersection(mm, addr, end);
if (!vma)
return 0;
nr_pages = (min(end, vma->vm_end) -
max(addr, vma->vm_start)) >> PAGE_SHIFT;
/* Iterate over the rest of the overlaps */
for (vma = vma->vm_next; vma; vma = vma->vm_next) {
unsigned long overlap_len;
if (vma->vm_start > end)
break;
overlap_len = min(end, vma->vm_end) - vma->vm_start;
nr_pages += overlap_len >> PAGE_SHIFT;
}
return nr_pages;
}
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
struct rb_node **rb_link, struct rb_node *rb_parent)
{
/* Update tracking information for the gap following the new vma. */
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
* correct insertion point for that vma. As a result, we could not
* update the vma vm_rb parents rb_subtree_gap values on the way down.
* So, we first insert the vma with a zero rb_subtree_gap value
* (to be consistent with what we did on the way down), and then
* immediately update the gap to the correct value. Finally we
* rebalance the rbtree after all augmented values have been set.
*/
mm_rb_write_lock(mm);
rb_link_node(&vma->vm_rb, rb_parent, rb_link);
vma->rb_subtree_gap = 0;
vma_gap_update(vma);
vma_rb_insert(vma, mm);
mm_rb_write_unlock(mm);
}
static void __vma_link_file(struct vm_area_struct *vma)
{
struct file *file;
file = vma->vm_file;
if (file) {
struct address_space *mapping = file->f_mapping;
if (vma->vm_flags & VM_DENYWRITE)
atomic_dec(&file_inode(file)->i_writecount);
if (vma->vm_flags & VM_SHARED)
atomic_inc(&mapping->i_mmap_writable);
flush_dcache_mmap_lock(mapping);
vma_interval_tree_insert(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
}
}
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
__vma_link_list(mm, vma, prev, rb_parent);
__vma_link_rb(mm, vma, rb_link, rb_parent);
}
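/*
 * Link a new vma into the mm: the vma list, the rbtree and, for file-backed
 * mappings, the address_space interval tree (under i_mmap_rwsem).
 */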
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
struct address_space *mapping = NULL;
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
i_mmap_lock_write(mapping);
}
__vma_link(mm, vma, prev, rb_link, rb_parent);
__vma_link_file(vma);
if (mapping)
i_mmap_unlock_write(mapping);
mm->map_count++;
validate_mm(mm);
}
/*
* Helper for vma_adjust() in the split_vma insert case: insert a vma into the
* mm's list and rbtree. It has already been inserted into the interval tree.
*/
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
if (find_vma_links(mm, vma->vm_start, vma->vm_end,
&prev, &rb_link, &rb_parent))
BUG();
__vma_link(mm, vma, prev, rb_link, rb_parent);
mm->map_count++;
}
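/*
 * Unlink a vma from the rbtree and the vma list, and invalidate the vma
 * cache. "ignore" is forwarded to validate_mm_rb() for the vma whose
 * rb_subtree_gap is allowed to be stale during the operation.
 */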
static __always_inline void __vma_unlink_common(struct mm_struct *mm,
struct vm_area_struct *vma,
struct vm_area_struct *prev,
bool has_prev,
struct vm_area_struct *ignore)
{
struct vm_area_struct *next;
vma_rb_erase_ignore(vma, mm, ignore);
next = vma->vm_next;
if (has_prev)
prev->vm_next = next;
else {
prev = vma->vm_prev;
if (prev)
prev->vm_next = next;
else
mm->mmap = next;
}
if (next)
next->vm_prev = prev;
/* Kill the cache */
vmacache_invalidate(mm);
}
static inline void __vma_unlink_prev(struct mm_struct *mm,
struct vm_area_struct *vma,
struct vm_area_struct *prev)
{
__vma_unlink_common(mm, vma, prev, true, vma);
}
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
* The following helper function should be used when such adjustments
* are necessary. The "insert" vma (if any) is to be inserted
* before we drop the necessary locks.
*/
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand, bool keep_locked)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
struct address_space *mapping = NULL;
struct rb_root_cached *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
bool start_changed = false, end_changed = false;
long adjust_next = 0;
int remove_next = 0;
vm_write_begin(vma);
if (next)
vm_write_begin(next);
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
* perhaps the one after too (mprotect case 6).
* The only other cases that get here are
* case 1, case 7 and case 8.
*/
if (next == expand) {
/*
* The only case where we don't expand "vma"
* and we expand "next" instead is case 8.
*/
VM_WARN_ON(end != next->vm_end);
/*
* remove_next == 3 means we're
* removing "vma" and that to do so we
* swapped "vma" and "next".
*/
remove_next = 3;
VM_WARN_ON(file != next->vm_file);
swap(vma, next);
} else {
VM_WARN_ON(expand != vma);
/*
* case 1, 6, 7, remove_next == 2 is case 6,
* remove_next == 1 is case 1 or 7.
*/
remove_next = 1 + (end > next->vm_end);
VM_WARN_ON(remove_next == 2 &&
end != next->vm_next->vm_end);
VM_WARN_ON(remove_next == 1 &&
end != next->vm_end);
/* trim end to next, for case 6 first pass */
end = next->vm_end;
}
exporter = next;
importer = vma;
/*
* If next doesn't have anon_vma, import from vma after
* next, if the vma overlaps with it.
*/
if (remove_next == 2 && !next->anon_vma)
exporter = next->vm_next;
} else if (end > next->vm_start) {
/*
* vma expands, overlapping part of the next:
* mprotect case 5 shifting the boundary up.
*/
adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
exporter = next;
importer = vma;
VM_WARN_ON(expand != importer);
} else if (end < vma->vm_end) {
/*
* vma shrinks, and !insert tells it's not
* split_vma inserting another: so it must be
* mprotect case 4 shifting the boundary down.
*/
adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
exporter = vma;
importer = next;
VM_WARN_ON(expand != importer);
}
/*
* Easily overlooked: when mprotect shifts the boundary,
* make sure the expanding vma has anon_vma set if the
* shrinking vma had, to cover any anon pages imported.
*/
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;
importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
if (error) {
if (next && next != vma)
vm_write_end(next);
vm_write_end(vma);
return error;
}
}
}
again:
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
if (file) {
mapping = file->f_mapping;
root = &mapping->i_mmap;
uprobe_munmap(vma, vma->vm_start, vma->vm_end);
if (adjust_next)
uprobe_munmap(next, next->vm_start, next->vm_end);
i_mmap_lock_write(mapping);
if (insert) {
/*
* Put into interval tree now, so instantiated pages
* are visible to arm/parisc __flush_dcache_page
* throughout; but we cannot insert into address
* space until vma start or end is updated.
*/
__vma_link_file(insert);
}
}
anon_vma = vma->anon_vma;
if (!anon_vma && adjust_next)
anon_vma = next->anon_vma;
if (anon_vma) {
VM_WARN_ON(adjust_next && next->anon_vma &&
anon_vma != next->anon_vma);
anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_pre_update_vma(next);
}
if (root) {
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, root);
if (adjust_next)
vma_interval_tree_remove(next, root);
}
if (start != vma->vm_start) {
WRITE_ONCE(vma->vm_start, start);
start_changed = true;
}
if (end != vma->vm_end) {
WRITE_ONCE(vma->vm_end, end);
end_changed = true;
}
WRITE_ONCE(vma->vm_pgoff, pgoff);
if (adjust_next) {
WRITE_ONCE(next->vm_start,
next->vm_start + (adjust_next << PAGE_SHIFT));
WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next);
}
if (root) {
if (adjust_next)
vma_interval_tree_insert(next, root);
vma_interval_tree_insert(vma, root);
flush_dcache_mmap_unlock(mapping);
}
if (remove_next) {
/*
* vma_merge has merged next into vma, and needs
* us to remove next before dropping the locks.
*/
if (remove_next != 3)
__vma_unlink_prev(mm, next, vma);
else
/*
* vma is not before next if they've been
* swapped.
*
* pre-swap() next->vm_start was reduced so
* tell validate_mm_rb to ignore pre-swap()
* "next" (which is stored in post-swap()
* "vma").
*/
__vma_unlink_common(mm, next, NULL, false, vma);
if (file)
__remove_shared_vm_struct(next, file, mapping);
} else if (insert) {
/*
* split_vma has split insert from vma, and needs
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
__insert_vm_struct(mm, insert);
} else {
if (start_changed)
vma_gap_update(vma);
if (end_changed) {
if (!next)
mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
}
if (anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_post_update_vma(next);
anon_vma_unlock_write(anon_vma);
}
if (mapping)
i_mmap_unlock_write(mapping);
if (root) {
uprobe_mmap(vma);
if (adjust_next)
uprobe_mmap(next);
}
if (remove_next) {
if (file)
uprobe_munmap(next, next->vm_start, next->vm_end);
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
vm_write_end(next);
put_vma(next);
/*
* In mprotect's case 6 (see comments on vma_merge),
* we must remove another next too. It would clutter
* up the code too much to do both in one go.
*/
if (remove_next != 3) {
/*
* If "next" was removed and vma->vm_end was
* expanded (up) over it, in turn
* "next->vm_prev->vm_end" changed and the
* "vma->vm_next" gap must be updated.
*/
next = vma->vm_next;
if (next)
vm_write_begin(next);
} else {
/*
* For the scope of the comment "next" and
* "vma" considered pre-swap(): if "vma" was
* removed, next->vm_start was expanded (down)
* over it and the "next" gap must be updated.
* Because of the swap() the post-swap() "vma"
* actually points to pre-swap() "next"
* (post-swap() "next" as opposed is now a
* dangling pointer).
*/
next = vma;
}
if (remove_next == 2) {
remove_next = 1;
end = next->vm_end;
goto again;
}
else if (next)
vma_gap_update(next);
else {
/*
* If remove_next == 2 we obviously can't
* reach this path.
*
* If remove_next == 3 we can't reach this
* path because pre-swap() next is always not
* NULL. pre-swap() "next" is not being
* removed and its next->vm_end is not altered
* (and furthermore "end" already matches
* next->vm_end in remove_next == 3).
*
* We reach this only in the remove_next == 1
* case if the "next" vma that was removed was
* the highest vma of the mm. However in such
* case next->vm_end == "end" and the extended
* "vma" has vma->vm_end == next->vm_end so
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
}
if (insert && file)
uprobe_mmap(insert);
if (next && next != vma)
vm_write_end(next);
if (!keep_locked)
vm_write_end(vma);
validate_mm(mm);
return 0;
}
/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
*/
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name)
{
/*
* VM_SOFTDIRTY should not prevent VMA merging: if the flags match
* except for the dirty bit, the caller should mark the merged VMA
* as dirty. If the dirty bit were not excluded from the comparison,
* we would put extra pressure on the memory system by forcing the
* kernel to create new VMAs where an old one could have been
* extended instead.
*/
if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
return 0;
if (vma->vm_file != file)
return 0;
if (vma->vm_ops && vma->vm_ops->close)
return 0;
if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
return 0;
if (vma_get_anon_name(vma) != anon_name)
return 0;
return 1;
}
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
struct anon_vma *anon_vma2,
struct vm_area_struct *vma)
{
/*
* The list_is_singular() test is to avoid merging VMAs cloned from
* parents. This improves scalability by reducing anon_vma lock
* contention.
*/
if ((!anon_vma1 || !anon_vma2) && (!vma ||
list_is_singular(&vma->anon_vma_chain)))
return 1;
return anon_vma1 == anon_vma2;
}
/*
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
* in front of (at a lower virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
* anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
*
* We don't check here for the merged mmap wrapping around the end of pagecache
* indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
* wrap, nor mmaps which cover the final page at index -1UL.
*/
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name)
{
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
return 1;
}
return 0;
}
/*
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
* beyond (at a higher virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
* anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
*/
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name)
{
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
vm_pglen = vma_pages(vma);
if (vma->vm_pgoff + vm_pglen == vm_pgoff)
return 1;
}
return 0;
}
/*
* Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
* figure out whether that can be merged with its predecessor or its
* successor. Or both (it neatly fills a hole).
*
* In most cases - when called for mmap, brk or mremap - [addr,end) is
* certain not to be mapped by the time vma_merge is called; but when
* called for mprotect, it is certain to be already mapped (either at
* an offset within prev, or at the start of next), and the flags of
* this area are about to be changed to vm_flags - and the no-change
* case has already been eliminated.
*
* The following mprotect cases have to be considered, where AAAA is
* the area passed down from mprotect_fixup, never extending beyond one
* vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
*
*     AAAA             AAAA                   AAAA             AAAA
*    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPNNNNNN     PPPPNNNNXXXX
*    cannot merge    might become       might become     might become
*    PPNNNNNNNNNN    PPPPPPPPPPNN       PPPPPPPPPPPP 6 or
*    mmap, brk or    case 4 below       case 5 below     PPPPPPPPXXXX 7 or
*    mremap move:                                        PPPPXXXXXXXX 8
*        AAAA
*    PPPP    NNNN    PPPPPPPPPPPP       PPPPPPPPNNNN     PPPPNNNNNNNN
*    might become    case 1 below       case 2 below     case 3 below
*
* It is important for case 8 that the vma NNNN overlapping the
* region AAAA is never going to be extended over XXXX. Instead XXXX must
* be extended in region AAAA and NNNN must be removed. This way in
* all cases where vma_merge succeeds, the moment vma_adjust drops the
* rmap_locks, the properties of the merged vma will be already
* correct for the whole merged range. Some of those properties like
* vm_page_prot/vm_flags may be accessed by rmap_walks and they must
* be correct for the whole merged range immediately after the
* rmap_locks are released. Otherwise if XXXX would be removed and
* NNNN would be extended over the XXXX range, remove_migration_ptes
* or other rmap walkers (if working on addresses beyond the "end"
* parameter) may establish ptes with the wrong permissions of NNNN
* instead of the right permissions of XXXX.
*/
struct vm_area_struct *__vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t pgoff, struct mempolicy *policy,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name, bool keep_locked)
{
pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
struct vm_area_struct *area, *next;
int err;
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
*/
if (vm_flags & VM_SPECIAL)
return NULL;
if (prev)
next = prev->vm_next;
else
next = mm->mmap;
area = next;
if (area && area->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
/* verify some invariant that must be enforced by the caller */
VM_WARN_ON(prev && addr <= prev->vm_start);
VM_WARN_ON(area && end > area->vm_end);
VM_WARN_ON(addr >= end);
/*
* Can it merge with the predecessor?
*/
if (prev && prev->vm_end == addr &&
mpol_equal(vma_policy(prev), policy) &&
can_vma_merge_after(prev, vm_flags,
anon_vma, file, pgoff,
vm_userfaultfd_ctx,
anon_name)) {
/*
* OK, it can. Can we now merge in the successor as well?
*/
if (next && end == next->vm_start &&
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file,
pgoff+pglen,
vm_userfaultfd_ctx,
anon_name) &&
is_mergeable_anon_vma(prev->anon_vma,
next->anon_vma, NULL)) {
/* cases 1, 6 */
err = __vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL,
prev, keep_locked);
} else /* cases 2, 5, 7 */
err = __vma_adjust(prev, prev->vm_start,
end, prev->vm_pgoff, NULL, prev,
keep_locked);
if (err)
return NULL;
khugepaged_enter_vma_merge(prev, vm_flags);
return prev;
}
/*
* Can this new request be merged in front of next?
*/
if (next && end == next->vm_start &&
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen,
vm_userfaultfd_ctx,
anon_name)) {
if (prev && addr < prev->vm_end) /* case 4 */
err = __vma_adjust(prev, prev->vm_start,
addr, prev->vm_pgoff, NULL, next,
keep_locked);
else { /* cases 3, 8 */
err = __vma_adjust(area, addr, next->vm_end,
next->vm_pgoff - pglen, NULL, next,
keep_locked);
/*
* In case 3 area is already equal to next and
* this is a noop, but in case 8 "area" has
* been removed and next was expanded over it.
*/
area = next;
}
if (err)
return NULL;
khugepaged_enter_vma_merge(area, vm_flags);
return area;
}
return NULL;
}
/*
* Rough compatibility check to quickly see if it's even worth looking
* at sharing an anon_vma.
*
* They need to have the same vm_file, and the flags can only differ
* in things that mprotect may change.
*
* NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
* we can merge the two vma's. For example, we refuse to merge a vma if
* there is a vm_ops->close() function, because that indicates that the
* driver is doing some kind of reference counting. But that doesn't
* really matter for the anon_vma sharing case.
*/
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
return a->vm_end == b->vm_start &&
mpol_equal(vma_policy(a), vma_policy(b)) &&
a->vm_file == b->vm_file &&
!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
/*
* Do some basic sanity checking to see if we can re-use the anon_vma
* from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
* the same as 'old', the other will be the new one that is trying
* to share the anon_vma.
*
* NOTE! This runs with mm_sem held for reading, so it is possible that
* the anon_vma of 'old' is concurrently in the process of being set up
* by another page fault trying to merge _that_. But that's ok: if it
* is being set up, that automatically means that it will be a singleton
* acceptable for merging, so we can do all of this optimistically. But
* we do that READ_ONCE() to make sure that we never re-load the pointer.
*
* IOW: that the "list_is_singular()" test on the anon_vma_chain only
* matters for the 'stable anon_vma' case (ie the thing we want to avoid
* is to return an anon_vma that is "complex" due to having gone through
* a fork).
*
* We also make sure that the two vma's are compatible (adjacent,
* and with the same memory policies). That's all stable, even with just
* a read lock on the mm_sem.
*/
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
if (anon_vma_compatible(a, b)) {
struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
if (anon_vma && list_is_singular(&old->anon_vma_chain))
return anon_vma;
}
return NULL;
}
/*
* find_mergeable_anon_vma is used by anon_vma_prepare, to check
* neighbouring vmas for a suitable anon_vma, before it goes off
* to allocate a new anon_vma. It checks because a repetitive
* sequence of mprotects and faults may otherwise lead to distinct
* anon_vmas being allocated, preventing vma merge in subsequent
* mprotect.
*/
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma;
struct vm_area_struct *near;
near = vma->vm_next;
if (!near)
goto try_prev;
anon_vma = reusable_anon_vma(near, vma, near);
if (anon_vma)
return anon_vma;
try_prev:
near = vma->vm_prev;
if (!near)
goto none;
anon_vma = reusable_anon_vma(near, near, vma);
if (anon_vma)
return anon_vma;
none:
/*
* There's no absolute need to look only at touching neighbours:
* we could search further afield for "compatible" anon_vmas.
* But it would probably just be a waste of time searching,
* or lead to too many vmas hanging off the same anon_vma.
* We're trying to allow mprotect remerging later on,
* not trying to minimize memory used for anon_vmas.
*/
return NULL;
}
/*
* If a hint addr is less than mmap_min_addr, change the hint to be as
* low as possible but still greater than mmap_min_addr.
*/
static inline unsigned long round_hint_to_min(unsigned long hint)
{
hint &= PAGE_MASK;
if (((void *)hint != NULL) &&
(hint < mmap_min_addr))
return PAGE_ALIGN(mmap_min_addr);
return hint;
}
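/*
 * For VM_LOCKED requests, check that locking len more bytes would not push
 * the mm past RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK.
 */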
static inline int mlock_future_check(struct mm_struct *mm,
unsigned long flags,
unsigned long len)
{
unsigned long locked, lock_limit;
/* mlock MCL_FUTURE? */
if (flags & VM_LOCKED) {
locked = len >> PAGE_SHIFT;
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
return 0;
}
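/* Largest file size (in bytes) that a mapping of this file may extend to. */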
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
if (S_ISREG(inode->i_mode))
return MAX_LFS_FILESIZE;
if (S_ISBLK(inode->i_mode))
return MAX_LFS_FILESIZE;
if (S_ISSOCK(inode->i_mode))
return MAX_LFS_FILESIZE;
/* Special "we do even unsigned file positions" case */
if (file->f_mode & FMODE_UNSIGNED_OFFSET)
return 0;
/* Yes, random drivers might want more. But I'm tired of buggy drivers */
return ULONG_MAX;
}
static inline bool file_mmap_ok(struct file *file, struct inode *inode,
unsigned long pgoff, unsigned long len)
{
u64 maxsize = file_mmap_size_max(file, inode);
if (maxsize && len > maxsize)
return false;
maxsize -= len;
if (pgoff > maxsize >> PAGE_SHIFT)
return false;
return true;
}
/*
* The caller must hold down_write(&current->mm->mmap_sem).
*/
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, vm_flags_t vm_flags,
unsigned long pgoff, unsigned long *populate,
struct list_head *uf)
{
struct mm_struct *mm = current->mm;
int pkey = 0;
*populate = 0;
if (!len)
return -EINVAL;
/*
* Does the application expect PROT_READ to imply PROT_EXEC?
*
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we don't add PROT_EXEC.)
*/
if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
if (!(file && path_noexec(&file->f_path)))
prot |= PROT_EXEC;
/* force arch specific MAP_FIXED handling in get_unmapped_area */
if (flags & MAP_FIXED_NOREPLACE)
flags |= MAP_FIXED;
if (!(flags & MAP_FIXED))
addr = round_hint_to_min(addr);
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
if (!len)
return -ENOMEM;
/* offset overflow? */
if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
return -EOVERFLOW;
/* Too many mappings? */
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
/* Obtain the address to map to. We verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
addr = get_unmapped_area(file, addr, len, pgoff, flags);
if (offset_in_page(addr))
return addr;
if (flags & MAP_FIXED_NOREPLACE) {
struct vm_area_struct *vma = find_vma(mm, addr);
if (vma && vma->vm_start < addr + len)
return -EEXIST;
}
if (prot == PROT_EXEC) {
pkey = execute_only_pkey(mm);
if (pkey < 0)
pkey = 0;
}
/* Do simple checking here so the lower-level routines won't have
* to. We assume access permissions have been handled by the open
* of the memory object, so we don't do any here.
*/
vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
if (mlock_future_check(mm, vm_flags, len))
return -EAGAIN;
if (file) {
struct inode *inode = file_inode(file);
unsigned long flags_mask;
if (!file_mmap_ok(file, inode, pgoff, len))
return -EOVERFLOW;
flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
switch (flags & MAP_TYPE) {
case MAP_SHARED:
/*
* Force use of MAP_SHARED_VALIDATE with non-legacy
* flags. E.g. MAP_SYNC is dangerous to use with
* MAP_SHARED as you don't know which consistency model
* you will get. We silently ignore unsupported flags
* with MAP_SHARED to preserve backward compatibility.
*/
flags &= LEGACY_MAP_MASK;
/* fall through */
case MAP_SHARED_VALIDATE:
if (flags & ~flags_mask)
return -EOPNOTSUPP;
if (prot & PROT_WRITE) {
if (!(file->f_mode & FMODE_WRITE))
return -EACCES;
if (IS_SWAPFILE(file->f_mapping->host))
return -ETXTBSY;
}
/*
* Make sure we don't allow writing to an append-only
* file..
*/
if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
return -EACCES;
/*
* Make sure there are no mandatory locks on the file.
*/
if (locks_verify_locked(file))
return -EAGAIN;
vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
/* fall through */
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
return -EACCES;
if (path_noexec(&file->f_path)) {
if (vm_flags & VM_EXEC)
return -EPERM;
vm_flags &= ~VM_MAYEXEC;
}
if (!file->f_op->mmap)
return -ENODEV;
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
return -EINVAL;
break;
default:
return -EINVAL;
}
} else {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
return -EINVAL;
/*
* Ignore pgoff.
*/
pgoff = 0;
vm_flags |= VM_SHARED | VM_MAYSHARE;
break;
case MAP_PRIVATE:
/*
* Set pgoff according to addr for anon_vma.
*/
pgoff = addr >> PAGE_SHIFT;
break;
default:
return -EINVAL;
}
}
/*
* Set 'VM_NORESERVE' if we should not account for the
* memory use of this mapping.
*/
if (flags & MAP_NORESERVE) {
/* We honor MAP_NORESERVE if allowed to overcommit */
if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
vm_flags |= VM_NORESERVE;
/* hugetlb applies strict overcommit unless MAP_NORESERVE */
if (file && is_file_hugepages(file))
vm_flags |= VM_NORESERVE;
}
addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
(flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
*populate = len;
return addr;
}
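/*
 * Common implementation of the mmap syscalls: resolve the file descriptor
 * (or set up an anonymous hugetlb file for MAP_HUGETLB) and hand off to
 * vm_mmap_pgoff().
 */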
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
struct file *file = NULL;
unsigned long retval;
if (!(flags & MAP_ANONYMOUS)) {
audit_mmap_fd(fd, flags);
file = fget(fd);
if (!file)
return -EBADF;
if (is_file_hugepages(file))
len = ALIGN(len, huge_page_size(hstate_file(file)));
retval = -EINVAL;
if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
goto out_fput;
} else if (flags & MAP_HUGETLB) {
struct user_struct *user = NULL;
struct hstate *hs;
hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (!hs)
return -EINVAL;
len = ALIGN(len, huge_page_size(hs));
/*
* VM_NORESERVE is used because the reservations will be
* taken when vm_ops->mmap() is called.
* A dummy user value is used because we are not locking
* memory, so no accounting is necessary.
*/
file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
&user, HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (IS_ERR(file))
return PTR_ERR(file);
}
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
if (file)
fput(file);
return retval;
}
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, pgoff)
{
return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
struct mmap_arg_struct a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
if (offset_in_page(a.offset))
return -EINVAL;
return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
/*
* Some shared mappings will want the pages marked read-only
* to track write events. If so, we'll downgrade vm_page_prot
* to the private version (using protection_map[] without the
* VM_SHARED bit).
*/
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
vm_flags_t vm_flags = vma->vm_flags;
const struct vm_operations_struct *vm_ops = vma->vm_ops;
/* If it was private or non-writable, the write bit is already clear */
if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
return 0;
/* The backer wishes to know when pages are first written to? */
if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
return 1;
/* The open routine did something to the protections that pgprot_modify
* won't preserve? */
if (pgprot_val(vm_page_prot) !=
pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
return 0;
/*
* Do we need to track softdirty? hugetlb does not support softdirty
* tracking yet.
*/
if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
!is_vm_hugetlb_page(vma))
return 1;
/* Specialty mapping? */
if (vm_flags & VM_PFNMAP)
return 0;
/* Can the mapping track the dirty pages? */
return vma->vm_file && vma->vm_file->f_mapping &&
mapping_cap_account_dirty(vma->vm_file->f_mapping);
}
/*
* We account for memory if it's a private writeable mapping,
* not hugepages and VM_NORESERVE wasn't set.
*/
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
/*
* hugetlb has its own accounting separate from the core VM.
* VM_HUGETLB may not be set yet, so we cannot check for that flag.
*/
if (file && is_file_hugepages(file))
return 0;
return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
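/*
 * Establish the mapping in [addr, addr + len): unmap anything already in the
 * way, try to merge with a neighbouring vma, otherwise allocate a new vma,
 * let ->mmap() (or shmem) set it up, and link it into the mm.
 */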
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
int error;
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
/*
* MAP_FIXED may remove pages of mappings that intersect with the
* requested mapping. Account for the pages it would unmap.
*/
nr_pages = count_vma_pages_range(mm, addr, addr + len);
if (!may_expand_vm(mm, vm_flags,
(len >> PAGE_SHIFT) - nr_pages))
return -ENOMEM;
}
/* Clear old maps */
while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
&rb_parent)) {
if (do_munmap(mm, addr, len, uf))
return -ENOMEM;
}
/*
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
charged = len >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(mm, charged))
return -ENOMEM;
vm_flags |= VM_ACCOUNT;
}
/*
* Can we just expand an old mapping?
*/
vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
if (vma)
goto out;
/*
* Determine the object being mapped and call the appropriate
* specific mapper. The address has already been validated but not
* unmapped; however, the old mappings have been removed from the list.
*/
vma = vm_area_alloc(mm);
if (!vma) {
error = -ENOMEM;
goto unacct_error;
}
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;
if (file) {
if (vm_flags & VM_DENYWRITE) {
error = deny_write_access(file);
if (error)
goto free_vma;
}
if (vm_flags & VM_SHARED) {
error = mapping_map_writable(file->f_mapping);
if (error)
goto allow_write_and_free_vma;
}
/* ->mmap() can change vma->vm_file, but must guarantee that
* vma_link() below can deny write-access if VM_DENYWRITE is set
* and map writably if VM_SHARED is set. This usually means the
* new file must not have been exposed to user-space, yet.
*/
vma->vm_file = get_file(file);
error = call_mmap(file, vma);
if (error)
goto unmap_and_free_vma;
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
* Bug: If addr is changed, prev, rb_link, rb_parent should
* be updated for vma_link()
*/
WARN_ON_ONCE(addr != vma->vm_start);
addr = vma->vm_start;
vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
} else {
vma_set_anonymous(vma);
}
vma_link(mm, vma, prev, rb_link, rb_parent);
/* Once vma denies write, undo our temporary denial count */
if (file) {
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
}
file = vma->vm_file;
out:
perf_event_mmap(vma);
vm_write_begin(vma);
vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm))
WRITE_ONCE(vma->vm_flags,
vma->vm_flags & VM_LOCKED_CLEAR_MASK);
else
mm->locked_vm += (len >> PAGE_SHIFT);
}
if (file)
uprobe_mmap(vma);
/*
* A new (or expanded) vma always gets soft-dirty status.
* Otherwise the user-space soft-dirty page tracker would not be
* able to distinguish the situation where a vma area is unmapped
* and a new one is then mapped in place (which must be treated as
* a completely new data area).
*/
WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY);
vma_set_page_prot(vma);
vm_write_end(vma);
return addr;
unmap_and_free_vma:
vma->vm_file = NULL;
fput(file);
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
vm_area_free(vma);
unacct_error:
if (charged)
vm_unacct_memory(charged);
return error;
}
unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
/*
* We implement the search by looking for an rbtree node that
* immediately follows a suitable gap. That is,
* - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
* - gap_end = vma->vm_start >= info->low_limit + length;
* - gap_end - gap_start >= length
*/
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
/* Adjust search limits by the desired length */
if (info->high_limit < length)
return -ENOMEM;
high_limit = info->high_limit - length;
if (info->low_limit > high_limit)
return -ENOMEM;
low_limit = info->low_limit + length;
/* Check if rbtree root looks promising */
if (RB_EMPTY_ROOT(&mm->mm_rb))
goto check_highest;
vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
if (vma->rb_subtree_gap < length)
goto check_highest;
while (true) {
/* Visit left subtree if it looks promising */
gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
}
}
gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
if (gap_end >= low_limit &&
gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
if (vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
}
}
/* Go back up the rbtree to find next candidate node */
while (true) {
struct rb_node *prev = &vma->vm_rb;
if (!rb_parent(prev))
goto check_highest;
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
gap_start = vm_end_gap(vma->vm_prev);
gap_end = vm_start_gap(vma);
goto check_current;
}
}
}
check_highest:
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
if (gap_start > high_limit)
return -ENOMEM;
found:
/* We found a suitable gap. Clip it with the original low_limit. */
if (gap_start < info->low_limit)
gap_start = info->low_limit;
/* Adjust gap address to the desired alignment */
gap_start += (info->align_offset - gap_start) & info->align_mask;
VM_BUG_ON(gap_start + info->length > info->high_limit);
VM_BUG_ON(gap_start + info->length > gap_end);
return gap_start;
}
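/*
 * Like unmapped_area(), but searches the rbtree top-down so that the highest
 * suitable gap below info->high_limit is chosen.
 */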
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
/*
* Adjust search limits by the desired length.
* See implementation comment at top of unmapped_area().
*/
gap_end = info->high_limit;
if (gap_end < length)
return -ENOMEM;
high_limit = gap_end - length;
if (info->low_limit > high_limit)
return -ENOMEM;
low_limit = info->low_limit + length;
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
if (gap_start <= high_limit)
goto found_highest;
/* Check if rbtree root looks promising */
if (RB_EMPTY_ROOT(&mm->mm_rb))
return -ENOMEM;
vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
if (vma->rb_subtree_gap < length)
return -ENOMEM;
while (true) {
/* Visit right subtree if it looks promising */
gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
}
}
check_current:
/* Check if current node has a suitable gap */
gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit &&
gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
if (vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
}
}
/* Go back up the rbtree to find next candidate node */
while (true) {
struct rb_node *prev = &vma->vm_rb;
if (!rb_parent(prev))
return -ENOMEM;
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
}
found:
/* We found a suitable gap. Clip it with the original high_limit. */
if (gap_end > info->high_limit)
gap_end = info->high_limit;
found_highest:
/* Compute highest gap address at the desired alignment */
gap_end -= info->length;
gap_end -= (gap_end - info->align_offset) & info->align_mask;
VM_BUG_ON(gap_end < info->low_limit);
VM_BUG_ON(gap_end < gap_start);
return gap_end;
}
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
* Ugly calling convention alert:
* Return value with the low bits set means error value,
* ie
* if (ret & ~PAGE_MASK)
* error = ret;
*
* This function "knows" that -ENOMEM has the bits set.
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
const unsigned long mmap_end = arch_get_mmap_end(addr);
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (mmap_end - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_end;
info.align_mask = 0;
info.align_offset = 0;
return vm_unmapped_area(&info);
}
#endif
/*
* This mmap-allocator allocates new areas top-down from below the
* stack's low limit (the base):
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
const unsigned long mmap_end = arch_get_mmap_end(addr);
/* requested length too big for entire address space */
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (mmap_end - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
info.align_mask = 0;
info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (offset_in_page(addr)) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = mmap_end;
addr = vm_unmapped_area(&info);
}
return addr;
}
#endif
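/*
 * Pick an unmapped area for a new mapping, delegating to the file's (or the
 * mm's) get_unmapped_area helper and sanity-checking the result.
 */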
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
unsigned long error = arch_mmap_check(addr, len, flags);
if (error)
return error;
/* Careful about overflows.. */
if (len > TASK_SIZE)
return -ENOMEM;
get_area = current->mm->get_unmapped_area;
if (file) {
if (file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
} else if (flags & MAP_SHARED) {
/*
* mmap_region() will call shmem_zero_setup() to create a file,
* so use shmem's get_unmapped_area in case it can be huge.
* do_mmap_pgoff() will clear pgoff, so match alignment.
*/
pgoff = 0;
get_area = shmem_get_unmapped_area;
}
addr = get_area(file, addr, len, pgoff, flags);
if (IS_ERR_VALUE(addr))
return addr;
if (addr > TASK_SIZE - len)
return -ENOMEM;
if (offset_in_page(addr))
return -EINVAL;
error = security_mmap_addr(addr);
return error ? error : addr;
}
EXPORT_SYMBOL(get_unmapped_area);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
static struct vm_area_struct *__find_vma(struct mm_struct *mm,
unsigned long addr)
{
struct rb_node *rb_node;
struct vm_area_struct *vma = NULL;
rb_node = mm->mm_rb.rb_node;
while (rb_node) {
struct vm_area_struct *tmp;
tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
if (tmp->vm_end > addr) {
vma = tmp;
if (tmp->vm_start <= addr)
break;
rb_node = rb_node->rb_left;
} else
rb_node = rb_node->rb_right;
}
return vma;
}
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
/* Check the cache first. */
vma = vmacache_find(mm, addr);
if (likely(vma))
return vma;
vma = __find_vma(mm, addr);
if (vma)
vmacache_update(addr, vma);
return vma;
}
EXPORT_SYMBOL(find_vma);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
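/*
 * Look up a vma under mm_rb_lock and take a reference on it, so the
 * speculative page fault path can use the vma without holding mmap_sem.
 * The reference is dropped with put_vma().
 */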
struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma = NULL;
read_lock(&mm->mm_rb_lock);
vma = __find_vma(mm, addr);
if (vma)
atomic_inc(&vma->vm_ref_count);
read_unlock(&mm->mm_rb_lock);
return vma;
}
#endif
/*
* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{
struct vm_area_struct *vma;
vma = find_vma(mm, addr);
if (vma) {
*pprev = vma->vm_prev;
} else {
struct rb_node *rb_node = rb_last(&mm->mm_rb);
*pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
}
return vma;
}
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
static int acct_stack_growth(struct vm_area_struct *vma,
unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, vma->vm_flags, grow))
return -ENOMEM;
/* Stack limit test */
if (size > rlimit(RLIMIT_STACK))
return -ENOMEM;
/* mlock limit tests */
if (vma->vm_flags & VM_LOCKED) {
unsigned long locked;
unsigned long limit;
locked = mm->locked_vm + grow;
limit = rlimit(RLIMIT_MEMLOCK);
limit >>= PAGE_SHIFT;
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
/* Check to ensure the stack will not grow into a hugetlb-only region */
new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
vma->vm_end - size;
if (is_hugepage_only_range(vma->vm_mm, new_start, size))
return -EFAULT;
/*
* Overcommit.. This must be the final test, as it will
* update security statistics.
*/
if (security_vm_enough_memory_mm(mm, grow))
return -ENOMEM;
return 0;
}
#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
/*
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next;
unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
/* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
address += PAGE_SIZE;
/* Enforce stack_guard_gap */
gap_addr = address + stack_guard_gap;
/* Guard against overflow */
if (gap_addr < address || gap_addr > TASK_SIZE)
gap_addr = TASK_SIZE;
next = vma->vm_next;
if (next && next->vm_start < gap_addr &&
(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
if (!(next->vm_flags & VM_GROWSUP))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
}
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
unsigned long size, grow;
size = address - vma->vm_start;
grow = (address - vma->vm_end) >> PAGE_SHIFT;
error = -ENOMEM;
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
* anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
* against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
anon_vma_interval_tree_post_update_vma(vma);
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
}
}
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
/*
* vma is the first one with address < vma->vm_start. Have to extend vma.
*/
int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
int error = 0;
address &= PAGE_MASK;
if (address < mmap_min_addr)
return -EPERM;
/* Enforce stack_guard_gap */
prev = vma->vm_prev;
/* Check that both stack segments have the same anon_vma? */
if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
if (address - prev->vm_end < stack_guard_gap)
return -ENOMEM;
}
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
unsigned long size, grow;
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
error = -ENOMEM;
if (grow <= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
* anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
* against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
WRITE_ONCE(vma->vm_start, address);
WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow);
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
}
}
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
}
/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
static int __init cmdline_parse_stack_guard_gap(char *p)
{
unsigned long val;
char *endptr;
val = simple_strtoul(p, &endptr, 10);
if (!*endptr)
stack_guard_gap = val << PAGE_SHIFT;
return 1;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
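/*
 * Example (illustrative): the gap is given in pages on the kernel command
 * line, so with 4K pages "stack_guard_gap=1024" widens the gap to 4MB,
 * while "stack_guard_gap=1" shrinks it to a single page.
 */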
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
return expand_upwards(vma, address);
}
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma, *prev;
addr &= PAGE_MASK;
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
/* don't alter vm_end if the coredump is running */
if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED)
populate_vma_page_range(prev, addr, prev->vm_end, NULL);
return prev;
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
return expand_downwards(vma, address);
}
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
unsigned long start;
addr &= PAGE_MASK;
vma = find_vma(mm, addr);
if (!vma)
return NULL;
if (vma->vm_start <= addr)
return vma;
if (!(vma->vm_flags & VM_GROWSDOWN))
return NULL;
/* don't alter vm_start if the coredump is running */
if (!mmget_still_valid(mm))
return NULL;
start = vma->vm_start;
if (expand_stack(vma, addr))
return NULL;
if (vma->vm_flags & VM_LOCKED)
populate_vma_page_range(vma, addr, start, NULL);
return vma;
}
#endif
EXPORT_SYMBOL_GPL(find_extend_vma);
/*
* Ok - we have the memory areas we should free on the vma list,
* so release them, and do the vma updates.
*
* Called with the mm semaphore held.
*/
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
unsigned long nr_accounted = 0;
/* Update high watermark before we lower total_vm */
update_hiwater_vm(mm);
do {
long nrpages = vma_pages(vma);
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, -nrpages);
vma = remove_vma(vma);
} while (vma);
vm_unacct_memory(nr_accounted);
validate_mm(mm);
}
/*
* Get rid of page table information in the indicated region.
*
* Called with the mm semaphore held.
*/
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end)
{
struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
struct mmu_gather tlb;
struct vm_area_struct *cur_vma;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
/*
* Ensure we have no stale TLB entries by the time this mapping is
* removed from the rmap.
* Note that we don't have to worry about nested flushes here because
* we're holding the mm semaphore for removing the mapping - so any
* concurrent flush in this region has to be coming through the rmap,
* and we synchronize against that using the rmap lock.
*/
for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) {
if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) {
tlb_flush_mmu(&tlb);
break;
}
}
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
next ? next->vm_start : USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, start, end);
}
/*
* Create a list of vma's touched by the unmap, removing them from the mm's
* vma list as we go..
*/
static bool
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, unsigned long end)
{
struct vm_area_struct **insertion_point;
struct vm_area_struct *tail_vma = NULL;
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
vma_rb_erase(vma, mm);
mm->map_count--;
tail_vma = vma;
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
*insertion_point = vma;
if (vma) {
vma->vm_prev = prev;
vma_gap_update(vma);
} else
mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */
vmacache_invalidate(mm);
/*
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
* VM_GROWSUP VMA. Such VMAs can change their size under
* down_read(mmap_lock) and collide with the VMA we are about to unmap.
*/
if (vma && (vma->vm_flags & VM_GROWSDOWN))
return false;
if (prev && (prev->vm_flags & VM_GROWSUP))
return false;
return true;
}
/*
* __split_vma() bypasses sysctl_max_map_count checking. We use this where it
* has already been checked or doesn't make sense to fail.
*/
int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
struct vm_area_struct *new;
int err;
if (vma->vm_ops && vma->vm_ops->split) {
err = vma->vm_ops->split(vma, addr);
if (err)
return err;
}
new = vm_area_dup(vma);
if (!new)
return -ENOMEM;
if (new_below)
new->vm_end = addr;
else {
new->vm_start = addr;
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
err = vma_dup_policy(vma, new);
if (err)
goto out_free_vma;
err = anon_vma_clone(new, vma);
if (err)
goto out_free_mpol;
if (new->vm_file)
get_file(new->vm_file);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
if (new_below)
err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
((addr - new->vm_start) >> PAGE_SHIFT), new);
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
/* Success. */
if (!err)
return 0;
/* Clean everything up if vma_adjust failed. */
if (new->vm_ops && new->vm_ops->close)
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
unlink_anon_vmas(new);
out_free_mpol:
mpol_put(vma_policy(new));
out_free_vma:
vm_area_free(new);
return err;
}
/*
* Split a vma into two pieces at address 'addr', a new vma is allocated
* either for the first part or the tail.
*/
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
return __split_vma(mm, vma, addr, new_below);
}
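/*
 * Usage sketch (illustrative, error handling omitted): this mirrors what
 * __do_munmap() below does when punching a hole in the middle of a
 * mapping, one split at each boundary before the middle piece is detached
 * and unmapped:
 *
 *	if (start > vma->vm_start)
 *		split_vma(mm, vma, start, 0);
 *	if (end < vma->vm_end)
 *		split_vma(mm, find_vma(mm, end), end, 1);
 */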
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf, bool downgrade)
{
unsigned long end;
struct vm_area_struct *vma, *prev, *last;
if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
len = PAGE_ALIGN(len);
end = start + len;
if (len == 0)
return -EINVAL;
/*
* arch_unmap() might do unmaps itself. It must be called
* and finish any rbtree manipulation before this code
* runs and also starts to manipulate the rbtree.
*/
arch_unmap(mm, start, end);
/* Find the first overlapping VMA */
vma = find_vma(mm, start);
if (!vma)
return 0;
prev = vma->vm_prev;
/* we have start < vma->vm_end */
/* if it doesn't overlap, we have nothing.. */
if (vma->vm_start >= end)
return 0;
/*
* If we need to split any vma, do it now to save pain later.
*
* Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
* unmapped vm_area_struct will remain in use: so lower split_vma
* places tmp vma above, and higher split_vma places tmp vma below.
*/
if (start > vma->vm_start) {
int error;
/*
* Make sure that map_count on return from munmap() will
* not exceed its limit; but let map_count go just above
* its limit temporarily, to help free resources as expected.
*/
if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
error = __split_vma(mm, vma, start, 0);
if (error)
return error;
prev = vma;
}
/* Does it split the last one? */
last = find_vma(mm, end);
if (last && end > last->vm_start) {
int error = __split_vma(mm, last, end, 1);
if (error)
return error;
}
vma = prev ? prev->vm_next : mm->mmap;
if (unlikely(uf)) {
/*
* If userfaultfd_unmap_prep returns an error the vmas
* will remain split, but userland will get a
* highly unexpected error anyway. This is no
* different than the case where the first of the two
* __split_vma fails, but we don't undo the first
* split, even though we could. This is an unlikely
* enough failure that it's not worth optimizing for.
*/
int error = userfaultfd_unmap_prep(vma, start, end, uf);
if (error)
return error;
}
/*
* unlock any mlock()ed ranges before detaching vmas
*/
if (mm->locked_vm) {
struct vm_area_struct *tmp = vma;
while (tmp && tmp->vm_start < end) {
if (tmp->vm_flags & VM_LOCKED) {
mm->locked_vm -= vma_pages(tmp);
munlock_vma_pages_all(tmp);
}
tmp = tmp->vm_next;
}
}
/* Detach vmas from rbtree */
if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
downgrade = false;
if (downgrade)
downgrade_write(&mm->mmap_sem);
unmap_region(mm, vma, prev, start, end);
/* Fix up all other VM information */
remove_vma_list(mm, vma);
return downgrade ? 1 : 0;
}
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{
return __do_munmap(mm, start, len, uf, false);
}
static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
{
int ret;
struct mm_struct *mm = current->mm;
LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
ret = __do_munmap(mm, start, len, &uf, downgrade);
/*
* Returning 1 indicates mmap_sem is downgraded.
* But 1 is not a legal return value for vm_munmap() and munmap(); reset
* it to 0 before returning.
*/
if (ret == 1) {
up_read(&mm->mmap_sem);
ret = 0;
} else
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
return ret;
}
int vm_munmap(unsigned long start, size_t len)
{
return __vm_munmap(start, len, false);
}
EXPORT_SYMBOL(vm_munmap);
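/*
 * Example (illustrative): in-kernel users that set up a mapping with
 * vm_mmap() tear it down with vm_munmap().  Both helpers take mmap_sem
 * internally, so the caller must not already hold it:
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 *	... use the mapping ...
 *	vm_munmap(addr, size);
 */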
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
addr = untagged_addr(addr);
profile_munmap(addr);
return __vm_munmap(addr, len, true);
}
/*
* Emulation of deprecated remap_file_pages() syscall.
*/
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long populate = 0;
unsigned long ret = -EINVAL;
struct file *file;
pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
current->comm, current->pid);
if (prot)
return ret;
start = start & PAGE_MASK;
size = size & PAGE_MASK;
if (start + size <= start)
return ret;
/* Does pgoff wrap? */
if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return ret;
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
vma = find_vma(mm, start);
if (!vma || !(vma->vm_flags & VM_SHARED))
goto out;
if (start < vma->vm_start)
goto out;
if (start + size > vma->vm_end) {
struct vm_area_struct *next;
for (next = vma->vm_next; next; next = next->vm_next) {
/* hole between vmas ? */
if (next->vm_start != next->vm_prev->vm_end)
goto out;
if (next->vm_file != vma->vm_file)
goto out;
if (next->vm_flags != vma->vm_flags)
goto out;
if (start + size <= next->vm_end)
break;
}
if (!next)
goto out;
}
prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
flags &= MAP_NONBLOCK;
flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
if (vma->vm_flags & VM_LOCKED) {
struct vm_area_struct *tmp;
flags |= MAP_LOCKED;
/* drop PG_Mlocked flag for over-mapped range */
for (tmp = vma; tmp->vm_start >= start + size;
tmp = tmp->vm_next) {
/*
* Split pmd and munlock page on the border
* of the range.
*/
vma_adjust_trans_huge(tmp, start, start + size, 0);
munlock_vma_pages_range(tmp,
max(tmp->vm_start, start),
min(tmp->vm_end, start + size));
}
}
file = get_file(vma->vm_file);
ret = do_mmap_pgoff(vma->vm_file, start, size,
prot, flags, pgoff, &populate, NULL);
fput(file);
out:
up_write(&mm->mmap_sem);
if (populate)
mm_populate(ret, populate);
if (!IS_ERR_VALUE(ret))
ret = 0;
return ret;
}
/*
* This is really a simplified "do_mmap". It only handles
* anonymous maps. Eventually we may be able to do some
* brk-specific accounting here.
*/
static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
if (offset_in_page(error))
return error;
error = mlock_future_check(mm, mm->def_flags, len);
if (error)
return error;
/*
* Clear old maps. This also does some error checking for us.
*/
while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
&rb_parent)) {
if (do_munmap(mm, addr, len, uf))
return -ENOMEM;
}
/* Check against address space limits *after* clearing old maps... */
if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
return -ENOMEM;
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
vma = vma_merge(mm, prev, addr, addr + len, flags,
NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
if (vma)
goto out;
/*
* create a vma struct for an anonymous mapping
*/
vma = vm_area_alloc(mm);
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
}
vma_set_anonymous(vma);
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
mm->data_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
return 0;
}
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
struct mm_struct *mm = current->mm;
unsigned long len;
int ret;
bool populate;
LIST_HEAD(uf);
len = PAGE_ALIGN(request);
if (len < request)
return -ENOMEM;
if (!len)
return 0;
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
ret = do_brk_flags(addr, len, flags, &uf);
populate = ((mm->def_flags & VM_LOCKED) != 0);
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
if (populate && !ret)
mm_populate(addr, len);
return ret;
}
EXPORT_SYMBOL(vm_brk_flags);
int vm_brk(unsigned long addr, unsigned long len)
{
return vm_brk_flags(addr, len, 0);
}
EXPORT_SYMBOL(vm_brk);
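/*
 * Example (illustrative): binary loaders use these helpers to create
 * zero-filled anonymous regions, e.g. an ELF loader mapping the BSS
 * (bss_start/bss_end are hypothetical names here):
 *
 *	error = vm_brk_flags(bss_start, bss_end - bss_start,
 *			     prot & PROT_EXEC ? VM_EXEC : 0);
 */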
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
struct mmu_gather tlb;
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
/* mm's last user has gone, and it's about to be pulled down */
mmu_notifier_release(mm);
if (unlikely(mm_is_oom_victim(mm))) {
/*
* Manually reap the mm to free as much memory as possible.
* Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
* this mm from further consideration. Taking mm->mmap_sem for
* write after setting MMF_OOM_SKIP will guarantee that the oom
* reaper will not run on this mm again after mmap_sem is
* dropped.
*
* Nothing can be holding mm->mmap_sem here and the above call
* to mmu_notifier_release(mm) ensures mmu notifier callbacks in
* __oom_reap_task_mm() will not block.
*
* This needs to be done before calling munlock_vma_pages_all(),
* which clears VM_LOCKED, otherwise the oom reaper cannot
* reliably test it.
*/
(void)__oom_reap_task_mm(mm);
set_bit(MMF_OOM_SKIP, &mm->flags);
down_write(&mm->mmap_sem);
up_write(&mm->mmap_sem);
}
if (mm->locked_vm) {
vma = mm->mmap;
while (vma) {
if (vma->vm_flags & VM_LOCKED)
munlock_vma_pages_all(vma);
vma = vma->vm_next;
}
}
arch_exit_mmap(mm);
vma = mm->mmap;
if (!vma) /* Can happen if dup_mmap() received an OOM */
return;
lru_add_drain();
flush_cache_mm(mm);
tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, 0, -1);
/*
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
*/
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
vma = remove_vma(vma);
cond_resched();
}
vm_unacct_memory(nr_accounted);
}
/* Insert vm structure into process list sorted by address
* and into the inode's i_mmap tree. If vm_file is non-NULL
* then i_mmap_rwsem is taken here.
*/
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
if (find_vma_links(mm, vma->vm_start, vma->vm_end,
&prev, &rb_link, &rb_parent))
return -ENOMEM;
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when the page's anon_vma and index
* are set. But now set the vm_pgoff it will almost certainly
* end up with (unless mremap moves it elsewhere before that
* first write fault), so /proc/pid/maps tells a consistent story.
*
* By setting it to reflect the virtual start address of the
* vma, merges and splits can happen in a seamless way, just
* using the existing file pgoff checks and manipulations.
* Similarly in do_mmap_pgoff and in do_brk.
*/
if (vma_is_anonymous(vma)) {
BUG_ON(vma->anon_vma);
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
}
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
}
/*
* Copy the vma structure to a new location in the same mm,
* prior to moving page table entries, to effect an mremap move.
*/
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks)
{
struct vm_area_struct *vma = *vmap;
unsigned long vma_start = vma->vm_start;
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma, *prev;
struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
*/
if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
pgoff = addr >> PAGE_SHIFT;
faulted_in_anon_vma = false;
}
if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
return NULL; /* should never get here */
/* There are 3 cases to manage here in
* AAAA AAAA AAAA AAAA
* PPPP.... PPPP......NNNN PPPP....NNNN PP........NN
* PPPPPPPP(A) PPPP..NNNNNNNN(B) PPPPPPPPPPPP(1) NULL
* PPPPPPPPNNNN(2)
* PPPPNNNNNNNN(3)
*
* new_vma == prev in case A,1,2
* new_vma == next in case B,3
*/
new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff,
vma_policy(vma), vma->vm_userfaultfd_ctx,
vma_get_anon_name(vma), true);
if (new_vma) {
/*
* Source vma may have been merged into new_vma
*/
if (unlikely(vma_start >= new_vma->vm_start &&
vma_start < new_vma->vm_end)) {
/*
* The only way we can get a vma_merge with
* self during an mremap is if the vma hasn't
* been faulted in yet and we were allowed to
* reset the dst vma->vm_pgoff to the
* destination address of the mremap to allow
* the merge to happen. mremap must change the
* vm_pgoff linearity between src and dst vmas
* (in turn preventing a vma_merge) to be
* safe. It is only safe to keep the vm_pgoff
* linear if there are no pages mapped yet.
*/
VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
*vmap = vma = new_vma;
}
*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
} else {
new_vma = vm_area_dup(vma);
if (!new_vma)
goto out;
new_vma->vm_start = addr;
new_vma->vm_end = addr + len;
new_vma->vm_pgoff = pgoff;
if (vma_dup_policy(vma, new_vma))
goto out_free_vma;
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
if (new_vma->vm_file)
get_file(new_vma->vm_file);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
/*
* As soon as the VMA is linked, it may be hit by the
* speculative page fault handler. But we don't want it to
* start mapping pages in this area until the caller has
* potentially moved the ptes from the moved VMA. To prevent
* that we protect it right now, and let the caller unprotect
* it once the move is done.
*/
vm_write_begin(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false;
}
return new_vma;
out_free_mempol:
mpol_put(vma_policy(new_vma));
out_free_vma:
vm_area_free(new_vma);
out:
return NULL;
}
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
*/
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
return false;
if (is_data_mapping(flags) &&
mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
/* Workaround for Valgrind */
if (rlimit(RLIMIT_DATA) == 0 &&
mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
return true;
pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
current->comm, current->pid,
(mm->data_vm + npages) << PAGE_SHIFT,
rlimit(RLIMIT_DATA),
ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
if (!ignore_rlimit_data)
return false;
}
return true;
}
void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
mm->total_vm += npages;
if (is_exec_mapping(flags))
mm->exec_vm += npages;
else if (is_stack_mapping(flags))
mm->stack_vm += npages;
else if (is_data_mapping(flags))
mm->data_vm += npages;
}
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
/*
* Having a close hook prevents vma merging regardless of flags.
*/
static void special_mapping_close(struct vm_area_struct *vma)
{
}
static const char *special_mapping_name(struct vm_area_struct *vma)
{
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}
static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
struct vm_special_mapping *sm = new_vma->vm_private_data;
if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
return -EFAULT;
if (sm->mremap)
return sm->mremap(sm, new_vma);
return 0;
}
static const struct vm_operations_struct special_mapping_vmops = {
.close = special_mapping_close,
.fault = special_mapping_fault,
.mremap = special_mapping_mremap,
.name = special_mapping_name,
};
static const struct vm_operations_struct legacy_special_mapping_vmops = {
.close = special_mapping_close,
.fault = special_mapping_fault,
};
static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgoff_t pgoff;
struct page **pages;
if (vma->vm_ops == &legacy_special_mapping_vmops) {
pages = vma->vm_private_data;
} else {
struct vm_special_mapping *sm = vma->vm_private_data;
if (sm->fault)
return sm->fault(sm, vmf->vma, vmf);
pages = sm->pages;
}
for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
pgoff--;
if (*pages) {
struct page *page = *pages;
get_page(page);
vmf->page = page;
return 0;
}
return VM_FAULT_SIGBUS;
}
static struct vm_area_struct *__install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, void *priv,
const struct vm_operations_struct *ops)
{
int ret;
struct vm_area_struct *vma;
vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = ops;
vma->vm_private_data = priv;
ret = insert_vm_struct(mm, vma);
if (ret)
goto out;
vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
perf_event_mmap(vma);
return vma;
out:
vm_area_free(vma);
return ERR_PTR(ret);
}
bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm)
{
return vma->vm_private_data == sm &&
(vma->vm_ops == &special_mapping_vmops ||
vma->vm_ops == &legacy_special_mapping_vmops);
}
/*
* Called with mm->mmap_sem held for writing.
* Insert a new vma covering the given region, with the given flags.
* Its pages are supplied by the given array of struct page *.
* The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
* The region past the last page supplied will always produce SIGBUS.
* The array pointer and the pages it points to are assumed to stay alive
* for as long as this mapping might exist.
*/
struct vm_area_struct *_install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, const struct vm_special_mapping *spec)
{
return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
&special_mapping_vmops);
}
int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, struct page **pages)
{
struct vm_area_struct *vma = __install_special_mapping(
mm, addr, len, vm_flags, (void *)pages,
&legacy_special_mapping_vmops);
return PTR_ERR_OR_ZERO(vma);
}
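/*
 * Example (illustrative): architecture code typically installs the vDSO
 * through _install_special_mapping(), with a static vm_special_mapping
 * describing the backing pages (vdso_pages and vdso_size are hypothetical
 * names here):
 *
 *	static struct vm_special_mapping vdso_mapping = {
 *		.name = "[vdso]",
 *		.pages = vdso_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, vdso_size,
 *			VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 *			&vdso_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */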
static DEFINE_MUTEX(mm_all_locks_mutex);
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
/*
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
*/
down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
/*
* We can safely modify head.next after taking the
* anon_vma->root->rwsem. If some other vma in this mm shares
* the same anon_vma we won't take it again.
*
* No need of atomic instructions here, head.next
* can't change from under us thanks to the
* anon_vma->root->rwsem.
*/
if (__test_and_set_bit(0, (unsigned long *)
&anon_vma->root->rb_root.rb_root.rb_node))
BUG();
}
}
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
/*
* AS_MM_ALL_LOCKS can't change from under us because
* we hold the mm_all_locks_mutex.
*
* Operations on ->flags have to be atomic because
* even if AS_MM_ALL_LOCKS is stable thanks to the
* mm_all_locks_mutex, there may be other cpus
* changing other bitflags in parallel to us.
*/
if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
BUG();
down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
}
}
/*
* This operation locks against the VM for all pte/vma/mm related
* operations that could ever happen on a certain mm. This includes
* vmtruncate, try_to_unmap, and all page faults.
*
* The caller must take the mmap_sem in write mode before calling
* mm_take_all_locks(). The caller isn't allowed to release the
* mmap_sem until mm_drop_all_locks() returns.
*
* mmap_sem in write mode is required in order to block all operations
* that could modify pagetables and free pages without need of
* altering the vma layout. It's also needed in write mode to avoid new
* anon_vmas to be associated with existing vmas.
*
* A single task can't take more than one mm_take_all_locks() in a row
* or it would deadlock.
*
* The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
* mapping->flags avoid taking the same lock twice, if more than one
* vma in this mm is backed by the same anon_vma or address_space.
*
* We take locks in following order, accordingly to comment at beginning
* of mm/rmap.c:
* - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
* hugetlb mapping);
* - all i_mmap_rwsem locks;
* - all anon_vma->rwsem locks.
*
* We can take all locks within these types randomly because the VM code
* doesn't nest them and we're protected from parallel mm_take_all_locks() by
* mm_all_locks_mutex.
*
* mm_take_all_locks() and mm_drop_all_locks() are expensive operations
* that may have to take thousands of locks.
*
* mm_take_all_locks() can fail if it's interrupted by signals.
*/
int mm_take_all_locks(struct mm_struct *mm)
{
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
BUG_ON(down_read_trylock(&mm->mmap_sem));
mutex_lock(&mm_all_locks_mutex);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (signal_pending(current))
goto out_unlock;
if (vma->vm_file && vma->vm_file->f_mapping &&
is_vm_hugetlb_page(vma))
vm_lock_mapping(mm, vma->vm_file->f_mapping);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (signal_pending(current))
goto out_unlock;
if (vma->vm_file && vma->vm_file->f_mapping &&
!is_vm_hugetlb_page(vma))
vm_lock_mapping(mm, vma->vm_file->f_mapping);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (signal_pending(current))
goto out_unlock;
if (vma->anon_vma)
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vm_lock_anon_vma(mm, avc->anon_vma);
}
return 0;
out_unlock:
mm_drop_all_locks(mm);
return -EINTR;
}
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
/*
* The LSB of head.next can't change to 0 from under
* us because we hold the mm_all_locks_mutex.
*
* We must however clear the bitflag before unlocking
* the vma so the users using the anon_vma->rb_root will
* never see our bitflag.
*
* No need of atomic instructions here, head.next
* can't change from under us until we release the
* anon_vma->root->rwsem.
*/
if (!__test_and_clear_bit(0, (unsigned long *)
&anon_vma->root->rb_root.rb_root.rb_node))
BUG();
anon_vma_unlock_write(anon_vma);
}
}
static void vm_unlock_mapping(struct address_space *mapping)
{
if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
/*
* AS_MM_ALL_LOCKS can't change to 0 from under us
* because we hold the mm_all_locks_mutex.
*/
i_mmap_unlock_write(mapping);
if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
&mapping->flags))
BUG();
}
}
/*
* The mmap_sem cannot be released by the caller until
* mm_drop_all_locks() returns.
*/
void mm_drop_all_locks(struct mm_struct *mm)
{
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
BUG_ON(down_read_trylock(&mm->mmap_sem));
BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma)
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vm_unlock_anon_vma(avc->anon_vma);
if (vma->vm_file && vma->vm_file->f_mapping)
vm_unlock_mapping(vma->vm_file->f_mapping);
}
mutex_unlock(&mm_all_locks_mutex);
}
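/*
 * Usage sketch (illustrative): mmu notifier registration is the classic
 * caller; the expected pattern is to take mmap_sem for write, take all
 * locks, publish the new state, then release everything again:
 *
 *	down_write(&mm->mmap_sem);
 *	ret = mm_take_all_locks(mm);
 *	if (!ret) {
 *		... publish the notifier / update shared state ...
 *		mm_drop_all_locks(mm);
 *	}
 *	up_write(&mm->mmap_sem);
 */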
/*
* initialise the percpu counter for VM
*/
void __init mmap_init(void)
{
int ret;
ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
VM_BUG_ON(ret);
}
/*
* Initialise sysctl_user_reserve_kbytes.
*
* This is intended to prevent a user from starting a single memory hogging
* process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
* mode.
*
* The default value is min(3% of free memory, 128MB)
* 128MB is enough to recover with sshd/login, bash, and top/kill.
*/
static int init_user_reserve(void)
{
unsigned long free_kbytes;
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
return 0;
}
subsys_initcall(init_user_reserve);
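/*
 * Worked example (illustrative): with 8GB free, free_kbytes / 32 is about
 * 256MB, so the 128MB cap (1UL << 17 KB) applies; with 1GB free the ~3%
 * share (about 32MB) is smaller and wins.
 */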
/*
* Initialise sysctl_admin_reserve_kbytes.
*
* The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
* to log in and kill a memory hogging process.
*
* Systems with more than 256MB will reserve 8MB, enough to recover
* with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
* only reserve 3% of free pages by default.
*/
static int init_admin_reserve(void)
{
unsigned long free_kbytes;
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
return 0;
}
subsys_initcall(init_admin_reserve);
/*
* Reinitialise user and admin reserves if memory is added or removed.
*
* The default user reserve max is 128MB, and the default max for the
* admin reserve is 8MB. These are usually, but not always, enough to
* enable recovery from a memory hogging process using login/sshd, a shell,
* and tools like top. It may make sense to increase or even disable the
* reserve depending on the existence of swap or variations in the recovery
* tools. So, the admin may have changed them.
*
* If memory is added and the reserves have been eliminated or increased above
* the default max, then we'll trust the admin.
*
* If memory is removed and there isn't enough free memory, then we
* need to reset the reserves.
*
* Otherwise keep the reserve set by the admin.
*/
static int reserve_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long tmp, free_kbytes;
switch (action) {
case MEM_ONLINE:
/* Default max is 128MB. Leave alone if modified by operator. */
tmp = sysctl_user_reserve_kbytes;
if (0 < tmp && tmp < (1UL << 17))
init_user_reserve();
/* Default max is 8MB. Leave alone if modified by operator. */
tmp = sysctl_admin_reserve_kbytes;
if (0 < tmp && tmp < (1UL << 13))
init_admin_reserve();
break;
case MEM_OFFLINE:
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
if (sysctl_user_reserve_kbytes > free_kbytes) {
init_user_reserve();
pr_info("vm.user_reserve_kbytes reset to %lu\n",
sysctl_user_reserve_kbytes);
}
if (sysctl_admin_reserve_kbytes > free_kbytes) {
init_admin_reserve();
pr_info("vm.admin_reserve_kbytes reset to %lu\n",
sysctl_admin_reserve_kbytes);
}
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block reserve_mem_nb = {
.notifier_call = reserve_mem_notifier,
};
static int __meminit init_reserve_notifier(void)
{
if (register_hotmemory_notifier(&reserve_mem_nb))
pr_err("Failed registering memory add/remove notifier for admin reserve\n");
return 0;
}
subsys_initcall(init_reserve_notifier);