37e14da1f2
* refs/heads/tmp-075ce55: ANDROID: 16K: Avoid mmap lock assertions for padding VMAs ANDROID: consolidate.fragment: don't disable CONFIG_PM_ADVANCED_DEBUG ANDROID: GKI: Update symbol list for mtk Revert "f2fs: fix to tag gcing flag on page during block migration" ANDROID: 16K: Only check basename of linker context FROMGIT: arm64: mte: Make mte_check_tfsr_*() conditional on KASAN instead of MTE ANDROID: gki_defconfig: Disable CONFIG_BRCMSTB_DPFE and CONFIG_BRCMSTB_MEMC FROMGIT: f2fs: fix to avoid use SSR allocate when do defragment ANDROID: 16K: Only check basename of linker context ANDROID: 16K: Avoid and document padding madvise lock warning ANDROID: arm64: vdso32: support user-supplied flags ANDROID: GKI: Add initial symbol list for bcmstb ANDROID: gki_defconfig: Enable Broadcom STB SoCs UPSTREAM: mmc: core: Do not force a retune before RPMB switch UPSTREAM: arm64/arm: arm_pmuv3: perf: Don't truncate 64-bit registers BACKPORT: net: phy: Allow drivers to always call into ->suspend() UPSTREAM: ARM: perf: Mark all accessor functions inline UPSTREAM: arm64: perf: Mark all accessor functions inline UPSTREAM: perf/core: Drop __weak attribute from arch_perf_update_userpage() prototype UPSTREAM: ARM: perf: Allow the use of the PMUv3 driver on 32bit ARM UPSTREAM: ARM: Make CONFIG_CPU_V7 valid for 32bit ARMv8 implementations UPSTREAM: perf: pmuv3: Change GENMASK to GENMASK_ULL UPSTREAM: perf: pmuv3: Move inclusion of kvm_host.h to the arch-specific helper UPSTREAM: perf: pmuv3: Abstract PMU version checks UPSTREAM: arm64: perf: Abstract system register accesses away UPSTREAM: arm64: perf: Move PMUv3 driver to drivers/perf UPSTREAM: arm64/perf: Replace PMU version number '0' with ID_AA64DFR0_EL1_PMUVer_NI ANDROID: GKI: Update oplus symbol list UPSTREAM: block/blk-mq: Don't complete locally if capacities are different BACKPORT: sched: Add a new function to compare if two cpus have the same capacity ANDROID: GKI: Update symbol list for mtk ANDROID: GKI: Update rockchip symbols for rndis_host. ANDROID: GKI: Update rockchip symbols for snd multi dais. UPSTREAM: usb: gadget: f_fs: Fix race between aio_cancel() and AIO request complete UPSTREAM: usb: gadget: f_fs: use io_data->status consistently ANDROID: set rewrite_absolute_paths_in_config for GKI aarch64. UPSTREAM: wifi: cfg80211: Clear mlo_links info when STA disconnects ANDROID: ABI: Add usb_gadget_connect & usb_gadget_disconnect symbol ANDROID: GKI: Update symbol list for mtk BACKPORT: iommu: Have __iommu_probe_device() check for already probed devices ANDROID: ABI fixup for abi break in struct dst_ops BACKPORT: net: fix __dst_negative_advice() race ANDROID: ABI fixup for abi break in struct dst_ops BACKPORT: net: fix __dst_negative_advice() race Revert "crypto: api - Disallow identical driver names" UPSTREAM: USB: gadget: core: create sysfs link between udc and gadget ANDROID: GKI: add tegra20-cpufreq.ko to rockpi4 build ANDROID: GKI: update symbol list for db845c ANDROID: Build some tegra configs as vendor module ANDROID: gki_defconfig: Enable Tegra SoCs ANDROID: Update the ABI symbol list Revert^2 "BACKPORT: FROMGIT: module: allow UNUSED_KSYMS_WHITELIST ..." 
ANDROID: binder: fix KMI-break due to alloc->lock ANDROID: binder: fix ptrdiff_t printk-format issue ANDROID: binder: fix KMI-break due to address type change Reapply "ANDROID: vendor_hooks: Add hook for binder_detect_low_async_space" Reapply "ANDROID: Add vendor hooks for binder perf tuning" UPSTREAM: binder: switch alloc->mutex to spinlock_t UPSTREAM: binder: reverse locking order in shrinker callback UPSTREAM: binder: avoid user addresses in debug logs UPSTREAM: binder: refactor binder_delete_free_buffer() UPSTREAM: binder: collapse print_binder_buffer() into caller UPSTREAM: binder: document the final page calculation BACKPORT: UPSTREAM: binder: rename lru shrinker utilities UPSTREAM: binder: make oversized buffer code more readable UPSTREAM: binder: remove redundant debug log UPSTREAM: binder: perform page installation outside of locks UPSTREAM: binder: initialize lru pages in mmap callback UPSTREAM: binder: malloc new_buffer outside of locks UPSTREAM: binder: refactor page range allocation UPSTREAM: binder: relocate binder_alloc_clear_buf() UPSTREAM: binder: relocate low space calculation UPSTREAM: binder: separate the no-space debugging logic UPSTREAM: binder: remove pid param in binder_alloc_new_buf() UPSTREAM: binder: do unlocked work in binder_alloc_new_buf() UPSTREAM: binder: split up binder_update_page_range() UPSTREAM: binder: keep vma addresses type as unsigned long UPSTREAM: binder: remove extern from function prototypes Revert "ANDROID: Add vendor hooks for binder perf tuning" Revert "ANDROID: vendor_hooks: Add hook for binder_detect_low_async_space_locked" Revert "ANDROID: vendor_hook: rename the the name of hooks" ANDROID: GKI: Update rockchip symbols for some usb wifi bt. UPSTREAM: selftests: timers: Fix valid-adjtimex signed left-shift undefined behavior ANDROID: GKI: Update rockchip symbols to add iova APIs FROMLIST: kheaders: explicitly define file modes for archived headers ANDROID: incremental-fs: Make work with 16k pages ANDROID: pahole -J -j1 for reproducible BTF Revert "BACKPORT: FROMGIT: module: allow UNUSED_KSYMS_WHITELIST ..." BACKPORT: FROMGIT: module: allow UNUSED_KSYMS_WHITELIST to be relative against objtree. 
ANDROID: export one function for mm metrics ANDROID: GKI: Update symbol list for mtk FROMLIST: kheaders: explicitly define file modes for archived headers ANDROID: pahole -J -j1 for reproducible BTF FROMLIST: usb: typec: tcpm: Ignore received Hard Reset in TOGGLING state ANDROID: Export sysctl_sched_wakeup_granularity to enable modifying it UPSTREAM: epoll: be better about file lifetimes FROMLIST: usb: typec: tcpm: Ignore received Hard Reset in TOGGLING state FROMLIST: usb: typec: tcpm: Ignore received Hard Reset in TOGGLING state UPSTREAM: usb: gadget: ncm: Fix handling of zero block length packets ANDROID: Update the ABI symbol list ANDROID: GKI: Update symbol list for mtk UPSTREAM: usb: gadget: ncm: Avoid dropping datagrams of properly parsed NTBs ANDROID: GKI: Update rockchip symbols to add iova APIs FROMLIST: sched/pi: Reweight fair_policy() tasks when inheriting prio ANDROID: Update the ABI symbol list ANDROID: mm: Add restricted vendor hook in do_read_fault() ANDROID: abi_gki_aarch64_qcom: Update symbol list ANDROID: abi_gki_aarch64_qcom: Update symbol list ANDROID: Update the ABI symbol list ANDROID: add vendor hooks and expoert reclaim_pages to reclaim memory FROMGIT: usb: dwc3: Wait unconditionally after issuing EndXfer command ANDROID: ABI: Update honor symbol list ANDROID: add vendor hook in do_read_fault to tune fault_around_bytes ANDROID: usb: Optimize the problem of slow transfer rate in USB accessory mode ANDROID: Zap kernel/sched/android.h stubs ANDROID: Update the ABI symbol list ANDROID: Export sysctl_sched_wakeup_granularity to enable modifying it ANDROID: export one function for mm metrics ANDROID: Update the ABI symbol list ANDROID: Export sysctl_sched_wakeup_granularity to enable modifying it UPSTREAM: ALSA: virtio: use ack callback UPSTREAM: usb: typec: tcpm: clear pd_event queue in PORT_RESET BACKPORT: usb: typec: tcpm: enforce ready state when queueing alt mode vdm ANDROID: GKI: Update QCOM symbol list and ABI STG ANDROID: GKI: fix ABI breakage in struct userfaultfd_ctx UPSTREAM: userfaultfd: fix deadlock warning when locking src and dst VMAs BACKPORT: userfaultfd: use per-vma locks in userfaultfd operations BACKPORT: mm: add vma_assert_locked() for !CONFIG_PER_VMA_LOCK BACKPORT: userfaultfd: protect mmap_changing with rw_sem in userfaulfd_ctx BACKPORT: userfaultfd: move userfaultfd_ctx struct to header file BACKPORT: userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb BACKPORT: selftests/mm: add separate UFFDIO_MOVE test for PMD splitting BACKPORT: selftests/mm: add UFFDIO_MOVE ioctl test BACKPORT: selftests/mm: add uffd_test_case_ops to allow test case-specific operations BACKPORT: selftests/mm: call uffd_test_ctx_clear at the end of the test UPSTREAM: userfaultfd: fix return error if mmap_changing is non-zero in MOVE ioctl BACKPORT: userfaultfd: change src_folio after ensuring it's unpinned in UFFDIO_MOVE BACKPORT: mm: userfaultfd: fix unexpected change to src_folio when UFFDIO_MOVE fails BACKPORT: userfaultfd: handle zeropage moves by UFFDIO_MOVE UPSTREAM: userfaultfd: avoid huge_zero_page in UFFDIO_MOVE UPSTREAM: userfaultfd: fix move_pages_pte() splitting folio under RCU read lock BACKPORT: userfaultfd: UFFDIO_MOVE uABI UPSTREAM: mm/rmap: support move to different root anon_vma in folio_move_anon_rmap() UPSTREAM: crypto: x86/curve25519 - disable gcov ANDROID: GKI: Update QCOM symbol list and ABI STG ANDROID: GKI: update symbol list file for xiaomi UPSTREAM: netfilter: nft_set_pipapo: do not free live element UPSTREAM: net: tls: handle backlogging 
of crypto requests UPSTREAM: af_unix: Fix garbage collector racing against connect() UPSTREAM: af_unix: Do not use atomic ops for unix_sk(sk)->inflight. ANDROID: 16K: Fix show maps CFI failure ANDROID: 16K: Handle pad VMA splits and merges ANDROID: 16K: madvise_vma_pad_pages: Remove filemap_fault check ANDROID: 16K: Only madvise padding from dynamic linker context ANDROID: 16K: Separate padding from ELF LOAD segment mappings ANDROID: 16K: Exclude ELF padding for fault around range ANDROID: 16K: Use MADV_DONTNEED to save VMA padding pages. ANDROID: 16K: Introduce ELF padding representation for VMAs ANDROID: 16K: Introduce /sys/kernel/mm/pgsize_miration/enabled FROMGIT: usb: typec: tcpm: Check for port partner validity before consuming it FROMGIT: usb: typec: tcpm: Check for port partner validity before consuming it Revert "FROMGIT: usb: typec: tcpm: Check for port partner validity before consuming it" FROMGIT: usb: typec: tcpm: Check for port partner validity before consuming it ANDROID: vendor_hooks: add symbols for lazy preemption ANDROID: vendor_hooks: add two hooks for lazy preemption ANDROID: KVM: arm64: wait_for_initramfs for pKVM module loading procfs ANDROID: GKI: Expose device async to userspace BACKPORT: mtk-mmsys: Change mtk-mmsys & mtk-mutex to modules ANDROID: Update the ABI symbol list BACKPORT: clk: mediatek: Split configuration options for MT8186 clock drivers BACKPORT: clk: mediatek: Add MODULE_LICENSE() where missing FROMGIT: coresight: etm4x: Fix access to resource selector registers FROMGIT: coresight: etm4x: Safe access for TRCQCLTR FROMGIT: coresight: etm4x: Do not save/restore Data trace control registers FROMGIT: coresight: etm4x: Do not hardcode IOMEM access for register restore ANDROID: GKI: Update honda symbol list for led-trigger ANDROID: GKI: Update symbols to symbol list ANDROID: vendor_hook: Add hooks to support reader optimistic spin in rwsem UPSTREAM: af_unix: Fix garbage collector racing against connect() UPSTREAM: af_unix: Do not use atomic ops for unix_sk(sk)->inflight. 
ANDROID: GKI: fix ABI breakage in struct userfaultfd_ctx UPSTREAM: userfaultfd: fix deadlock warning when locking src and dst VMAs BACKPORT: userfaultfd: use per-vma locks in userfaultfd operations BACKPORT: mm: add vma_assert_locked() for !CONFIG_PER_VMA_LOCK BACKPORT: userfaultfd: protect mmap_changing with rw_sem in userfaulfd_ctx BACKPORT: userfaultfd: move userfaultfd_ctx struct to header file BACKPORT: userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb BACKPORT: selftests/mm: add separate UFFDIO_MOVE test for PMD splitting BACKPORT: selftests/mm: add UFFDIO_MOVE ioctl test BACKPORT: selftests/mm: add uffd_test_case_ops to allow test case-specific operations BACKPORT: selftests/mm: call uffd_test_ctx_clear at the end of the test UPSTREAM: userfaultfd: fix return error if mmap_changing is non-zero in MOVE ioctl BACKPORT: userfaultfd: change src_folio after ensuring it's unpinned in UFFDIO_MOVE BACKPORT: mm: userfaultfd: fix unexpected change to src_folio when UFFDIO_MOVE fails BACKPORT: userfaultfd: handle zeropage moves by UFFDIO_MOVE UPSTREAM: userfaultfd: avoid huge_zero_page in UFFDIO_MOVE UPSTREAM: userfaultfd: fix move_pages_pte() splitting folio under RCU read lock BACKPORT: userfaultfd: UFFDIO_MOVE uABI UPSTREAM: mm/rmap: support move to different root anon_vma in folio_move_anon_rmap() ANDROID: PM: hibernate: Encryption support with compression ANDROID: abi_gki_aarch64_qcom: Update symbol list ANDROID: vendor_hooks: Add hooks to support hibernation ANDROID: gki_defconfig: Sync gki_defconfig UPSTREAM: PM: hibernate: Support to select compression algorithm UPSTREAM: PM: hibernate: Add support for LZ4 compression for hibernation BACKPORT: PM: hibernate: Move to crypto APIs for LZO compression BACKPORT: PM: hibernate: Rename lzo* to make it generic Revert "hrtimer: Report offline hrtimer enqueue" Revert "drm/mipi-dsi: Fix detach call without attach" ANDROID: ABI: Update symbol list for Exynos SoC ANDROID: abi_gki_aarch64_qcom: Update symbol list BACKPORT: mtk-mmsys: Change mtk-mmsys & mtk-mutex to modules BACKPORT: clk: mediatek: Split configuration options for MT8186 clock drivers BACKPORT: clk: mediatek: Add MODULE_LICENSE() where missing ANDROID: Update the ABI symbol list Reapply "ANDROID: block: Add support for filesystem requests and small segments" UPSTREAM: usb:typec:tcpm:support double Rp to Vbus cable as sink ANDROID: Update the ABI symbol list ANDROID: Add known structs used by modules to KMI ANDROID: use reserved cpucaps for new capability Revert "mm/sparsemem: fix race in accessing memory_section->usage" UPSTREAM: netfilter: nf_tables: release mutex after nft_gc_seq_end from abort path UPSTREAM: netfilter: nf_tables: release batch on table validation from abort path UPSTREAM: netfilter: nf_tables: mark set as dead when unbinding anonymous set with timeout UPSTREAM: netfilter: nft_set_pipapo: release elements in clone only from destroy path ANDROID: GKI: Add symbol list for Nothing ANDROID: GKI: Update honda symbol list for asound ANDROID: GKI: Update honda symbol list for xt_LOG ANDROID: GKI: Update honda symbol list for ebtables ANDROID: GKI: Update honda symbol list for net scheduler ANDROID: softirq: add tasklet to LONG_SOFTIRQ_MASK ANDROID: Update the ABI symbol list ANDROID: vendor_hooks: add restricted vendor hook for meminfo ANDROID: Update the ABI symbol list BACKPORT: cpufreq: Don't unregister cpufreq cooling on CPU hotplug FROMGIT: usb: typec: tcpm: Correct the PDO counting in pd_set UPSTREAM: tls: fix race between tx work scheduling and socket 
close UPSTREAM: netfilter: nft_chain_filter: handle NETDEV_UNREGISTER for inet/ingress basechain UPSTREAM: tls: fix race between async notify and socket close UPSTREAM: net: tls: factor out tls_*crypt_async_wait() UPSTREAM: tls: extract context alloc/initialization out of tls_set_sw_offload BACKPORT: mm: update mark_victim tracepoints fields Revert "FROMGIT: BACKPORT: mm: update mark_victim tracepoints fields" ANDROID: Update the ABI symbol list ANDROID: export two function to reclaim memory ANDROID: add reclaim tune parameter functions UPSTREAM: netfilter: nf_tables: disallow anonymous set with timeout flag ANDROID: drop redundant OWNERS_DrNo file UPSTREAM: ALSA: virtio: Fix "Coverity: virtsnd_kctl_tlv_op(): Uninitialized variables" warning. UPSTREAM: ALSA: virtio: add support for audio controls ANDROID: Update symbols list for virtio_snd.ko audio controls ANDROID: Move cpu_busy_with_softirqs() into sched.h UPSTREAM: mm: page_alloc: unreserve highatomic page blocks before oom UPSTREAM: mm: page_alloc: enforce minimum zone size to do high atomic reserves UPSTREAM: mm: page_alloc: correct high atomic reserve calculations ANDROID: Update the ABI symbol list ANDROID: Export cpu_busy_with_softirqs() ANDROID: GKI: update symbol list file for xiaomi Reapply "ANDROID: null_blk: Support configuring the maximum segment size" UPSTREAM: usb: typec: Return size of buffer if pd_set operation succeeds UPSTREAM: usb: typec: tcpm: Update PD of Type-C port upon pd_set UPSTREAM: usb: typec: tcpm: fix double-free issue in tcpm_port_unregister_pd() UPSTREAM: usb: typec: tcpm: Correct port source pdo array in pd_set callback Reapply "Merge tag 'android14-6.1.75_r00' into android14-6.1" Revert "ANDROID: null_blk: Support configuring the maximum segment size" Revert "usb: dwc3: core: configure TX/RX threshold for DWC3_IP" ANDROID: Update the ABI symbol list ANDROID: GKI: Update symbols for rockchip drm FROMLIST: binder: check offset alignment in binder_get_object() ANDROID: GKI: Add symbol list for Nothing ANDROID: thermal: Add vendor thermal_pm_notify_suspend function UPSTREAM: usb: dwc3: gadget: Don't disconnect if not started Revert "ANDROID: block: Add support for filesystem requests and small segments" ANDROID: vendor_hook: fix issue vip thread do not sleep while no vip work UPSTREAM: sched/wait: Fix a kthread_park race with wait_woken() ANDROID: KVM: arm64: Fix TLB invalidation when coalescing into a block ANDROID: ABI: Update symbol list for Exynos SoC ANDROID: GKI: Add symbols for rockchip snd trcm FROMGIT: usb: dwc2: Disable clock gating feature on Rockchip SoCs ANDROID: ABI: Update symbol list for Exynos SoC ANDROID: Add symbols for IIO SCMI module ANDROID: KVM: arm64: Fix missing trace event for nVHE dyn HVCs ANDROID: Update the ABI symbol list ANDROID: Update symbols list for open-dice.ko ANDROID: Add symbols for IIO SCMI module ANDROID: GKI: Update symbols for rockchip UPSTREAM: usb: dwc2: disable lpm feature on Rockchip SoCs ANDROID: Update symbols list for open-dice.ko FROMLIST: mm/migrate: set swap entry values of THP tail pages properly. 
ANDROID: Update the ABI symbol list ANDROID: virt: gunyah: Move send_lock around req and reply ANDROID: null_blk: Support configuring the maximum segment size ANDROID: scsi_debug: Support configuring the maximum segment size ANDROID: block: Add support for filesystem requests and small segments ANDROID: block: Support submitting passthrough requests with small segments ANDROID: block: Make sub_page_limit_queues available in debugfs ANDROID: block: Support configuring limits below the page size ANDROID: block: Prepare for supporting sub-page limits ANDROID: block: Use pr_info() instead of printk(KERN_INFO ...) ANDROID: sound: usb: add vendor hook for cpu suspend support ANDROID: usb: Add vendor hook for usb suspend and resume ANDROID: fips140: change linker script guard Revert "ANDROID: Update symbols list for open-dice.ko" ANDROID: Update symbols list for open-dice.ko FROMGIT: PM: sleep: wakeirq: fix wake irq warning in system suspend ANDROID: ABI: Update symbol for Exynos SoC UPSTREAM: usb: dwc3: core: set force_gen1 bit in USB31 devices if max speed is SS UPSTREAM: PCI: dwc: Strengthen the MSI address allocation logic UPSTREAM: PCI: dwc: Restrict only coherent DMA mask for MSI address allocation ANDROID: ABI: Update oplus symbol list ANDROID: vendor_hook: rename the the name of hooks FROMGIT: usb: xhci: Add error handling in xhci_map_urb_for_dma FROMGIT: usb: dwc3: host: Set XHCI_SG_TRB_CACHE_SIZE_QUIRK FROMGIT: usb: host: xhci-plat: Add support for XHCI_SG_TRB_CACHE_SIZE_QUIRK ANDROID: fuse-bpf: Fix readdir for getdents Linux 6.1.78 netfilter: nft_set_rbtree: skip end interval element from gc net: stmmac: xgmac: fix a typo of register name in DPP safety handling ALSA: usb-audio: Sort quirk table entries net: stmmac: xgmac: use #define for string constants clocksource: Skip watchdog check for large watchdog intervals block: treat poll queue enter similarly to timeouts f2fs: add helper to check compression level RDMA/irdma: Fix support for 64k pages vhost: use kzalloc() instead of kmalloc() followed by memset() Revert "ASoC: amd: Add new dmi entries for acp5x platform" io_uring/net: fix sr->len for IORING_OP_RECV with MSG_WAITALL and buffers Input: atkbd - skip ATKBD_CMD_SETLEDS when skipping ATKBD_CMD_GETID Input: i8042 - fix strange behavior of touchpad on Clevo NS70PU hrtimer: Report offline hrtimer enqueue usb: host: xhci-plat: Add support for XHCI_SG_TRB_CACHE_SIZE_QUIRK usb: dwc3: host: Set XHCI_SG_TRB_CACHE_SIZE_QUIRK USB: serial: cp210x: add ID for IMST iM871A-USB USB: serial: option: add Fibocom FM101-GL variant USB: serial: qcserial: add new usb-id for Dell Wireless DW5826e ALSA: usb-audio: add quirk for RODE NT-USB+ ALSA: usb-audio: Add a quirk for Yamaha YIT-W12TX transmitter ALSA: usb-audio: Add delay quirk for MOTU M Series 2nd revision mtd: parsers: ofpart: add workaround for #size-cells 0 fs: dlm: don't put dlm_local_addrs on heap blk-iocost: Fix an UBSAN shift-out-of-bounds warning scsi: core: Move scsi_host_busy() out of host lock if it is for per-command fs/ntfs3: Fix an NULL dereference bug netfilter: nft_set_pipapo: remove scratch_aligned pointer netfilter: nft_set_pipapo: add helper to release pcpu scratch area netfilter: nft_set_pipapo: store index in scratch maps netfilter: nft_ct: reject direction for ct id drm/amd/display: Implement bounds check for stream encoder creation in DCN301 netfilter: nft_compat: restrict match/target protocol to u16 netfilter: nft_compat: reject unused compat flag netfilter: nft_compat: narrow down revision to unsigned 8-bits selftests: 
cmsg_ipv6: repeat the exact packet ppp_async: limit MRU to 64K af_unix: Call kfree_skb() for dead unix_(sk)->oob_skb in GC. tipc: Check the bearer type before calling tipc_udp_nl_bearer_add() rxrpc: Fix response to PING RESPONSE ACKs to a dead call drm/i915/gvt: Fix uninitialized variable in handle_mmio() inet: read sk->sk_family once in inet_recv_error() hwmon: (coretemp) Fix bogus core_id to attr name mapping hwmon: (coretemp) Fix out-of-bounds memory access hwmon: (aspeed-pwm-tacho) mutex for tach reading octeontx2-pf: Fix a memleak otx2_sq_init atm: idt77252: fix a memleak in open_card_ubr0 tunnels: fix out of bounds access when building IPv6 PMTU error selftests: net: avoid just another constant wait selftests: net: cut more slack for gro fwd tests. net: atlantic: Fix DMA mapping for PTP hwts ring netdevsim: avoid potential loop in nsim_dev_trap_report_work() wifi: mac80211: fix waiting for beacons logic net: stmmac: xgmac: fix handling of DPP safety error for DMA channels drm/msm/dpu: check for valid hw_pp in dpu_encoder_helper_phys_cleanup drm/msm/dp: return correct Colorimetry for DP_TEST_DYNAMIC_RANGE_CEA case drm/msms/dp: fixed link clock divider bits be over written in BPC unknown case cifs: failure to add channel on iface should bump up weight phy: ti: phy-omap-usb2: Fix NULL pointer dereference for SRP dmaengine: fix is_slave_direction() return false when DMA_DEV_TO_DEV phy: renesas: rcar-gen3-usb2: Fix returning wrong error code dmaengine: fsl-qdma: Fix a memory leak related to the queue command DMA dmaengine: fsl-qdma: Fix a memory leak related to the status queue DMA dmaengine: ti: k3-udma: Report short packet errors dmaengine: fsl-dpaa2-qdma: Fix the size of dma pools ext4: regenerate buddy after block freeing failed if under fc replay Linux 6.1.77 drm/amdgpu: Fix missing error code in 'gmc_v6/7/8/9_0_hw_init()' ASoC: codecs: wsa883x: fix PA volume control ASoC: codecs: lpass-wsa-macro: fix compander volume hack bonding: remove print in bond_verify_device_path gve: Fix use-after-free vulnerability LoongArch/smp: Call rcutree_report_cpu_starting() at tlb_init() drm/msm/dsi: Enable runtime PM Revert "drm/amd/display: Disable PSR-SU on Parade 0803 TCON again" mm, kmsan: fix infinite recursion due to RCU critical section arm64: irq: set the correct node for shadow call stack selftests: bonding: Check initial state selftests: team: Add missing config options net: sysfs: Fix /sys/class/net/<iface> path selftests: net: fix available tunnels detection af_unix: fix lockdep positive in sk_diag_dump_icons() net: ipv4: fix a memleak in ip_setup_cork netfilter: nft_ct: sanitize layer 3 and 4 protocol number in custom expectations netfilter: nf_log: replace BUG_ON by WARN_ON_ONCE when putting logger netfilter: nf_tables: restrict tunnel object to NFPROTO_NETDEV netfilter: conntrack: correct window scaling with retransmitted SYN selftests: net: add missing config for GENEVE bridge: mcast: fix disabled snooping after long uptime llc: call sock_orphan() at release time ipv6: Ensure natural alignment of const ipv6 loopback and router addresses net: dsa: qca8k: fix illegal usage of GPIO ixgbe: Fix an error handling path in ixgbe_read_iosf_sb_reg_x550() ixgbe: Refactor overtemp event handling ixgbe: Refactor returning internal error codes tcp: add sanity checks to rx zerocopy net: lan966x: Fix port configuration when using SGMII interface ipmr: fix kernel panic when forwarding mcast packets ipv4: raw: add drop reasons ip6_tunnel: make sure to pull inner header in __ip6_tnl_rcv() selftests: 
net: give more time for GRO aggregation HID: hidraw: fix a problem of memory leak in hidraw_release() scsi: core: Move scsi_host_busy() out of host lock for waking up EH handler regulator: ti-abb: don't use devm_platform_ioremap_resource_byname for shared interrupt register scsi: isci: Fix an error code problem in isci_io_request_build() drm: using mul_u32_u32() requires linux/math64.h wifi: cfg80211: fix RCU dereference in __cfg80211_bss_update perf: Fix the nr_addr_filters fix drm/amdkfd: Fix 'node' NULL check in 'svm_range_get_range_boundaries()' drm/amdgpu: Release 'adev->pm.fw' before return in 'amdgpu_device_need_post()' drm/amdgpu: Fix with right return code '-EIO' in 'amdgpu_gmc_vram_checking()' drm/amd/powerplay: Fix kzalloc parameter 'ATOM_Tonga_PPM_Table' in 'get_platform_power_management_table()' ceph: fix invalid pointer access if get_quota_realm return ERR_PTR ceph: fix deadlock or deadcode of misusing dget() ceph: reinitialize mds feature bit even when session in open blk-mq: fix IO hang from sbitmap wakeup race virtio_net: Fix "‘%d’ directive writing between 1 and 11 bytes into a region of size 10" warnings drm/amdkfd: Fix lock dependency warning with srcu drm/amdkfd: Fix lock dependency warning libsubcmd: Fix memory leak in uniq() misc: lis3lv02d_i2c: Add missing setting of the reg_ctrl callback 9p: Fix initialisation of netfs_inode for 9p PCI/AER: Decode Requester ID when no error info found PCI: Fix 64GT/s effective data rate calculation spmi: mediatek: Fix UAF on device remove fs/kernfs/dir: obey S_ISGID tty: allow TIOCSLCKTRMIOS with CAP_CHECKPOINT_RESTORE selftests/sgx: Fix linker script asserts usb: hub: Add quirk to decrease IN-ep poll interval for Microchip USB491x hub usb: hub: Replace hardcoded quirk value with BIT() macro perf cs-etm: Bump minimum OpenCSD version to ensure a bugfix is present PCI: switchtec: Fix stdev_release() crash after surprise hot remove PCI: Only override AMD USB controller if required mailbox: arm_mhuv2: Fix a bug for mhuv2_sender_interrupt mfd: ti_am335x_tscadc: Fix TI SoC dependencies xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import i3c: master: cdns: Update maximum prescaler value for i2c clock um: time-travel: fix time corruption um: net: Fix return type of uml_net_start_xmit() um: Don't use vfprintf() for os_info() um: Fix naming clash between UML and scheduler leds: trigger: panic: Don't register panic notifier if creating the trigger failed ALSA: hda/conexant: Fix headset auto detect fail in cx8070 and SN6140 drm/amdkfd: Fix iterator used outside loop in 'kfd_add_peer_prop()' drm/amdgpu: Drop 'fence' check in 'to_amdgpu_amdkfd_fence()' drm/amdgpu: Fix '*fw' from request_firmware() not released in 'amdgpu_ucode_request()' drm/amdgpu: Let KFD sync with VM fences drm/amdgpu: Fix ecc irq enable/disable unpaired clk: imx: clk-imx8qxp: fix LVDS bypass, pixel and phy clocks drm/amd/display: make flip_timestamp_in_us a 64-bit variable watchdog: it87_wdt: Keep WDTCTRL bit 3 unmodified for IT8784/IT8786 clk: mmp: pxa168: Fix memory leak in pxa168_clk_init() clk: hi3620: Fix memory leak in hi3620_mmc_clk_init() drm/amdgpu: fix ftrace event amdgpu_bo_move always move on same heap drm/msm/dpu: fix writeback programming for YUV cases drm/msm/dpu: Ratelimit framedone timeout msgs drm/amd/display: For prefetch mode > 0, extend prefetch if possible media: i2c: imx335: Fix hblank min/max values media: ddbridge: fix an error code problem in ddb_probe media: amphion: remove mutext lock in condition of wait_event IB/ipoib: Fix 
mcast list locking drm/exynos: Call drm_atomic_helper_shutdown() at shutdown/unbind time f2fs: fix to tag gcing flag on page during block migration hwmon: (nct6775) Fix fan speed set failure in automatic mode media: rkisp1: Fix IRQ disable race issue media: rkisp1: Store IRQ lines media: rkisp1: Fix IRQ handler return values media: rkisp1: Drop IRQF_SHARED ALSA: hda: Intel: add HDA_ARL PCI ID support PCI: add INTEL_HDA_ARL to pci_ids.h media: rockchip: rga: fix swizzling for RGB formats media: stk1160: Fixed high volume of stk1160_dbg messages drm/mipi-dsi: Fix detach call without attach drm/framebuffer: Fix use of uninitialized variable drm/drm_file: fix use of uninitialized variable ASoC: amd: Add new dmi entries for acp5x platform f2fs: fix write pointers on zoned device after roll forward drm/amd/display: Fix tiled display misalignment drm/bridge: anx7625: Fix Set HPD irq detect window to 2ms drm/panel-edp: Add override_edid_mode quirk for generic edp RDMA/IPoIB: Fix error code return in ipoib_mcast_join fast_dput(): handle underflows gracefully ASoC: doc: Fix undefined SND_SOC_DAPM_NOPM argument ALSA: hda: Refer to correct stream index at loops f2fs: fix to check return value of f2fs_reserve_new_block() net: dsa: qca8k: put MDIO bus OF node on qca8k_mdio_register() failure octeontx2-af: Fix max NPC MCAM entry check while validating ref_entry i40e: Fix VF disable behavior to block all traffic bridge: cfm: fix enum typo in br_cc_ccm_tx_parse net/smc: disable SEID on non-s390 archs where virtual ISM may be used Bluetooth: L2CAP: Fix possible multiple reject send Bluetooth: hci_sync: fix BR/EDR wakeup bug Bluetooth: qca: Set both WIDEBAND_SPEECH and LE_STATES quirks for QCA2066 wifi: cfg80211: free beacon_ies when overridden from hidden BSS wifi: rtlwifi: rtl8723{be,ae}: using calculate_bit_shift() libbpf: Fix NULL pointer dereference in bpf_object__collect_prog_relos wifi: rtl8xxxu: Add additional USB IDs for RTL8192EU devices arm64: dts: amlogic: fix format for s4 uart node ice: fix pre-shifted bit usage arm64: dts: qcom: msm8998: Fix 'out-ports' is a required property arm64: dts: qcom: msm8996: Fix 'in-ports' is a required property md: Whenassemble the array, consult the superblock of the freshest device block: prevent an integer overflow in bvec_try_merge_hw_page net: dsa: mv88e6xxx: Fix mv88e6352_serdes_get_stats error path net: atlantic: eliminate double free in error handling logic ice: fix ICE_AQ_VSI_Q_OPT_RSS_* register values scsi: hisi_sas: Set .phy_attached before notifing phyup event HISI_PHYE_PHY_UP_PM ARM: dts: imx23/28: Fix the DMA controller node name ARM: dts: imx23-sansa: Use preferred i2c-gpios properties ARM: dts: imx27-apf27dev: Fix LED name ARM: dts: imx25/27: Pass timing0 ARM: dts: imx25: Fix the iim compatible string block/rnbd-srv: Check for unlikely string overflow ionic: bypass firmware cmds when stuck in reset ionic: pass opcode to devcmd_wait net: phy: at803x: fix passing the wrong reference for config_intr ARM: dts: imx1: Fix sram node ARM: dts: imx27: Fix sram node ARM: dts: imx: Use flash@0,0 pattern ARM: dts: imx25/27-eukrea: Fix RTC node name ARM: dts: rockchip: fix rk3036 hdmi ports node wifi: wfx: fix possible NULL pointer dereference in wfx_set_mfp_ap() bpf: Set uattr->batch.count as zero before batched update or deletion scsi: libfc: Fix up timeout error in fc_fcp_rec_error() scsi: libfc: Don't schedule abort twice bpf: Check rcu_read_lock_trace_held() before calling bpf map helpers wifi: ath11k: fix race due to setting ATH11K_FLAG_EXT_IRQ_ENABLED 
too early wifi: ath9k: Fix potential array-index-out-of-bounds read in ath9k_htc_txstatus() ARM: dts: imx7s: Fix nand-controller #size-cells ARM: dts: imx7s: Fix lcdif compatible ARM: dts: imx7d: Fix coresight funnel ports scsi: arcmsr: Support new PCI device IDs 1883 and 1886 scsi: mpi3mr: Add PCI checks where SAS5116 diverges from SAS4116 net: usb: ax88179_178a: avoid two consecutive device resets bonding: return -ENOMEM instead of BUG in alb_upper_dev_walk PCI: Add no PM reset quirk for NVIDIA Spectrum devices scsi: lpfc: Fix possible file string name overflow when updating firmware soc: xilinx: fix unhandled SGI warning message soc: xilinx: Fix for call trace due to the usage of smp_processor_id() selftests/bpf: Fix issues in setup_classid_environment() wifi: rt2x00: correct wrong BBP register in RxDCOC calibration selftests/bpf: Fix pyperf180 compilation failure with clang18 selftests/bpf: satisfy compiler by having explicit return in btf test wifi: rt2x00: restart beacon queue when hardware reset ext4: avoid online resizing failures due to oversized flex bg ext4: remove unnecessary check from alloc_flex_gd() ext4: unify the type of flexbg_size to unsigned int ext4: fix inconsistent between segment fstrim and full fstrim ecryptfs: Reject casefold directory inodes SUNRPC: Fix a suspicious RCU usage warning KVM: s390: fix setting of fpc register s390/ptrace: handle setting of fpc register correctly s390/vfio-ap: fix sysfs status attribute for AP queue devices arch: consolidate arch_irq_work_raise prototypes jfs: fix array-index-out-of-bounds in diNewExt rxrpc_find_service_conn_rcu: fix the usage of read_seqbegin_or_lock() afs: fix the usage of read_seqbegin_or_lock() in afs_find_server*() afs: fix the usage of read_seqbegin_or_lock() in afs_lookup_volume_rcu() crypto: stm32/crc32 - fix parsing list of devices erofs: fix ztailpacking for subpage compressed blocks crypto: octeontx2 - Fix cptvf driver cleanup pstore/ram: Fix crash when setting number of cpus to an odd number jfs: fix uaf in jfs_evict_inode jfs: fix array-index-out-of-bounds in dbAdjTree jfs: fix slab-out-of-bounds Read in dtSearch UBSAN: array-index-out-of-bounds in dtSplitRoot FS:JFS:UBSAN:array-index-out-of-bounds in dbAdjTree ACPI: APEI: set memory failure flags as MF_ACTION_REQUIRED on synchronous events PM / devfreq: Synchronize devfreq_monitor_[start/stop] ACPI: NUMA: Fix the logic of getting the fake_pxm value ACPI: extlog: fix NULL pointer dereference check PNP: ACPI: fix fortify warning ACPI: video: Add quirk for the Colorful X15 AT 23 Laptop audit: Send netlink ACK before setting connection in auditd_set regulator: core: Only increment use_count when enable_count changes debugobjects: Stop accessing objects after releasing hash bucket lock perf/core: Fix narrow startup race when creating the perf nr_addr_filters sysfs file x86/mce: Mark fatal MCE's page as poison to avoid panic in the kdump kernel powerpc/lib: Validate size for vector operations powerpc: pmd_move_must_withdraw() is only needed for CONFIG_TRANSPARENT_HUGEPAGE x86/boot: Ignore NMIs during very early boot powerpc/64s: Fix CONFIG_NUMA=n build due to create_section_mapping() powerpc/mm: Fix build failures due to arch_reserved_kernel_pages() powerpc: Fix build error due to is_valid_bugaddr() drivers/perf: pmuv3: don't expose SW_INCR event in sysfs arm64: irq: set the correct node for VMAP stack powerpc/mm: Fix null-pointer dereference in pgtable_cache_add asm-generic: make sparse happy with odd-sized put_unaligned_*() Linux 6.1.76 net/mlx5e: Handle 
hardware IPsec limits events serial: core: fix kernel-doc for uart_port_unlock_irqrestore() x86/entry/ia32: Ensure s32 is sign extended to s64 tick/sched: Preserve number of idle sleeps across CPU hotplug events mips: Call lose_fpu(0) before initializing fcr31 in mips_set_personality_nan cxl/region:Fix overflow issue in alloc_hpa() MIPS: lantiq: register smp_ops on non-smp platforms spi: fix finalize message on error return spi: bcm-qspi: fix SFDP BFPT read by usig mspi read drm/bridge: anx7625: Ensure bridge is suspended in disable() block: Move checking GENHD_FL_NO_PART to bdev_add_partition() gpio: eic-sprd: Clear interrupt after set the interrupt type drm/exynos: gsc: minor fix for loop iteration in gsc_runtime_resume drm/exynos: fix accidental on-stack copy of exynos_drm_plane drm/bridge: parade-ps8640: Make sure we drop the AUX mutex in the error case drm/bridge: parade-ps8640: Ensure bridge is suspended in .post_disable() drm/bridge: sii902x: Fix audio codec unregistration drm/bridge: sii902x: Fix probing race issue drm/bridge: sii902x: Use devm_regulator_bulk_get_enable() drm: panel-simple: add missing bus flags for Tianma tm070jvhg[30/33] drm/bridge: parade-ps8640: Wait for HPD when doing an AUX transfer Revert "powerpc/64s: Increase default stack size to 32KB" drm/panel-edp: drm/panel-edp: Fix AUO B116XAK01 name and timing btrfs: zoned: optimize hint byte for zoned allocator btrfs: zoned: factor out prepare_allocation_zoned() serial: sc16is7xx: fix unconditional activation of THRI interrupt serial: sc16is7xx: Use port lock wrappers serial: core: Provide port lock wrappers dlm: use kernel_connect() and kernel_bind() ARM: dts: qcom: sdx55: fix USB SS wakeup ARM: dts: qcom: sdx55: fix USB DP/DM HS PHY interrupts ARM: dts: qcom: sdx55: fix pdc '#interrupt-cells' ARM: dts: samsung: exynos4210-i9100: Unconditionally enable LDO12 ARM: dts: qcom: sdx55: fix USB wakeup interrupt types pipe: wakeup wr_wait after setting max_usage fs/pipe: move check to pipe_has_watch_queue() thermal: intel: hfi: Add syscore callbacks for system-wide PM thermal: intel: hfi: Disable an HFI instance when all its CPUs go offline thermal: intel: hfi: Refactor enabling code into helper functions PM: sleep: Fix possible deadlocks in core system-wide PM code PM: core: Remove unnecessary (void *) conversions bus: mhi: ep: Do not allocate event ring element on stack media: ov13b10: Enable runtime PM before registering async sub-device media: ov13b10: Support device probe in non-zero ACPI D state erofs: fix lz4 inplace decompression erofs: get rid of the remaining kmap_atomic() drm/amdgpu/pm: Fix the power source flag error drm/amd/display: Port DENTIST hang and TDR fixes to OTG disable W/A drm/bridge: nxp-ptn3460: simplify some error checking platform/x86: intel-uncore-freq: Fix types in sysfs callbacks drm/amd/display: Disable PSR-SU on Parade 0803 TCON again drm/tidss: Fix atomic_flush check drm/bridge: nxp-ptn3460: fix i2c_master_send() error checking drm: Don't unref the same fb many times by mistake due to deadlock handling cpufreq: intel_pstate: Refine computation of P-state for given frequency gpiolib: acpi: Ignore touchpad wakeup on GPD G1619-04 xfs: read only mounts with fsopen mount API are busted firmware: arm_scmi: Check mailbox/SMT channel for consistency ksmbd: fix global oob in ksmbd_nl_policy platform/x86: p2sb: Allow p2sb_bar() calls during PCI device probe netfilter: nf_tables: reject QUEUE/DROP verdict parameters netfilter: nft_chain_filter: handle NETDEV_UNREGISTER for inet/ingress basechain 
hv_netvsc: Calculate correct ring size when PAGE_SIZE is not 4 Kbytes wifi: iwlwifi: fix a memory corruption exec: Fix error handling in begin_new_exec() rbd: don't move requests to the running list on errors btrfs: don't abort filesystem when attempting to snapshot deleted subvolume btrfs: defrag: reject unknown flags of btrfs_ioctl_defrag_range_args btrfs: don't warn if discard range is not aligned to sector btrfs: tree-checker: fix inline ref size in error messages btrfs: ref-verify: free ref cache before clearing mount opt btrfs: avoid copying BTRFS_ROOT_SUBVOL_DEAD flag to snapshot of subvolume being deleted nbd: always initialize struct msghdr completely net: fec: fix the unhandled context fault from smmu fjes: fix memleaks in fjes_hw_setup selftests: netdevsim: fix the udp_tunnel_nic test net: mvpp2: clear BM pool before initialization net: stmmac: Wait a bit for the reset to take effect netfilter: nf_tables: validate NFPROTO_* family netfilter: nf_tables: restrict anonymous set and map names to 16 bytes btrfs: fix race between reading a directory and adding entries to it btrfs: refresh dir last index during a rewinddir(3) call btrfs: set last dir index to the current last index when opening dir btrfs: fix infinite directory reads netfilter: nft_limit: reject configurations that cause integer overflow rcu: Defer RCU kthreads wakeup when CPU is dying net/mlx5e: fix a potential double-free in fs_any_create_groups net/mlx5e: fix a double-free in arfs_create_groups net/mlx5e: Allow software parsing when IPsec crypto is enabled net/mlx5: Use mlx5 device constant for selecting CQ period mode for ASO net/mlx5: DR, Can't go to uplink vport on RX rule net/mlx5: DR, Use the right GVMI number for drop action ipv6: init the accept_queue's spinlocks in inet6_create netlink: fix potential sleeping issue in mqueue_flush_file tcp: Add memory barrier to tcp_push() afs: Hide silly-rename files from userspace tracing: Ensure visibility when inserting an element into tracing_map netfs, fscache: Prevent Oops in fscache_put_cache() net/rds: Fix UBSAN: array-index-out-of-bounds in rds_cmsg_recv net: micrel: Fix PTP frame parsing for lan8814 tun: add missing rx stats accounting in tun_xdp_act tun: fix missing dropped counter in tun_xdp_act net: fix removing a namespace with conflicting altnames udp: fix busy polling llc: Drop support for ETH_P_TR_802_2. 
llc: make llc_ui_sendmsg() more robust against bonding changes vlan: skip nested type that is not IFLA_VLAN_QOS_MAPPING bnxt_en: Wait for FLR to complete during probe tcp: make sure init the accept_queue's spinlocks once net/smc: fix illegal rmb_desc access in SMC-D connection dump wifi: mac80211: fix potential sta-link leak drm/amd/display: pbn_div need be updated for hotplug event Revert "drm/amd: Enable PCIe PME from D3" ksmbd: Add missing set_freezable() for freezable kthread ksmbd: send lease break notification on FILE_RENAME_INFORMATION ksmbd: don't increment epoch if current state and request state are same ksmbd: fix potential circular locking issue in smb2_set_ea() ksmbd: set v2 lease version on lease upgrade mm: page_alloc: unreserve highatomic page blocks before oom LoongArch/smp: Call rcutree_report_cpu_starting() earlier serial: sc16is7xx: improve do/while loop in sc16is7xx_irq() serial: sc16is7xx: remove obsolete loop in sc16is7xx_port_irq() serial: sc16is7xx: fix invalid sc16is7xx_lines bitfield in case of probe error serial: sc16is7xx: convert from _raw_ to _noinc_ regmap functions for FIFO serial: sc16is7xx: change EFR lock to operate on each channels serial: sc16is7xx: remove unused line structure member serial: sc16is7xx: remove global regmap from struct sc16is7xx_port serial: sc16is7xx: remove wasteful static buffer in sc16is7xx_regmap_name() serial: sc16is7xx: improve regmap debugfs by using one regmap per port rename(): fix the locking of subdirectories mm/sparsemem: fix race in accessing memory_section->usage mm/rmap: fix misplaced parenthesis of a likely() ubifs: ubifs_symlink: Fix memleak of inode->i_link in error path nouveau/vmm: don't set addr on the fail path to avoid warning rtc: Extend timeout for waiting for UIP to clear to 1s rtc: Add support for configuring the UIP timeout for RTC reads rtc: mc146818-lib: Adjust failure return code for mc146818_get_time() rtc: Adjust failure return code for cmos_set_alarm() rtc: cmos: Use ACPI alarm for non-Intel x86 systems too arm64: Rename ARM64_WORKAROUND_2966298 media: mtk-jpeg: Fix use after free bug due to error path handling in mtk_jpeg_dec_device_run mmc: mmc_spi: remove custom DMA mapped buffers mmc: core: Use mrq.sbc in close-ended ffu media: videobuf2-dma-sg: fix vmap callback scripts/get_abi: fix source path leak docs: kernel_abi.py: fix command injection lsm: new security_file_ioctl_compat() hook arm64: dts: qcom: sm8150: fix USB DP/DM HS PHY interrupts arm64: dts: qcom: sdm845: fix USB DP/DM HS PHY interrupts arm64: dts: qcom: sc7280: fix usb_1 wakeup interrupt types arm64: dts: qcom: sm8150: fix USB wakeup interrupt types arm64: dts: qcom: sdm845: fix USB wakeup interrupt types arm64: dts: qcom: sc7180: fix USB wakeup interrupt types scsi: ufs: core: Remove the ufshcd_hba_exit() call from ufshcd_async_scan() dmaengine: fix NULL pointer in channel unregistration function iio: adc: ad7091r: Enable internal vref if external vref is not supplied async: Introduce async_schedule_dev_nocall() async: Split async_schedule_node_domain() parisc/power: Fix power soft-off button emulation on qemu parisc/firmware: Fix F-extend for PDC addresses bus: mhi: host: Add spinlock to protect WP access when queueing TREs bus: mhi: host: Drop chan lock before queuing buffers bus: mhi: host: Add alignment check for event ring read pointer mips: Fix max_mapnr being uninitialized on early stages s390/vfio-ap: let on_scan_complete() callback filter matrix and update guest's APCB s390/vfio-ap: loop over the shadow APCB when filtering 
guest's AP configuration s390/vfio-ap: always filter entire AP matrix media: ov9734: Enable runtime PM before registering async sub-device rpmsg: virtio: Free driver_override when rpmsg_remove() media: imx355: Enable runtime PM before registering async sub-device PM / devfreq: Fix buffer overflow in trans_stat_show s390/vfio-ap: unpin pages on gisc registration failure crypto: s390/aes - Fix buffer overread in CTR mode hwrng: core - Fix page fault dead lock on mmap-ed hwrng PM: hibernate: Enforce ordering during image compression/decompression crypto: api - Disallow identical driver names btrfs: sysfs: validate scrub_speed_max value OPP: Pass rounded rate to _set_opp() arm64: properly install vmlinuz.efi ext4: allow for the last group to be marked as trimmed iio: adc: ad7091r: Allow users to configure device events iio: adc: ad7091r: Set alert bit in config register Revert "nSVM: Check for reserved encodings of TLB_CONTROL in nested VMCB" usb: dwc3: gadget: Handle EP0 request dequeuing properly usb: dwc3: gadget: Queue PM runtime idle on disconnect event usb: dwc3: gadget: Refactor EP0 forced stall/restart into a separate API Conflicts: Documentation/devicetree/bindings Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml Documentation/devicetree/bindings/media/rockchip-isp1.yaml Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml drivers/bus/mhi/host/main.c drivers/clk/qcom/videocc-sm8150.c drivers/soc/qcom/llcc-qcom.c include/dt-bindings/clock/qcom,videocc-sm8150.h Change-Id: I736b858a11a53d39b1a31aaa270d6508d9d527ba Upstream-Build: ks_qcom-android14-6.1-keystone-qcom-release@12087153 UKQ2.240714.001 Signed-off-by: Srinivasarao Pathipati <quic_c_spathi@quicinc.com>
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"

#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
/*
 * memory_hotplug.memmap_on_memory parameter
 */
static bool memmap_on_memory __ro_after_init;
module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");

static inline bool mhp_memmap_on_memory(void)
{
	return memmap_on_memory;
}
#else
static inline bool mhp_memmap_on_memory(void)
{
	return false;
}
#endif
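
/*
 * Usage sketch (editor's illustration, not part of this file; assumes a
 * kernel built with CONFIG_MHP_MEMMAP_ON_MEMORY): because the parameter
 * is registered read-only (mode 0444), it is normally set on the kernel
 * command line, e.g. "memory_hotplug.memmap_on_memory=1", so that newly
 * hot-added memory blocks carve their vmemmap out of the range being
 * added instead of consuming already-present memory.
 */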

enum {
	ONLINE_POLICY_CONTIG_ZONES = 0,
	ONLINE_POLICY_AUTO_MOVABLE,
};

static const char * const online_policy_to_str[] = {
	[ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
	[ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
};

static int set_online_policy(const char *val, const struct kernel_param *kp)
{
	int ret = sysfs_match_string(online_policy_to_str, val);

	if (ret < 0)
		return ret;
	*((int *)kp->arg) = ret;
	return 0;
}

static int get_online_policy(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
}

/*
 * memory_hotplug.online_policy: configure online behavior when onlining without
 * specifying a zone (MMOP_ONLINE)
 *
 * "contig-zones": keep zone contiguous
 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
 *                 (auto_movable_ratio, auto_movable_numa_aware) allows for it
 */
static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
static const struct kernel_param_ops online_policy_ops = {
	.set = set_online_policy,
	.get = get_online_policy,
};
module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
MODULE_PARM_DESC(online_policy,
		"Set the online policy (\"contig-zones\", \"auto-movable\") "
		"Default: \"contig-zones\"");

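/*
 * Tuning sketch (editor's illustration, not part of this file):
 * module_param_cb() with mode 0644 exposes the policy under
 * /sys/module/memory_hotplug/parameters/online_policy, so it can be
 * switched at runtime, e.g.
 *	echo auto-movable > /sys/module/memory_hotplug/parameters/online_policy
 * or set at boot via "memory_hotplug.online_policy=auto-movable".
 */
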
/*
 * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
 *
 * The ratio represents an upper limit and the kernel might decide to not
 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
 * doesn't allow for more MOVABLE memory.
 */
static unsigned int auto_movable_ratio __read_mostly = 301;
module_param(auto_movable_ratio, uint, 0644);
MODULE_PARM_DESC(auto_movable_ratio,
		"Set the maximum ratio of MOVABLE:KERNEL memory in the system "
		"in percent for \"auto-movable\" online policy. Default: 301");

/*
 * memory_hotplug.auto_movable_numa_aware: consider numa node stats
 */
#ifdef CONFIG_NUMA
static bool auto_movable_numa_aware __read_mostly = true;
module_param(auto_movable_numa_aware, bool, 0644);
MODULE_PARM_DESC(auto_movable_numa_aware,
		"Consider numa node stats in addition to global stats in "
		"\"auto-movable\" online policy. Default: true");
#endif /* CONFIG_NUMA */

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() to restore the generic callback.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}
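
/*
 * Reader-side usage sketch (editor's illustration): code that must not race
 * with memory going online/offline while it inspects pages brackets the walk
 * with the read side of mem_hotplug_lock:
 *
 *	get_online_mems();
 *	... look at pfn_to_online_page(pfn), section state, ...
 *	put_online_mems();
 */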

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
int mhp_default_online_type = MMOP_OFFLINE;
#else
int mhp_default_online_type = MMOP_ONLINE;
#endif

static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}
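
/*
 * Writer-side usage sketch (editor's illustration): hotplug operations such
 * as add_memory_resource() and the offlining path take the write side (plus
 * cpus_read_lock()) around the whole operation:
 *
 *	mem_hotplug_begin();
 *	... add or remove the memory range ...
 *	mem_hotplug_done();
 */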

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to document of 'mem=' in kernel-parameters.txt for more
	 * details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
				start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}
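
/*
 * Note (editor's illustration): any resource_name other than plain
 * "System RAM" -- for example "System RAM (kmem)", used when device memory
 * is hotplugged as ordinary RAM -- gets IORESOURCE_SYSRAM_DRIVER_MANAGED
 * set above, marking the range as driver-managed rather than ordinary
 * boot memory.
 */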

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn | nr_pages, min_align))
		return -EINVAL;
	return 0;
}
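
/*
 * Illustration (editor's note, assumes 4 KiB base pages, where a
 * sub-section covers 2 MiB, i.e. PAGES_PER_SUBSECTION == 512): a range
 * starting at pfn 0x80000 with nr_pages = 0x200 passes the check above,
 * while a start of 0x80010 or a length of 0x100 fails
 * IS_ALIGNED(pfn | nr_pages, min_align).
 */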

/*
 * Return page for the valid pfn only if the page is online. All pfn
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn, NULL);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
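
/*
 * Caller-side sketch (editor's illustration): pfn walkers use this helper
 * instead of a bare pfn_valid()/pfn_to_page() pair so that offline and
 * ZONE_DEVICE ranges are skipped:
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		... operate on an online page ...
 *	}
 */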

int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!pgprot_val(params->pgprot)))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	if (check_pfn_span(pfn, nr_pages)) {
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
					 params->pgmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}
|
|
|
|
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
|
|
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
|
|
unsigned long start_pfn,
|
|
unsigned long end_pfn)
|
|
{
|
|
for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
|
|
if (unlikely(!pfn_to_online_page(start_pfn)))
|
|
continue;
|
|
|
|
if (unlikely(pfn_to_nid(start_pfn) != nid))
|
|
continue;
|
|
|
|
if (zone != page_zone(pfn_to_page(start_pfn)))
|
|
continue;
|
|
|
|
return start_pfn;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
|
|
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
|
|
unsigned long start_pfn,
|
|
unsigned long end_pfn)
|
|
{
|
|
unsigned long pfn;
|
|
|
|
/* pfn is the end pfn of a memory section. */
|
|
pfn = end_pfn - 1;
|
|
for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
|
|
if (unlikely(!pfn_to_online_page(pfn)))
|
|
continue;
|
|
|
|
if (unlikely(pfn_to_nid(pfn) != nid))
|
|
continue;
|
|
|
|
if (zone != page_zone(pfn_to_page(pfn)))
|
|
continue;
|
|
|
|
return pfn;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
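/*
 * Shrink the zone span after a range was offlined/removed: if the range
 * sat at the start or the end of the zone, rescan for the new first or
 * last pfn that is still online and belongs to this zone and node.
 */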
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
|
|
unsigned long end_pfn)
|
|
{
|
|
unsigned long pfn;
|
|
int nid = zone_to_nid(zone);
|
|
|
|
if (zone->zone_start_pfn == start_pfn) {
|
|
/*
* If the section is the smallest section in the zone, we need to
* shrink zone->zone_start_pfn and zone->spanned_pages.
* In this case, find the second smallest valid mem_section
* for shrinking the zone.
*/
|
|
pfn = find_smallest_section_pfn(nid, zone, end_pfn,
|
|
zone_end_pfn(zone));
|
|
if (pfn) {
|
|
zone->spanned_pages = zone_end_pfn(zone) - pfn;
|
|
zone->zone_start_pfn = pfn;
|
|
} else {
|
|
zone->zone_start_pfn = 0;
|
|
zone->spanned_pages = 0;
|
|
}
|
|
} else if (zone_end_pfn(zone) == end_pfn) {
|
|
/*
* If the section is the biggest section in the zone, we need to
* shrink zone->spanned_pages.
* In this case, find the second biggest valid mem_section for
* shrinking the zone.
*/
|
|
pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
|
|
start_pfn);
|
|
if (pfn)
|
|
zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
|
|
else {
|
|
zone->zone_start_pfn = 0;
|
|
zone->spanned_pages = 0;
|
|
}
|
|
}
|
|
}
|
|
|
|
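/* Recompute the node span from the zones that still span pages. */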
static void update_pgdat_span(struct pglist_data *pgdat)
|
|
{
|
|
unsigned long node_start_pfn = 0, node_end_pfn = 0;
|
|
struct zone *zone;
|
|
|
|
for (zone = pgdat->node_zones;
|
|
zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
|
|
unsigned long end_pfn = zone_end_pfn(zone);
|
|
|
|
/* No need to lock the zones, they can't change. */
|
|
if (!zone->spanned_pages)
|
|
continue;
|
|
if (!node_end_pfn) {
|
|
node_start_pfn = zone->zone_start_pfn;
|
|
node_end_pfn = end_pfn;
|
|
continue;
|
|
}
|
|
|
|
if (end_pfn > node_end_pfn)
|
|
node_end_pfn = end_pfn;
|
|
if (zone->zone_start_pfn < node_start_pfn)
|
|
node_start_pfn = zone->zone_start_pfn;
|
|
}
|
|
|
|
pgdat->node_start_pfn = node_start_pfn;
|
|
pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
|
|
}
|
|
|
|
void __ref remove_pfn_range_from_zone(struct zone *zone,
|
|
unsigned long start_pfn,
|
|
unsigned long nr_pages)
|
|
{
|
|
const unsigned long end_pfn = start_pfn + nr_pages;
|
|
struct pglist_data *pgdat = zone->zone_pgdat;
|
|
unsigned long pfn, cur_nr_pages;
|
|
|
|
/* Poison struct pages because they are now uninitialized again. */
|
|
for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
|
|
cond_resched();
|
|
|
|
/* Select all remaining pages up to the next section boundary */
|
|
cur_nr_pages =
|
|
min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
|
|
page_init_poison(pfn_to_page(pfn),
|
|
sizeof(struct page) * cur_nr_pages);
|
|
}
|
|
|
|
/*
|
|
* Zone shrinking code cannot properly deal with ZONE_DEVICE. So
|
|
* we will not try to shrink the zones - which is okay as
|
|
* set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
|
|
*/
|
|
if (zone_is_zone_device(zone))
|
|
return;
|
|
|
|
clear_zone_contiguous(zone);
|
|
|
|
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
|
|
update_pgdat_span(pgdat);
|
|
|
|
set_zone_contiguous(zone);
|
|
}
|
|
|
|
static void __remove_section(unsigned long pfn, unsigned long nr_pages,
|
|
unsigned long map_offset,
|
|
struct vmem_altmap *altmap)
|
|
{
|
|
struct mem_section *ms = __pfn_to_section(pfn);
|
|
|
|
if (WARN_ON_ONCE(!valid_section(ms)))
|
|
return;
|
|
|
|
sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
|
|
}
|
|
|
|
/**
|
|
* __remove_pages() - remove sections of pages
|
|
* @pfn: starting pageframe (must be aligned to start of a section)
|
|
* @nr_pages: number of pages to remove (must be multiple of section size)
|
|
* @altmap: alternative device page map or %NULL if default memmap is used
|
|
*
|
|
* Generic helper function to remove section mappings and sysfs entries
|
|
* for the section of the memory we are removing. Caller needs to make
|
|
* sure that pages are marked reserved and zones are adjusted properly by
|
|
* calling offline_pages().
|
|
*/
|
|
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
|
|
struct vmem_altmap *altmap)
|
|
{
|
|
const unsigned long end_pfn = pfn + nr_pages;
|
|
unsigned long cur_nr_pages;
|
|
unsigned long map_offset = 0;
|
|
|
|
map_offset = vmem_altmap_offset(altmap);
|
|
|
|
if (check_pfn_span(pfn, nr_pages)) {
|
|
WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1);
|
|
return;
|
|
}
|
|
|
|
for (; pfn < end_pfn; pfn += cur_nr_pages) {
|
|
cond_resched();
|
|
/* Select all remaining pages up to the next section boundary */
|
|
cur_nr_pages = min(end_pfn - pfn,
SECTION_ALIGN_UP(pfn + 1) - pfn);
|
|
__remove_section(pfn, cur_nr_pages, map_offset, altmap);
|
|
map_offset = 0;
|
|
}
|
|
}
|
|
|
|
int set_online_page_callback(online_page_callback_t callback)
|
|
{
|
|
int rc = -EINVAL;
|
|
|
|
get_online_mems();
|
|
mutex_lock(&online_page_callback_lock);
|
|
|
|
if (online_page_callback == generic_online_page) {
|
|
online_page_callback = callback;
|
|
rc = 0;
|
|
}
|
|
|
|
mutex_unlock(&online_page_callback_lock);
|
|
put_online_mems();
|
|
|
|
return rc;
|
|
}
|
|
EXPORT_SYMBOL_GPL(set_online_page_callback);
|
|
|
|
int restore_online_page_callback(online_page_callback_t callback)
|
|
{
|
|
int rc = -EINVAL;
|
|
|
|
get_online_mems();
|
|
mutex_lock(&online_page_callback_lock);
|
|
|
|
if (online_page_callback == callback) {
|
|
online_page_callback = generic_online_page;
|
|
rc = 0;
|
|
}
|
|
|
|
mutex_unlock(&online_page_callback_lock);
|
|
put_online_mems();
|
|
|
|
return rc;
|
|
}
|
|
EXPORT_SYMBOL_GPL(restore_online_page_callback);
|
|
|
|
void generic_online_page(struct page *page, unsigned int order)
|
|
{
|
|
/*
|
|
* Freeing the page with debug_pagealloc enabled will try to unmap it,
|
|
* so we should map it first. This is better than introducing a special
|
|
* case in page freeing fast path.
|
|
*/
|
|
debug_pagealloc_map_pages(page, 1 << order);
|
|
__free_pages_core(page, order);
|
|
totalram_pages_add(1UL << order);
|
|
}
|
|
EXPORT_SYMBOL_GPL(generic_online_page);
|
|
|
|
static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
|
|
{
|
|
const unsigned long end_pfn = start_pfn + nr_pages;
|
|
unsigned long pfn;
|
|
|
|
/*
|
|
* Online the pages in MAX_ORDER - 1 aligned chunks. The callback might
|
|
* decide to not expose all pages to the buddy (e.g., expose them
|
|
* later). We account all pages as being online and belonging to this
|
|
* zone ("present").
|
|
* When using memmap_on_memory, the range might not be aligned to
|
|
* MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
|
|
* this and the first chunk to online will be pageblock_nr_pages.
|
|
*/
|
|
for (pfn = start_pfn; pfn < end_pfn;) {
|
|
int order = min(MAX_ORDER - 1UL, __ffs(pfn));
|
|
|
|
(*online_page_callback)(pfn_to_page(pfn), order);
|
|
pfn += (1UL << order);
|
|
}
|
|
|
|
/* mark all involved sections as online */
|
|
online_mem_sections(start_pfn, end_pfn);
|
|
}
|
|
|
|
/* check which state of node_states will be changed when online memory */
|
|
static void node_states_check_changes_online(unsigned long nr_pages,
|
|
struct zone *zone, struct memory_notify *arg)
|
|
{
|
|
int nid = zone_to_nid(zone);
|
|
|
|
arg->status_change_nid = NUMA_NO_NODE;
|
|
arg->status_change_nid_normal = NUMA_NO_NODE;
|
|
|
|
if (!node_state(nid, N_MEMORY))
|
|
arg->status_change_nid = nid;
|
|
if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
|
|
arg->status_change_nid_normal = nid;
|
|
}
|
|
|
|
static void node_states_set_node(int node, struct memory_notify *arg)
|
|
{
|
|
if (arg->status_change_nid_normal >= 0)
|
|
node_set_state(node, N_NORMAL_MEMORY);
|
|
|
|
if (arg->status_change_nid >= 0)
|
|
node_set_state(node, N_MEMORY);
|
|
}
|
|
|
|
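/* Grow the zone span so that it covers the pfn range being added. */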
static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
|
|
unsigned long nr_pages)
|
|
{
|
|
unsigned long old_end_pfn = zone_end_pfn(zone);
|
|
|
|
if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
|
|
zone->zone_start_pfn = start_pfn;
|
|
|
|
zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
|
|
}
|
|
|
|
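/* Grow the node span so that it covers the pfn range being added. */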
static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
|
|
unsigned long nr_pages)
|
|
{
|
|
unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
|
|
|
|
if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
|
|
pgdat->node_start_pfn = start_pfn;
|
|
|
|
pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
|
|
|
|
}
|
|
|
|
#ifdef CONFIG_ZONE_DEVICE
|
|
static void section_taint_zone_device(unsigned long pfn)
|
|
{
|
|
struct mem_section *ms = __pfn_to_section(pfn);
|
|
|
|
ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
|
|
}
|
|
#else
|
|
static inline void section_taint_zone_device(unsigned long pfn)
|
|
{
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
* Associate the pfn range with the given zone, initializing the memmaps
|
|
* and resizing the pgdat/zone data to span the added pages. After this
|
|
* call, all affected pages are PG_reserved.
|
|
*
|
|
* All aligned pageblocks are initialized to the specified migratetype
|
|
* (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
|
|
* zone stats (e.g., nr_isolate_pageblock) are touched.
|
|
*/
|
|
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
|
|
unsigned long nr_pages,
|
|
struct vmem_altmap *altmap, int migratetype)
|
|
{
|
|
struct pglist_data *pgdat = zone->zone_pgdat;
|
|
int nid = pgdat->node_id;
|
|
|
|
clear_zone_contiguous(zone);
|
|
|
|
if (zone_is_empty(zone))
|
|
init_currently_empty_zone(zone, start_pfn, nr_pages);
|
|
resize_zone_range(zone, start_pfn, nr_pages);
|
|
resize_pgdat_range(pgdat, start_pfn, nr_pages);
|
|
|
|
/*
|
|
* Subsection population requires care in pfn_to_online_page().
|
|
* Set the taint to enable the slow path detection of
|
|
* ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
|
|
* section.
|
|
*/
|
|
if (zone_is_zone_device(zone)) {
|
|
if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
|
|
section_taint_zone_device(start_pfn);
|
|
if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
|
|
section_taint_zone_device(start_pfn + nr_pages);
|
|
}
|
|
|
|
/*
|
|
* TODO now we have a visible range of pages which are not associated
|
|
* with their zone properly. Not nice but set_pfnblock_flags_mask
|
|
* expects the zone spans the pfn range. All the pages in the range
|
|
* are reserved so nobody should be touching them so we should be safe
|
|
*/
|
|
memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
|
|
MEMINIT_HOTPLUG, altmap, migratetype);
|
|
|
|
set_zone_contiguous(zone);
|
|
}
|
|
|
|
struct auto_movable_stats {
|
|
unsigned long kernel_early_pages;
|
|
unsigned long movable_pages;
|
|
};
|
|
|
|
static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
|
|
struct zone *zone)
|
|
{
|
|
if (zone_idx(zone) == ZONE_MOVABLE) {
|
|
stats->movable_pages += zone->present_pages;
|
|
} else {
|
|
stats->kernel_early_pages += zone->present_early_pages;
|
|
#ifdef CONFIG_CMA
|
|
/*
|
|
* CMA pages (never on hotplugged memory) behave like
|
|
* ZONE_MOVABLE.
|
|
*/
|
|
stats->movable_pages += zone->cma_pages;
|
|
stats->kernel_early_pages -= zone->cma_pages;
|
|
#endif /* CONFIG_CMA */
|
|
}
|
|
}
|
|
struct auto_movable_group_stats {
|
|
unsigned long movable_pages;
|
|
unsigned long req_kernel_early_pages;
|
|
};
|
|
|
|
static int auto_movable_stats_account_group(struct memory_group *group,
|
|
void *arg)
|
|
{
|
|
const int ratio = READ_ONCE(auto_movable_ratio);
|
|
struct auto_movable_group_stats *stats = arg;
|
|
long pages;
|
|
|
|
/*
|
|
* We don't support modifying the config while the auto-movable online
|
|
* policy is already enabled. Just avoid the division by zero below.
|
|
*/
|
|
if (!ratio)
|
|
return 0;
|
|
|
|
/*
|
|
* Calculate how many early kernel pages this group requires to
|
|
* satisfy the configured zone ratio.
|
|
*/
|
|
pages = group->present_movable_pages * 100 / ratio;
|
|
pages -= group->present_kernel_pages;
|
|
|
|
if (pages > 0)
|
|
stats->req_kernel_early_pages += pages;
|
|
stats->movable_pages += group->present_movable_pages;
|
|
return 0;
|
|
}
|
|
|
|
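/*
 * Check whether onlining nr_pages to ZONE_MOVABLE keeps the configured
 * MOVABLE : KERNEL_EARLY ratio, either system-wide (nid == NUMA_NO_NODE)
 * or for a single node, taking dynamic memory groups into account.
 */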
static bool auto_movable_can_online_movable(int nid, struct memory_group *group,
|
|
unsigned long nr_pages)
|
|
{
|
|
unsigned long kernel_early_pages, movable_pages;
|
|
struct auto_movable_group_stats group_stats = {};
|
|
struct auto_movable_stats stats = {};
|
|
pg_data_t *pgdat = NODE_DATA(nid);
|
|
struct zone *zone;
|
|
int i;
|
|
|
|
/* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
|
|
if (nid == NUMA_NO_NODE) {
|
|
/* TODO: cache values */
|
|
for_each_populated_zone(zone)
|
|
auto_movable_stats_account_zone(&stats, zone);
|
|
} else {
|
|
for (i = 0; i < MAX_NR_ZONES; i++) {
|
|
zone = pgdat->node_zones + i;
|
|
if (populated_zone(zone))
|
|
auto_movable_stats_account_zone(&stats, zone);
|
|
}
|
|
}
|
|
|
|
kernel_early_pages = stats.kernel_early_pages;
|
|
movable_pages = stats.movable_pages;
|
|
|
|
/*
|
|
* Kernel memory inside dynamic memory group allows for more MOVABLE
|
|
* memory within the same group. Remove the effect of all but the
|
|
* current group from the stats.
|
|
*/
|
|
walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
|
|
group, &group_stats);
|
|
if (kernel_early_pages <= group_stats.req_kernel_early_pages)
|
|
return false;
|
|
kernel_early_pages -= group_stats.req_kernel_early_pages;
|
|
movable_pages -= group_stats.movable_pages;
|
|
|
|
if (group && group->is_dynamic)
|
|
kernel_early_pages += group->present_kernel_pages;
|
|
|
|
/*
|
|
* Test if we could online the given number of pages to ZONE_MOVABLE
|
|
* and still stay in the configured ratio.
|
|
*/
|
|
movable_pages += nr_pages;
|
|
return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
|
|
}
|
|
|
|
/*
|
|
* Returns a default kernel memory zone for the given pfn range.
|
|
* If no kernel zone covers this pfn range it will automatically go
|
|
* to the ZONE_NORMAL.
|
|
*/
|
|
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
|
|
unsigned long nr_pages)
|
|
{
|
|
struct pglist_data *pgdat = NODE_DATA(nid);
|
|
int zid;
|
|
|
|
for (zid = 0; zid < ZONE_NORMAL; zid++) {
|
|
struct zone *zone = &pgdat->node_zones[zid];
|
|
|
|
if (zone_intersects(zone, start_pfn, nr_pages))
|
|
return zone;
|
|
}
|
|
|
|
return &pgdat->node_zones[ZONE_NORMAL];
|
|
}
|
|
|
|
/*
|
|
* Determine to which zone to online memory dynamically based on user
|
|
* configuration and system stats. We care about the following ratio:
|
|
*
|
|
* MOVABLE : KERNEL
|
|
*
|
|
* Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
|
|
* one of the kernel zones. CMA pages inside one of the kernel zones really
|
|
* behaves like ZONE_MOVABLE, so we treat them accordingly.
|
|
*
|
|
* We don't allow for hotplugged memory in a KERNEL zone to increase the
|
|
* amount of MOVABLE memory we can have, so we end up with:
|
|
*
|
|
* MOVABLE : KERNEL_EARLY
|
|
*
|
|
* Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
|
|
* boot. We base our calculation on KERNEL_EARLY internally, because:
|
|
*
|
|
* a) Hotplugged memory in one of the kernel zones can sometimes still get
|
|
* hotunplugged, especially when hot(un)plugging individual memory blocks.
|
|
* There is no coordination across memory devices, therefore "automatic"
|
|
* hotunplugging, as implemented in hypervisors, could result in zone
|
|
* imbalances.
|
|
* b) Early/boot memory in one of the kernel zones can usually not get
|
|
* hotunplugged again (e.g., no firmware interface to unplug, fragmented
|
|
* with unmovable allocations). While there are corner cases where it might
|
|
* still work, it is barely relevant in practice.
|
|
*
|
|
* Exceptions are dynamic memory groups, which allow for more MOVABLE
|
|
* memory within the same memory group -- because in that case, there is
|
|
* coordination within the single memory device managed by a single driver.
|
|
*
|
|
* We rely on "present pages" instead of "managed pages", as the latter is
|
|
* highly unreliable and dynamic in virtualized environments, and does not
|
|
* consider boot time allocations. For example, memory ballooning adjusts the
|
|
* managed pages when inflating/deflating the balloon, and balloon compaction
|
|
* can even migrate inflated pages between zones.
|
|
*
|
|
* Using "present pages" is better but some things to keep in mind are:
|
|
*
|
|
* a) Some memblock allocations, such as for the crashkernel area, are
|
|
* effectively unused by the kernel, yet they account to "present pages".
|
|
* Fortunately, these allocations are comparatively small in relevant setups
|
|
* (e.g., fraction of system memory).
|
|
* b) Some hotplugged memory blocks in virtualized environments, especially
|
|
* hotplugged by virtio-mem, look like they are completely present, however,
|
|
* only parts of the memory block are actually currently usable.
|
|
* "present pages" is an upper limit that can get reached at runtime. As
|
|
* we base our calculations on KERNEL_EARLY, this is not an issue.
|
|
*/
|
|
static struct zone *auto_movable_zone_for_pfn(int nid,
|
|
struct memory_group *group,
|
|
unsigned long pfn,
|
|
unsigned long nr_pages)
|
|
{
|
|
unsigned long online_pages = 0, max_pages, end_pfn;
|
|
struct page *page;
|
|
|
|
if (!auto_movable_ratio)
|
|
goto kernel_zone;
|
|
|
|
if (group && !group->is_dynamic) {
|
|
max_pages = group->s.max_pages;
|
|
online_pages = group->present_movable_pages;
|
|
|
|
/* If anything is !MOVABLE online the rest !MOVABLE. */
|
|
if (group->present_kernel_pages)
|
|
goto kernel_zone;
|
|
} else if (!group || group->d.unit_pages == nr_pages) {
|
|
max_pages = nr_pages;
|
|
} else {
|
|
max_pages = group->d.unit_pages;
|
|
/*
|
|
* Take a look at all online sections in the current unit.
|
|
* We can safely assume that all pages within a section belong
|
|
* to the same zone, because dynamic memory groups only deal
|
|
* with hotplugged memory.
|
|
*/
|
|
pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
|
|
end_pfn = pfn + group->d.unit_pages;
|
|
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
|
|
page = pfn_to_online_page(pfn);
|
|
if (!page)
|
|
continue;
|
|
/* If anything is !MOVABLE online the rest !MOVABLE. */
|
|
if (!is_zone_movable_page(page))
|
|
goto kernel_zone;
|
|
online_pages += PAGES_PER_SECTION;
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Online MOVABLE if we could *currently* online all remaining parts
|
|
* MOVABLE. We expect to (add+) online them immediately next, so if
|
|
* nobody interferes, all will be MOVABLE if possible.
|
|
*/
|
|
nr_pages = max_pages - online_pages;
|
|
if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
|
|
goto kernel_zone;
|
|
|
|
#ifdef CONFIG_NUMA
|
|
if (auto_movable_numa_aware &&
|
|
!auto_movable_can_online_movable(nid, group, nr_pages))
|
|
goto kernel_zone;
|
|
#endif /* CONFIG_NUMA */
|
|
|
|
return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
|
|
kernel_zone:
|
|
return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
|
|
}
|
|
|
|
static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
|
|
unsigned long nr_pages)
|
|
{
|
|
struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
|
|
nr_pages);
|
|
struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
|
|
bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
|
|
bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
|
|
|
|
/*
|
|
* We inherit the existing zone in a simple case where zones do not
|
|
* overlap in the given range
|
|
*/
|
|
if (in_kernel ^ in_movable)
|
|
return (in_kernel) ? kernel_zone : movable_zone;
|
|
|
|
/*
|
|
* If the range doesn't belong to any zone or two zones overlap in the
|
|
* given range then we use movable zone only if movable_node is
|
|
* enabled because we always online to a kernel zone by default.
|
|
*/
|
|
return movable_node_enabled ? movable_zone : kernel_zone;
|
|
}
|
|
|
|
struct zone *zone_for_pfn_range(int online_type, int nid,
|
|
struct memory_group *group, unsigned long start_pfn,
|
|
unsigned long nr_pages)
|
|
{
|
|
if (online_type == MMOP_ONLINE_KERNEL)
|
|
return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
|
|
|
|
if (online_type == MMOP_ONLINE_MOVABLE)
|
|
return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
|
|
|
|
if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
|
|
return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
|
|
|
|
return default_zone_for_pfn(nid, start_pfn, nr_pages);
|
|
}
|
|
|
|
/*
|
|
* This function should only be called by memory_block_{online,offline},
|
|
* and {online,offline}_pages.
|
|
*/
|
|
void adjust_present_page_count(struct page *page, struct memory_group *group,
|
|
long nr_pages)
|
|
{
|
|
struct zone *zone = page_zone(page);
|
|
const bool movable = zone_idx(zone) == ZONE_MOVABLE;
|
|
|
|
/*
|
|
* We only support onlining/offlining/adding/removing of complete
|
|
* memory blocks; therefore, all of it is either early or hotplugged.
|
|
*/
|
|
if (early_section(__pfn_to_section(page_to_pfn(page))))
|
|
zone->present_early_pages += nr_pages;
|
|
zone->present_pages += nr_pages;
|
|
zone->zone_pgdat->node_present_pages += nr_pages;
|
|
|
|
if (group && movable)
|
|
group->present_movable_pages += nr_pages;
|
|
else if (group && !movable)
|
|
group->present_kernel_pages += nr_pages;
|
|
}
|
|
|
|
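/*
 * Initialize the vmemmap pages of a memmap_on_memory block: add the KASAN
 * zero shadow, move the pfn range to the target zone, mark the pages as
 * self-hosted vmemmap and online any sections they fully span.
 */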
int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
|
|
struct zone *zone)
|
|
{
|
|
unsigned long end_pfn = pfn + nr_pages;
|
|
int ret, i;
|
|
|
|
ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
|
|
if (ret)
|
|
return ret;
|
|
|
|
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
|
|
|
|
for (i = 0; i < nr_pages; i++)
|
|
SetPageVmemmapSelfHosted(pfn_to_page(pfn + i));
|
|
|
|
/*
|
|
* It might be that the vmemmap_pages fully span sections. If that is
|
|
* the case, mark those sections online here as otherwise they will be
|
|
* left offline.
|
|
*/
|
|
if (nr_pages >= PAGES_PER_SECTION)
|
|
online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
|
|
|
|
return ret;
|
|
}
|
|
|
|
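/* Undo mhp_init_memmap_on_memory() once the memory block is offline. */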
void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
|
|
{
|
|
unsigned long end_pfn = pfn + nr_pages;
|
|
|
|
/*
|
|
* It might be that the vmemmap_pages fully span sections. If that is
|
|
* the case, mark those sections offline here as otherwise they will be
|
|
* left online.
|
|
*/
|
|
if (nr_pages >= PAGES_PER_SECTION)
|
|
offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
|
|
|
|
/*
|
|
* The pages associated with this vmemmap have been offlined, so
|
|
* we can reset its state here.
|
|
*/
|
|
remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
|
|
kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
|
|
}
|
|
|
|
/*
|
|
* Must be called with mem_hotplug_lock in write mode.
|
|
*/
|
|
int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
|
|
struct zone *zone, struct memory_group *group)
|
|
{
|
|
unsigned long flags;
|
|
int need_zonelists_rebuild = 0;
|
|
const int nid = zone_to_nid(zone);
|
|
int ret;
|
|
struct memory_notify arg;
|
|
|
|
/*
|
|
* {on,off}lining is constrained to full memory sections (or more
|
|
* precisely to memory blocks from the user space POV).
|
|
* memmap_on_memory is an exception because it reserves initial part
|
|
* of the physical memory space for vmemmaps. That space is pageblock
|
|
* aligned.
|
|
*/
|
|
if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
|
|
!IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
|
|
return -EINVAL;
|
|
|
|
|
|
/* associate pfn range with the zone */
|
|
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
|
|
|
|
arg.start_pfn = pfn;
|
|
arg.nr_pages = nr_pages;
|
|
node_states_check_changes_online(nr_pages, zone, &arg);
|
|
|
|
ret = memory_notify(MEM_GOING_ONLINE, &arg);
|
|
ret = notifier_to_errno(ret);
|
|
if (ret)
|
|
goto failed_addition;
|
|
|
|
/*
|
|
* Fixup the number of isolated pageblocks before marking the sections
|
|
* onlining, such that undo_isolate_page_range() works correctly.
|
|
*/
|
|
spin_lock_irqsave(&zone->lock, flags);
|
|
zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
|
|
spin_unlock_irqrestore(&zone->lock, flags);
|
|
|
|
/*
|
|
* If this zone is not populated, then it is not in zonelist.
|
|
* This means the page allocator ignores this zone.
|
|
* So, zonelist must be updated after online.
|
|
*/
|
|
if (!populated_zone(zone)) {
|
|
need_zonelists_rebuild = 1;
|
|
setup_zone_pageset(zone);
|
|
}
|
|
|
|
online_pages_range(pfn, nr_pages);
|
|
adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
|
|
|
|
node_states_set_node(nid, &arg);
|
|
if (need_zonelists_rebuild)
|
|
build_all_zonelists(NULL);
|
|
|
|
/* Basic onlining is complete, allow allocation of onlined pages. */
|
|
undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
|
|
|
|
/*
|
|
* Freshly onlined pages aren't shuffled (e.g., all pages are placed to
|
|
* the tail of the freelist when undoing isolation). Shuffle the whole
|
|
* zone to make sure the just onlined pages are properly distributed
|
|
* across the whole freelist - to create an initial shuffle.
|
|
*/
|
|
shuffle_zone(zone);
|
|
|
|
/* reinitialise watermarks and update pcp limits */
|
|
init_per_zone_wmark_min();
|
|
|
|
kswapd_run(nid);
|
|
kcompactd_run(nid);
|
|
|
|
writeback_set_ratelimit();
|
|
|
|
memory_notify(MEM_ONLINE, &arg);
|
|
return 0;
|
|
|
|
failed_addition:
|
|
pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
|
|
(unsigned long long) pfn << PAGE_SHIFT,
|
|
(((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
|
|
memory_notify(MEM_CANCEL_ONLINE, &arg);
|
|
remove_pfn_range_from_zone(zone, pfn, nr_pages);
|
|
return ret;
|
|
}
|
|
|
|
static void reset_node_present_pages(pg_data_t *pgdat)
|
|
{
|
|
struct zone *z;
|
|
|
|
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
|
|
z->present_pages = 0;
|
|
|
|
pgdat->node_present_pages = 0;
|
|
}
|
|
|
|
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
|
|
static pg_data_t __ref *hotadd_init_pgdat(int nid)
|
|
{
|
|
struct pglist_data *pgdat;
|
|
|
|
/*
|
|
* NODE_DATA is preallocated (free_area_init) but its internal
|
|
* state is not allocated completely. Add missing pieces.
|
|
* Completely offline nodes stay around and they just need
|
|
* reinitialization.
|
|
*/
|
|
pgdat = NODE_DATA(nid);
|
|
|
|
/* init node's zones as empty zones, we don't have any present pages.*/
|
|
free_area_init_core_hotplug(pgdat);
|
|
|
|
/*
|
|
* The node we allocated has no zone fallback lists. To avoid
* accessing a not-initialized zonelist, build it here.
|
|
*/
|
|
build_all_zonelists(pgdat);
|
|
|
|
/*
|
|
* When memory is hot-added, all the memory is in offline state. So
|
|
* clear all zones' present_pages because they will be updated in
|
|
* online_pages() and offline_pages().
|
|
* TODO: should be in free_area_init_core_hotplug?
|
|
*/
|
|
reset_node_managed_pages(pgdat);
|
|
reset_node_present_pages(pgdat);
|
|
|
|
return pgdat;
|
|
}
|
|
|
|
/*
|
|
* __try_online_node - online a node if offlined
|
|
* @nid: the node ID
|
|
* @set_node_online: Whether we want to online the node
|
|
* called by cpu_up() to online a node without onlined memory.
|
|
*
|
|
* Returns:
|
|
* 1 -> a new node has been allocated
|
|
* 0 -> the node is already online
|
|
* -ENOMEM -> the node could not be allocated
|
|
*/
|
|
static int __try_online_node(int nid, bool set_node_online)
|
|
{
|
|
pg_data_t *pgdat;
|
|
int ret = 1;
|
|
|
|
if (node_online(nid))
|
|
return 0;
|
|
|
|
pgdat = hotadd_init_pgdat(nid);
|
|
if (!pgdat) {
|
|
pr_err("Cannot online node %d due to NULL pgdat\n", nid);
|
|
ret = -ENOMEM;
|
|
goto out;
|
|
}
|
|
|
|
if (set_node_online) {
|
|
node_set_online(nid);
|
|
ret = register_one_node(nid);
|
|
BUG_ON(ret);
|
|
}
|
|
out:
|
|
return ret;
|
|
}
|
|
|
|
/*
|
|
* Users of this function always want to online/register the node
|
|
*/
|
|
int try_online_node(int nid)
|
|
{
|
|
int ret;
|
|
|
|
mem_hotplug_begin();
|
|
ret = __try_online_node(nid, true);
|
|
mem_hotplug_done();
|
|
return ret;
|
|
}
|
|
|
|
static int check_hotplug_memory_range(u64 start, u64 size)
|
|
{
|
|
/* memory range must be block size aligned */
|
|
if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
|
|
!IS_ALIGNED(size, memory_block_size_bytes())) {
|
|
pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
|
|
memory_block_size_bytes(), start, size);
|
|
return -EINVAL;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int online_memory_block(struct memory_block *mem, void *arg)
|
|
{
|
|
mem->online_type = mhp_default_online_type;
|
|
return device_online(&mem->dev);
|
|
}
|
|
|
|
bool mhp_supports_memmap_on_memory(unsigned long size)
|
|
{
|
|
unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
|
|
unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
|
|
unsigned long remaining_size = size - vmemmap_size;
|
|
|
|
/*
|
|
* Besides having arch support and the feature enabled at runtime, we
|
|
* need a few more assumptions to hold true:
|
|
*
|
|
* a) We span a single memory block: memory onlining/offlining happens
|
|
* in memory block granularity. We don't want the vmemmap of online
|
|
* memory blocks to reside on offline memory blocks. In the future,
|
|
* we might want to support variable-sized memory blocks to make the
|
|
* feature more versatile.
|
|
*
|
|
* b) The vmemmap pages span complete PMDs: We don't want vmemmap code
|
|
* to populate memory from the altmap for unrelated parts (i.e.,
|
|
* other memory blocks)
|
|
*
|
|
* c) The vmemmap pages (and thereby the pages that will be exposed to
|
|
* the buddy) have to cover full pageblocks: memory onlining/offlining
|
|
* code requires applicable ranges to be page-aligned, for example, to
|
|
* set the migratetypes properly.
|
|
*
|
|
* TODO: Although we have a check here to make sure that vmemmap pages
|
|
* fully populate a PMD, it is not the right place to check for
|
|
* this. A much better solution involves improving vmemmap code
|
|
* to fallback to base pages when trying to populate vmemmap using
|
|
* altmap as an alternative source of memory, and we do not exactly
|
|
* populate a single PMD.
|
|
*/
|
|
return mhp_memmap_on_memory() &&
|
|
size == memory_block_size_bytes() &&
|
|
IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
|
|
IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
|
|
}
|
|
|
|
/*
|
|
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
|
|
* and online/offline operations (triggered e.g. by sysfs).
|
|
*
|
|
* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
|
|
*/
|
|
int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
|
|
{
|
|
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
|
|
enum memblock_flags memblock_flags = MEMBLOCK_NONE;
|
|
struct vmem_altmap mhp_altmap = {};
|
|
struct memory_group *group = NULL;
|
|
u64 start, size;
|
|
bool new_node = false;
|
|
int ret;
|
|
|
|
start = res->start;
|
|
size = resource_size(res);
|
|
|
|
ret = check_hotplug_memory_range(start, size);
|
|
if (ret)
|
|
return ret;
|
|
|
|
if (mhp_flags & MHP_NID_IS_MGID) {
|
|
group = memory_group_find_by_id(nid);
|
|
if (!group)
|
|
return -EINVAL;
|
|
nid = group->nid;
|
|
}
|
|
|
|
if (!node_possible(nid)) {
|
|
WARN(1, "node %d was absent from the node_possible_map\n", nid);
|
|
return -EINVAL;
|
|
}
|
|
|
|
mem_hotplug_begin();
|
|
|
|
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
|
|
if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
|
|
memblock_flags = MEMBLOCK_DRIVER_MANAGED;
|
|
ret = memblock_add_node(start, size, nid, memblock_flags);
|
|
if (ret)
|
|
goto error_mem_hotplug_end;
|
|
}
|
|
|
|
ret = __try_online_node(nid, false);
|
|
if (ret < 0)
|
|
goto error;
|
|
new_node = ret;
|
|
|
|
/*
|
|
* Self hosted memmap array
|
|
*/
|
|
if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
|
|
if (!mhp_supports_memmap_on_memory(size)) {
|
|
ret = -EINVAL;
|
|
goto error;
|
|
}
|
|
mhp_altmap.free = PHYS_PFN(size);
|
|
mhp_altmap.base_pfn = PHYS_PFN(start);
|
|
params.altmap = &mhp_altmap;
|
|
}
|
|
|
|
/* call arch's memory hotadd */
|
|
ret = arch_add_memory(nid, start, size, &params);
|
|
if (ret < 0)
|
|
goto error;
|
|
|
|
/* create memory block devices after memory was added */
|
|
ret = create_memory_block_devices(start, size, mhp_altmap.alloc,
|
|
group);
|
|
if (ret) {
|
|
arch_remove_memory(start, size, params.altmap);
|
|
goto error;
|
|
}
|
|
|
|
if (new_node) {
|
|
/* If sysfs file of new node can't be created, cpu on the node
|
|
* can't be hot-added. There is no rollback way now.
|
|
* So, check by BUG_ON() to catch it reluctantly..
|
|
* We online node here. We can't roll back from here.
|
|
*/
|
|
node_set_online(nid);
|
|
ret = __register_one_node(nid);
|
|
BUG_ON(ret);
|
|
}
|
|
|
|
register_memory_blocks_under_node(nid, PFN_DOWN(start),
PFN_UP(start + size - 1),
MEMINIT_HOTPLUG);
|
|
|
|
/* create new memmap entry */
|
|
if (!strcmp(res->name, "System RAM"))
|
|
firmware_map_add_hotplug(start, start + size, "System RAM");
|
|
|
|
/* device_online() will take the lock when calling online_pages() */
|
|
mem_hotplug_done();
|
|
|
|
/*
|
|
* In case we're allowed to merge the resource, flag it and trigger
|
|
* merging now that adding succeeded.
|
|
*/
|
|
if (mhp_flags & MHP_MERGE_RESOURCE)
|
|
merge_system_ram_resource(res);
|
|
|
|
/* online pages if requested */
|
|
if (mhp_default_online_type != MMOP_OFFLINE)
|
|
walk_memory_blocks(start, size, NULL, online_memory_block);
|
|
|
|
return ret;
|
|
error:
|
|
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
|
|
memblock_remove(start, size);
|
|
error_mem_hotplug_end:
|
|
mem_hotplug_done();
|
|
return ret;
|
|
}
|
|
|
|
/* requires device_hotplug_lock, see add_memory_resource() */
|
|
int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
|
|
{
|
|
struct resource *res;
|
|
int ret;
|
|
|
|
res = register_memory_resource(start, size, "System RAM");
|
|
if (IS_ERR(res))
|
|
return PTR_ERR(res);
|
|
|
|
ret = add_memory_resource(nid, res, mhp_flags);
|
|
if (ret < 0)
|
|
release_memory_resource(res);
|
|
return ret;
|
|
}
|
|
|
|
int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
|
|
{
|
|
int rc;
|
|
|
|
lock_device_hotplug();
|
|
rc = __add_memory(nid, start, size, mhp_flags);
|
|
unlock_device_hotplug();
|
|
|
|
return rc;
|
|
}
|
|
EXPORT_SYMBOL_GPL(add_memory);
|
|
|
|
#ifdef CONFIG_MEMORY_HOTPLUG_SUBSECTIONS
|
|
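/*
 * Hot-add a subsection-aligned range. Unlike add_memory(), no memory
 * block devices are created and the range is not onlined here; the
 * memory is only mapped via arch_add_memory() and registered as a
 * "System RAM" resource.
 */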
int add_memory_subsection(int nid, u64 start, u64 size)
|
|
{
|
|
struct mhp_params params = { .pgprot = PAGE_KERNEL };
|
|
struct resource *res;
|
|
int ret;
|
|
|
|
if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
|
|
!IS_ALIGNED(size, SUBSECTION_SIZE)) {
|
|
pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
|
|
__func__, start, size);
|
|
return -EINVAL;
|
|
}
|
|
|
|
res = register_memory_resource(start, size, "System RAM");
|
|
if (IS_ERR(res))
|
|
return PTR_ERR(res);
|
|
|
|
mem_hotplug_begin();
|
|
|
|
nid = memory_add_physaddr_to_nid(start);
|
|
|
|
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
|
|
memblock_add_node(start, size, nid, MEMBLOCK_NONE);
|
|
|
|
ret = arch_add_memory(nid, start, size, &params);
|
|
if (ret) {
|
|
pr_err("%s failed to add subsection start 0x%llx size 0x%llx\n",
|
|
__func__, start, size);
|
|
goto err_add_memory;
|
|
}
|
|
mem_hotplug_done();
|
|
|
|
return ret;
|
|
|
|
err_add_memory:
|
|
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
|
|
memblock_remove(start, size);
|
|
|
|
mem_hotplug_done();
|
|
|
|
release_memory_resource(res);
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(add_memory_subsection);
|
|
#endif /* CONFIG_MEMORY_HOTPLUG_SUBSECTIONS */
|
|
|
|
/*
|
|
* Add special, driver-managed memory to the system as system RAM. Such
|
|
* memory is not exposed via the raw firmware-provided memmap as system
|
|
* RAM, instead, it is detected and added by a driver - during cold boot,
|
|
* after a reboot, and after kexec.
|
|
*
|
|
* Reasons why this memory should not be used for the initial memmap of a
|
|
* kexec kernel or for placing kexec images:
|
|
* - The booting kernel is in charge of determining how this memory will be
|
|
* used (e.g., use persistent memory as system RAM)
|
|
* - Coordination with a hypervisor is required before this memory
|
|
* can be used (e.g., inaccessible parts).
|
|
*
|
|
* For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
|
|
* memory map") are created. Also, the created memory resource is flagged
|
|
* with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
|
|
* this memory as well (esp., not place kexec images onto it).
|
|
*
|
|
* The resource_name (visible via /proc/iomem) has to have the format
|
|
* "System RAM ($DRIVER)".
|
|
*/
|
|
int add_memory_driver_managed(int nid, u64 start, u64 size,
|
|
const char *resource_name, mhp_t mhp_flags)
|
|
{
|
|
struct resource *res;
|
|
int rc;
|
|
|
|
if (!resource_name ||
|
|
strstr(resource_name, "System RAM (") != resource_name ||
|
|
resource_name[strlen(resource_name) - 1] != ')')
|
|
return -EINVAL;
|
|
|
|
lock_device_hotplug();
|
|
|
|
res = register_memory_resource(start, size, resource_name);
|
|
if (IS_ERR(res)) {
|
|
rc = PTR_ERR(res);
|
|
goto out_unlock;
|
|
}
|
|
|
|
rc = add_memory_resource(nid, res, mhp_flags);
|
|
if (rc < 0)
|
|
release_memory_resource(res);
|
|
|
|
out_unlock:
|
|
unlock_device_hotplug();
|
|
return rc;
|
|
}
|
|
EXPORT_SYMBOL_GPL(add_memory_driver_managed);
|
|
|
|
/*
|
|
* Platforms should define arch_get_mappable_range() that provides
|
|
* maximum possible addressable physical memory range for which the
|
|
* linear mapping could be created. The platform returned address
|
|
* range must adhere to these following semantics.
|
|
*
|
|
* - range.start <= range.end
|
|
* - Range includes both end points [range.start..range.end]
|
|
*
|
|
* There is also a fallback definition provided here, allowing the
|
|
* entire possible physical address range in case any platform does
|
|
* not define arch_get_mappable_range().
|
|
*/
|
|
struct range __weak arch_get_mappable_range(void)
|
|
{
|
|
struct range mhp_range = {
|
|
.start = 0UL,
|
|
.end = -1ULL,
|
|
};
|
|
return mhp_range;
|
|
}
|
|
|
|
struct range mhp_get_pluggable_range(bool need_mapping)
|
|
{
|
|
const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
|
|
struct range mhp_range;
|
|
|
|
if (need_mapping) {
|
|
mhp_range = arch_get_mappable_range();
|
|
if (mhp_range.start > max_phys) {
|
|
mhp_range.start = 0;
|
|
mhp_range.end = 0;
|
|
}
|
|
mhp_range.end = min_t(u64, mhp_range.end, max_phys);
|
|
} else {
|
|
mhp_range.start = 0;
|
|
mhp_range.end = max_phys;
|
|
}
|
|
return mhp_range;
|
|
}
|
|
EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);
|
|
|
|
bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
|
|
{
|
|
struct range mhp_range = mhp_get_pluggable_range(need_mapping);
|
|
u64 end = start + size;
|
|
|
|
if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)
|
|
return true;
|
|
|
|
pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
|
|
start, end, mhp_range.start, mhp_range.end);
|
|
return false;
|
|
}
|
|
|
|
#ifdef CONFIG_MEMORY_HOTREMOVE
|
|
/*
|
|
* Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
|
|
* non-lru movable pages and hugepages). Will skip over most unmovable
|
|
* pages (esp., pages that can be skipped when offlining), but bail out on
|
|
* definitely unmovable pages.
|
|
*
|
|
* Returns:
|
|
* 0 in case a movable page is found and movable_pfn was updated.
|
|
* -ENOENT in case no movable page was found.
|
|
* -EBUSY in case a definitely unmovable page was found.
|
|
*/
|
|
static int scan_movable_pages(unsigned long start, unsigned long end,
|
|
unsigned long *movable_pfn)
|
|
{
|
|
unsigned long pfn;
|
|
|
|
for (pfn = start; pfn < end; pfn++) {
|
|
struct page *page, *head;
|
|
unsigned long skip;
|
|
|
|
if (!pfn_valid(pfn))
|
|
continue;
|
|
page = pfn_to_page(pfn);
|
|
if (PageLRU(page))
|
|
goto found;
|
|
if (__PageMovable(page))
|
|
goto found;
|
|
|
|
/*
|
|
* PageOffline() pages that are not marked __PageMovable() and
|
|
* have a reference count > 0 (after MEM_GOING_OFFLINE) are
|
|
* definitely unmovable. If their reference count would be 0,
|
|
* they could at least be skipped when offlining memory.
|
|
*/
|
|
if (PageOffline(page) && page_count(page))
|
|
return -EBUSY;
|
|
|
|
if (!PageHuge(page))
|
|
continue;
|
|
head = compound_head(page);
|
|
/*
|
|
* This test is racy as we hold no reference or lock. The
|
|
* hugetlb page could have been free'ed and head is no longer
|
|
* a hugetlb page before the following check. In such unlikely
|
|
* cases false positives and negatives are possible. Calling
|
|
* code must deal with these scenarios.
|
|
*/
|
|
if (HPageMigratable(head))
|
|
goto found;
|
|
skip = compound_nr(head) - (pfn - page_to_pfn(head));
|
|
pfn += skip - 1;
|
|
}
|
|
return -ENOENT;
|
|
found:
|
|
*movable_pfn = pfn;
|
|
return 0;
|
|
}
|
|
|
|
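/*
 * Isolate and migrate every movable page in [start_pfn, end_pfn) off the
 * range being offlined, preferring a different node as migration target.
 * Isolation/migration failures are logged (ratelimited) and the pages
 * are put back.
 */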
static int
|
|
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
|
{
|
|
unsigned long pfn;
|
|
struct page *page, *head;
|
|
int ret = 0;
|
|
LIST_HEAD(source);
|
|
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
|
|
DEFAULT_RATELIMIT_BURST);
|
|
|
|
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
|
|
struct folio *folio;
|
|
|
|
if (!pfn_valid(pfn))
|
|
continue;
|
|
page = pfn_to_page(pfn);
|
|
folio = page_folio(page);
|
|
head = &folio->page;
|
|
|
|
if (PageHuge(page)) {
|
|
pfn = page_to_pfn(head) + compound_nr(head) - 1;
|
|
isolate_hugetlb(head, &source);
|
|
continue;
|
|
} else if (PageTransHuge(page))
|
|
pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
|
|
|
|
/*
|
|
* HWPoison pages have elevated reference counts so the migration would
|
|
* fail on them. It also doesn't make any sense to migrate them in the
|
|
* first place. Still try to unmap such a page in case it is still mapped
|
|
* (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
|
|
* the unmap as the catch all safety net).
|
|
*/
|
|
if (PageHWPoison(page)) {
|
|
if (WARN_ON(folio_test_lru(folio)))
|
|
folio_isolate_lru(folio);
|
|
if (folio_mapped(folio))
|
|
try_to_unmap(folio, TTU_IGNORE_MLOCK);
|
|
continue;
|
|
}
|
|
|
|
if (!get_page_unless_zero(page))
|
|
continue;
|
|
/*
|
|
* We can skip free pages. And we can deal with pages on
|
|
* LRU and non-lru movable pages.
|
|
*/
|
|
if (PageLRU(page))
|
|
ret = isolate_lru_page(page);
|
|
else
|
|
ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
|
|
if (!ret) { /* Success */
|
|
list_add_tail(&page->lru, &source);
|
|
if (!__PageMovable(page))
|
|
inc_node_page_state(page, NR_ISOLATED_ANON +
|
|
page_is_file_lru(page));
|
|
|
|
} else {
|
|
if (__ratelimit(&migrate_rs)) {
|
|
pr_warn("failed to isolate pfn %lx\n", pfn);
|
|
dump_page(page, "isolation failed");
|
|
}
|
|
}
|
|
put_page(page);
|
|
}
|
|
if (!list_empty(&source)) {
|
|
nodemask_t nmask = node_states[N_MEMORY];
|
|
struct migration_target_control mtc = {
|
|
.nmask = &nmask,
|
|
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
|
|
};
|
|
|
|
/*
|
|
* We have checked that migration range is on a single zone so
|
|
* we can use the nid of the first page to all the others.
|
|
*/
|
|
mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
|
|
|
|
/*
|
|
* try to allocate from a different node but reuse this node
|
|
* if there are no other online nodes to be used (e.g. we are
|
|
* offlining a part of the only existing node)
|
|
*/
|
|
node_clear(mtc.nid, nmask);
|
|
if (nodes_empty(nmask))
|
|
node_set(mtc.nid, nmask);
|
|
ret = migrate_pages(&source, alloc_migration_target, NULL,
|
|
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
|
|
if (ret) {
|
|
list_for_each_entry(page, &source, lru) {
|
|
if (__ratelimit(&migrate_rs)) {
|
|
pr_warn("migrating pfn %lx failed ret:%d\n",
|
|
page_to_pfn(page), ret);
|
|
dump_page(page, "migration failure");
|
|
}
|
|
}
|
|
putback_movable_pages(&source);
|
|
}
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int __init cmdline_parse_movable_node(char *p)
|
|
{
|
|
movable_node_enabled = true;
|
|
return 0;
|
|
}
|
|
early_param("movable_node", cmdline_parse_movable_node);
|
|
|
|
/* check which state of node_states will be changed when offline memory */
|
|
static void node_states_check_changes_offline(unsigned long nr_pages,
|
|
struct zone *zone, struct memory_notify *arg)
|
|
{
|
|
struct pglist_data *pgdat = zone->zone_pgdat;
|
|
unsigned long present_pages = 0;
|
|
enum zone_type zt;
|
|
|
|
arg->status_change_nid = NUMA_NO_NODE;
|
|
arg->status_change_nid_normal = NUMA_NO_NODE;
|
|
|
|
/*
|
|
* Check whether node_states[N_NORMAL_MEMORY] will be changed.
|
|
* If the memory to be offline is within the range
|
|
* [0..ZONE_NORMAL], and it is the last present memory there,
|
|
* the zones in that range will become empty after the offlining,
|
|
* thus we can determine that we need to clear the node from
|
|
* node_states[N_NORMAL_MEMORY].
|
|
*/
|
|
for (zt = 0; zt <= ZONE_NORMAL; zt++)
|
|
present_pages += pgdat->node_zones[zt].present_pages;
|
|
if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
|
|
arg->status_change_nid_normal = zone_to_nid(zone);
|
|
|
|
/*
|
|
* We have accounted the pages from [0..ZONE_NORMAL); ZONE_HIGHMEM
|
|
* does not apply as we don't support 32bit.
|
|
* Here we count the possible pages from ZONE_MOVABLE.
|
|
* If after having accounted all the pages, we see that the nr_pages
|
|
* to be offlined is over or equal to the accounted pages,
|
|
* we know that the node will become empty, and so, we can clear
|
|
* it for N_MEMORY as well.
|
|
*/
|
|
present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;
|
|
|
|
if (nr_pages >= present_pages)
|
|
arg->status_change_nid = zone_to_nid(zone);
|
|
}
|
|
|
|
static void node_states_clear_node(int node, struct memory_notify *arg)
|
|
{
|
|
if (arg->status_change_nid_normal >= 0)
|
|
node_clear_state(node, N_NORMAL_MEMORY);
|
|
|
|
if (arg->status_change_nid >= 0)
|
|
node_clear_state(node, N_MEMORY);
|
|
}
|
|
|
|
static int count_system_ram_pages_cb(unsigned long start_pfn,
|
|
unsigned long nr_pages, void *data)
|
|
{
|
|
unsigned long *nr_system_ram_pages = data;
|
|
|
|
*nr_system_ram_pages += nr_pages;
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Must be called with mem_hotplug_lock in write mode.
|
|
*/
|
|
int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
|
|
struct zone *zone, struct memory_group *group)
|
|
{
|
|
const unsigned long end_pfn = start_pfn + nr_pages;
|
|
unsigned long pfn, system_ram_pages = 0;
|
|
const int node = zone_to_nid(zone);
|
|
unsigned long flags;
|
|
struct memory_notify arg;
|
|
char *reason;
|
|
int ret;
|
|
|
|
/*
|
|
* {on,off}lining is constrained to full memory sections (or more
|
|
* precisely to memory blocks from the user space POV).
|
|
* memmap_on_memory is an exception because it reserves initial part
|
|
* of the physical memory space for vmemmaps. That space is pageblock
|
|
* aligned.
|
|
*/
|
|
if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
|
|
!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
|
|
return -EINVAL;
|
|
|
|
/*
|
|
* Don't allow to offline memory blocks that contain holes.
|
|
* Consequently, memory blocks with holes can never get onlined
|
|
* via the hotplug path - online_pages() - as hotplugged memory has
|
|
* no holes. This way, we e.g., don't have to worry about marking
|
|
* memory holes PG_reserved, don't need pfn_valid() checks, and can
|
|
* avoid using walk_system_ram_range() later.
|
|
*/
|
|
walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
|
|
count_system_ram_pages_cb);
|
|
if (system_ram_pages != nr_pages) {
|
|
ret = -EINVAL;
|
|
reason = "memory holes";
|
|
goto failed_removal;
|
|
}
|
|
|
|
/*
|
|
* We only support offlining of memory blocks managed by a single zone,
|
|
* checked by calling code. This is just a sanity check that we might
|
|
* want to remove in the future.
|
|
*/
|
|
if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone ||
|
|
page_zone(pfn_to_page(end_pfn - 1)) != zone)) {
|
|
ret = -EINVAL;
|
|
reason = "multizone range";
|
|
goto failed_removal;
|
|
}
|
|
|
|
/*
|
|
* Disable pcplists so that page isolation cannot race with freeing
|
|
* in a way that pages from isolated pageblock are left on pcplists.
|
|
*/
|
|
zone_pcp_disable(zone);
|
|
lru_cache_disable();
|
|
|
|
/* set above range as isolated */
|
|
ret = start_isolate_page_range(start_pfn, end_pfn,
|
|
MIGRATE_MOVABLE,
|
|
MEMORY_OFFLINE | REPORT_FAILURE,
|
|
GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL);
|
|
if (ret) {
|
|
reason = "failure to isolate range";
|
|
goto failed_removal_pcplists_disabled;
|
|
}
|
|
|
|
arg.start_pfn = start_pfn;
|
|
arg.nr_pages = nr_pages;
|
|
node_states_check_changes_offline(nr_pages, zone, &arg);
|
|
|
|
ret = memory_notify(MEM_GOING_OFFLINE, &arg);
|
|
ret = notifier_to_errno(ret);
|
|
if (ret) {
|
|
reason = "notifier failure";
|
|
goto failed_removal_isolated;
|
|
}
|
|
|
|
do {
|
|
pfn = start_pfn;
|
|
do {
|
|
if (signal_pending(current)) {
|
|
ret = -EINTR;
|
|
reason = "signal backoff";
|
|
goto failed_removal_isolated;
|
|
}
|
|
|
|
cond_resched();
|
|
|
|
ret = scan_movable_pages(pfn, end_pfn, &pfn);
|
|
if (!ret) {
|
|
/*
|
|
* TODO: fatal migration failures should bail
|
|
* out
|
|
*/
|
|
do_migrate_range(pfn, end_pfn);
|
|
}
|
|
} while (!ret);
|
|
|
|
if (ret != -ENOENT) {
|
|
reason = "unmovable page";
|
|
goto failed_removal_isolated;
|
|
}
|
|
|
|
/*
|
|
* Dissolve free hugepages in the memory block before doing
|
|
* offlining actually in order to make hugetlbfs's object
|
|
* counting consistent.
|
|
*/
|
|
ret = dissolve_free_huge_pages(start_pfn, end_pfn);
|
|
if (ret) {
|
|
reason = "failure to dissolve huge pages";
|
|
goto failed_removal_isolated;
|
|
}
|
|
|
|
ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
|
|
|
|
} while (ret);
|
|
|
|
/* Mark all sections offline and remove free pages from the buddy. */
|
|
__offline_isolated_pages(start_pfn, end_pfn);
|
|
pr_debug("Offlined Pages %ld\n", nr_pages);
|
|
|
|
/*
|
|
* The memory sections are marked offline, and the pageblock flags
|
|
* effectively stale; nobody should be touching them. Fixup the number
|
|
* of isolated pageblocks, memory onlining will properly revert this.
|
|
*/
|
|
spin_lock_irqsave(&zone->lock, flags);
|
|
zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
|
|
spin_unlock_irqrestore(&zone->lock, flags);
|
|
|
|
lru_cache_enable();
|
|
zone_pcp_enable(zone);
|
|
|
|
/* removal success */
|
|
adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
|
|
adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
|
|
|
|
/* reinitialise watermarks and update pcp limits */
|
|
init_per_zone_wmark_min();
|
|
|
|
if (!populated_zone(zone)) {
|
|
zone_pcp_reset(zone);
|
|
build_all_zonelists(NULL);
|
|
}
|
|
|
|
node_states_clear_node(node, &arg);
|
|
if (arg.status_change_nid >= 0) {
|
|
kcompactd_stop(node);
|
|
kswapd_stop(node);
|
|
}
|
|
|
|
writeback_set_ratelimit();
|
|
|
|
memory_notify(MEM_OFFLINE, &arg);
|
|
remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
|
|
return 0;
|
|
|
|
failed_removal_isolated:
|
|
/* pushback to free area */
|
|
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
|
|
memory_notify(MEM_CANCEL_OFFLINE, &arg);
|
|
failed_removal_pcplists_disabled:
|
|
lru_cache_enable();
|
|
zone_pcp_enable(zone);
|
|
failed_removal:
|
|
pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
|
|
(unsigned long long) start_pfn << PAGE_SHIFT,
|
|
((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
|
|
reason);
|
|
return ret;
|
|
}
|
|
|
|
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
|
|
{
|
|
int *nid = arg;
|
|
|
|
*nid = mem->nid;
|
|
if (unlikely(mem->state != MEM_OFFLINE)) {
|
|
phys_addr_t beginpa, endpa;
|
|
|
|
beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
|
|
endpa = beginpa + memory_block_size_bytes() - 1;
|
|
pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
|
|
&beginpa, &endpa);
|
|
|
|
return -EBUSY;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg)
|
|
{
|
|
/*
|
|
* If not set, continue with the next block.
|
|
*/
|
|
return mem->nr_vmemmap_pages;
|
|
}
|
|
|
|
static int check_cpu_on_node(int nid)
|
|
{
|
|
int cpu;
|
|
|
|
for_each_present_cpu(cpu) {
|
|
if (cpu_to_node(cpu) == nid)
|
|
/*
|
|
* the cpu on this node isn't removed, and we can't
|
|
* offline this node.
|
|
*/
|
|
return -EBUSY;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
|
|
{
|
|
int nid = *(int *)arg;
|
|
|
|
/*
|
|
* If a memory block belongs to multiple nodes, the stored nid is not
|
|
* reliable. However, such blocks are always online (e.g., cannot get
|
|
* offlined) and, therefore, are still spanned by the node.
|
|
*/
|
|
return mem->nid == nid ? -EEXIST : 0;
|
|
}
|
|
|
|
/**
|
|
* try_offline_node
|
|
* @nid: the node ID
|
|
*
|
|
* Offline a node if all memory sections and cpus of the node are removed.
|
|
*
|
|
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
|
|
* and online/offline operations before this call.
|
|
*/
|
|
void try_offline_node(int nid)
|
|
{
|
|
int rc;
|
|
|
|
/*
|
|
* If the node still spans pages (especially ZONE_DEVICE), don't
|
|
* offline it. A node spans memory after move_pfn_range_to_zone(),
|
|
* e.g., after the memory block was onlined.
|
|
*/
|
|
if (node_spanned_pages(nid))
|
|
return;
|
|
|
|
/*
|
|
* Especially offline memory blocks might not be spanned by the
|
|
* node. They will get spanned by the node once they get onlined.
|
|
* However, they link to the node in sysfs and can get onlined later.
|
|
*/
|
|
rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
|
|
if (rc)
|
|
return;
|
|
|
|
if (check_cpu_on_node(nid))
|
|
return;
|
|
|
|
/*
|
|
* all memory/cpu of this node are removed, we can offline this
|
|
* node now.
|
|
*/
|
|
node_set_offline(nid);
|
|
unregister_one_node(nid);
|
|
}
|
|
EXPORT_SYMBOL(try_offline_node);
|
|
|
|
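/*
 * Tear down a fully offlined range: drop the firmware memmap entry and
 * the memory block devices, undo arch_add_memory(), release the resource
 * and try to offline the node if it became empty.
 */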
static int __ref try_remove_memory(u64 start, u64 size)
{
	struct vmem_altmap mhp_altmap = {};
	struct vmem_altmap *altmap = NULL;
	unsigned long nr_vmemmap_pages;
	int rc = 0, nid = NUMA_NO_NODE;

	BUG_ON(check_hotplug_memory_range(start, size));

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and return error
	 * if this is not the case.
	 *
	 * While at it, determine the nid. Note that if we'd have mixed nodes,
	 * we'd only try to offline the last determined one -- which is good
	 * enough for the cases we care about.
	 */
	rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb);
	if (rc)
		return rc;

	/*
	 * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
	 * the same granularity it was added - a single memory block.
	 */
	if (mhp_memmap_on_memory()) {
		nr_vmemmap_pages = walk_memory_blocks(start, size, NULL,
						      get_nr_vmemmap_pages_cb);
		if (nr_vmemmap_pages) {
			if (size != memory_block_size_bytes()) {
				pr_warn("Refuse to remove %#llx - %#llx, wrong granularity\n",
					start, start + size);
				return -EINVAL;
			}

			/*
			 * Let remove_pmd_table->free_hugepage_table do the
			 * right thing if we used vmem_altmap when hot-adding
			 * the range.
			 */
			mhp_altmap.alloc = nr_vmemmap_pages;
			altmap = &mhp_altmap;
		}
	}

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	/*
	 * Memory block device removal under the device_hotplug_lock is
	 * a barrier against racing online attempts.
	 */
	remove_memory_block_devices(start, size);

	mem_hotplug_begin();

	arch_remove_memory(start, size, altmap);

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
		memblock_phys_free(start, size);
		memblock_remove(start, size);
	}

	release_mem_region_adjustable(start, size);

	if (nid != NUMA_NO_NODE)
		try_offline_node(nid);

	mem_hotplug_done();
	return 0;
}

/**
 * __remove_memory - Remove memory if every memory block is offline
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __remove_memory(u64 start, u64 size)
{

	/*
	 * trigger BUG() if some memory is not offlined prior to calling this
	 * function
	 */
	if (try_remove_memory(start, size))
		BUG();
}

/*
 * Remove memory if every memory block is offline, otherwise return -EBUSY if
 * some memory is not offline.
 */
int remove_memory(u64 start, u64 size)
{
	int rc;

	lock_device_hotplug();
	rc = try_remove_memory(start, size);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(remove_memory);

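/*
 * Illustrative caller sketch (hypothetical, not taken from an in-tree user):
 * remove_memory() takes the device_hotplug_lock itself, whereas
 * __remove_memory() expects the caller to already hold it, e.g.:
 *
 *	rc = remove_memory(start, size);
 *	if (rc == -EBUSY)
 *		pr_info("range still has online memory blocks\n");
 */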
#ifdef CONFIG_MEMORY_HOTPLUG_SUBSECTIONS
int remove_memory_subsection(u64 start, u64 size)
{
	if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
	    !IS_ALIGNED(size, SUBSECTION_SIZE)) {
		pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
			__func__, start, size);
		return -EINVAL;
	}

	mem_hotplug_begin();
	arch_remove_memory(start, size, NULL);

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);

	release_mem_region_adjustable(start, size);

	mem_hotplug_done();

	return 0;
}
EXPORT_SYMBOL(remove_memory_subsection);
#endif /* CONFIG_MEMORY_HOTPLUG_SUBSECTIONS */

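/*
 * try_offline_memory_block()/try_reonline_memory_block() are
 * walk_memory_blocks() callbacks used by offline_and_remove_memory(). They
 * share a cursor into the online_types array: the former records the online
 * type of each block it managed to offline, the latter replays those types
 * to roll back after a partial failure.
 */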
static int try_offline_memory_block(struct memory_block *mem, void *arg)
{
	uint8_t online_type = MMOP_ONLINE_KERNEL;
	uint8_t **online_types = arg;
	struct page *page;
	int rc;

	/*
	 * Sense the online_type via the zone of the memory block. Offlining
	 * with multiple zones within one memory block will be rejected
	 * by offlining code ... so we don't care about that.
	 */
	page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
	if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
		online_type = MMOP_ONLINE_MOVABLE;

	rc = device_offline(&mem->dev);
	/*
	 * Default is MMOP_OFFLINE - change it only if offlining succeeded,
	 * so try_reonline_memory_block() can do the right thing.
	 */
	if (!rc)
		**online_types = online_type;

	(*online_types)++;
	/* Ignore if already offline. */
	return rc < 0 ? rc : 0;
}

static int try_reonline_memory_block(struct memory_block *mem, void *arg)
{
	uint8_t **online_types = arg;
	int rc;

	if (**online_types != MMOP_OFFLINE) {
		mem->online_type = **online_types;
		rc = device_online(&mem->dev);
		if (rc < 0)
			pr_warn("%s: Failed to re-online memory: %d",
				__func__, rc);
	}

	/* Continue processing all remaining memory blocks. */
	(*online_types)++;
	return 0;
}

/*
 * Try to offline and remove memory. Might take a long time to finish in case
 * memory is still in use. Primarily useful for memory devices that logically
 * unplugged all memory (so it's no longer in use) and want to offline + remove
 * that memory.
 */
int offline_and_remove_memory(u64 start, u64 size)
{
	const unsigned long mb_count = size / memory_block_size_bytes();
	uint8_t *online_types, *tmp;
	int rc;

	if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
		return -EINVAL;

	/*
	 * We'll remember the old online type of each memory block, so we can
	 * try to revert whatever we did when offlining one memory block fails
	 * after offlining some others succeeded.
	 */
	online_types = kmalloc_array(mb_count, sizeof(*online_types),
				     GFP_KERNEL);
	if (!online_types)
		return -ENOMEM;
	/*
	 * Initialize all states to MMOP_OFFLINE, so when we abort processing in
	 * try_offline_memory_block(), we'll skip all unprocessed blocks in
	 * try_reonline_memory_block().
	 */
	memset(online_types, MMOP_OFFLINE, mb_count);

	lock_device_hotplug();

	tmp = online_types;
	rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);

	/*
	 * In case we succeeded to offline all memory, remove it.
	 * This cannot fail as it cannot get onlined in the meantime.
	 */
	if (!rc) {
		rc = try_remove_memory(start, size);
		if (rc)
			pr_err("%s: Failed to remove memory: %d", __func__, rc);
	}

	/*
	 * Rollback what we did. While memory onlining might theoretically fail
	 * (nacked by a notifier), it barely ever happens.
	 */
	if (rc) {
		tmp = online_types;
		walk_memory_blocks(start, size, &tmp,
				   try_reonline_memory_block);
	}
	unlock_device_hotplug();

	kfree(online_types);
	return rc;
}
EXPORT_SYMBOL_GPL(offline_and_remove_memory);
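
/*
 * Illustrative sketch of a caller (hypothetical driver, not taken from this
 * tree): a memory device driver that has logically unplugged one aligned
 * memory block might hand it back like this:
 *
 *	rc = offline_and_remove_memory(block_addr, memory_block_size_bytes());
 *	if (rc)
 *		pr_warn("block at %#llx still in use (%d)\n", block_addr, rc);
 */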
#endif /* CONFIG_MEMORY_HOTREMOVE */