android_kernel_xiaomi_sm8450/kernel/smp.c
Latest commit 7d3ca1ed3f by Greg Kroah-Hartman: "This is the 5.10.226 stable release"

Merge 5.10.226 into android12-5.10-lts

Changes in 5.10.226
	drm: panel-orientation-quirks: Add quirk for OrangePi Neo
	ALSA: hda/generic: Add a helper to mute speakers at suspend/shutdown
	ALSA: hda/conexant: Mute speakers at suspend / shutdown
	i2c: Fix conditional for substituting empty ACPI functions
	dma-debug: avoid deadlock between dma debug vs printk and netconsole
	net: usb: qmi_wwan: add MeiG Smart SRM825L
	drm/amdgpu: Fix uninitialized variable warning in amdgpu_afmt_acr
	drm/amdgpu: fix overflowed array index read warning
	drm/amd/pm: fix uninitialized variable warning for smu8_hwmgr
	drm/amd/pm: fix warning using uninitialized value of max_vid_step
	drm/amd/pm: fix the Out-of-bounds read warning
	drm/amdgpu: fix uninitialized scalar variable warning
	drm/amd/pm: fix uninitialized variable warnings for vega10_hwmgr
	drm/amdgpu: avoid reading vf2pf info size from FB
	drm/amd/display: Check gpio_id before used as array index
	drm/amd/display: Stop amdgpu_dm initialize when stream nums greater than 6
	drm/amd/display: Add array index check for hdcp ddc access
	drm/amd/display: Check num_valid_sets before accessing reader_wm_sets[]
	drm/amd/display: Check msg_id before processing transcation
	drm/amd/display: Fix Coverity INTEGER_OVERFLOW within dal_gpio_service_create
	drm/amdgpu/pm: Fix uninitialized variable agc_btc_response
	drm/amdgpu: Fix out-of-bounds write warning
	drm/amdgpu: Fix out-of-bounds read of df_v1_7_channel_number
	drm/amdgpu: fix ucode out-of-bounds read warning
	drm/amdgpu: fix mc_data out-of-bounds read warning
	drm/amdkfd: Reconcile the definition and use of oem_id in struct kfd_topology_device
	apparmor: fix possible NULL pointer dereference
	drm/amdgpu/pm: Check input value for CUSTOM profile mode setting on legacy SOCs
	drm/amdgpu: the warning dereferencing obj for nbio_v7_4
	drm/amd/pm: check negtive return for table entries
	wifi: iwlwifi: remove fw_running op
	PCI: al: Check IORESOURCE_BUS existence during probe
	hwspinlock: Introduce hwspin_lock_bust()
	ionic: fix potential irq name truncation
	usbip: Don't submit special requests twice
	usb: typec: ucsi: Fix null pointer dereference in trace
	fsnotify: clear PARENT_WATCHED flags lazily
	smack: tcp: ipv4, fix incorrect labeling
	drm/meson: plane: Add error handling
	wifi: cfg80211: make hash table duplicates more survivable
	block: remove the blk_flush_integrity call in blk_integrity_unregister
	drm/amd/display: Skip wbscl_set_scaler_filter if filter is null
	media: uvcvideo: Enforce alignment of frame and interval
	block: initialize integrity buffer to zero before writing it to media
	drm/amd/pm: Fix the null pointer dereference for vega10_hwmgr
	bpf, cgroups: Fix cgroup v2 fallback on v1/v2 mixed mode
	net: set SOCK_RCU_FREE before inserting socket into hashtable
	virtio_net: Fix napi_skb_cache_put warning
	rcu-tasks: Fix show_rcu_tasks_trace_gp_kthread buffer overflow
	udf: Limit file size to 4TB
	ext4: handle redirtying in ext4_bio_write_page()
	i2c: Use IS_REACHABLE() for substituting empty ACPI functions
	bpf, cgroup: Assign cgroup in cgroup_sk_alloc when called from interrupt
	sch/netem: fix use after free in netem_dequeue
	ASoC: dapm: Fix UAF for snd_soc_pcm_runtime object
	ALSA: hda/conexant: Add pincfg quirk to enable top speakers on Sirius devices
	ALSA: hda/realtek: add patch for internal mic in Lenovo V145
	ALSA: hda/realtek: Support mute LED on HP Laptop 14-dq2xxx
	ata: libata: Fix memory leak for error path in ata_host_alloc()
	irqchip/gic-v2m: Fix refcount leak in gicv2m_of_init()
	Revert "Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE"
	Bluetooth: MGMT: Ignore keys being loaded with invalid type
	mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K
	mmc: sdhci-of-aspeed: fix module autoloading
	fuse: update stats for pages in dropped aux writeback list
	fuse: use unsigned type for getxattr/listxattr size truncation
	clk: qcom: clk-alpha-pll: Fix the pll post div mask
	clk: qcom: clk-alpha-pll: Fix the trion pll postdiv set rate API
	can: mcp251x: fix deadlock if an interrupt occurs during mcp251x_open
	tracing: Avoid possible softlockup in tracing_iter_reset()
	ila: call nf_unregister_net_hooks() sooner
	sched: sch_cake: fix bulk flow accounting logic for host fairness
	nilfs2: fix missing cleanup on rollforward recovery error
	nilfs2: fix state management in error path of log writing function
	btrfs: fix use-after-free after failure to create a snapshot
	mptcp: pr_debug: add missing \n at the end
	mptcp: pm: avoid possible UaF when selecting endp
	nfsd: move reply cache initialization into nfsd startup
	nfsd: move init of percpu reply_cache_stats counters back to nfsd_init_net
	NFSD: Refactor nfsd_reply_cache_free_locked()
	NFSD: Rename nfsd_reply_cache_alloc()
	NFSD: Replace nfsd_prune_bucket()
	NFSD: Refactor the duplicate reply cache shrinker
	NFSD: simplify error paths in nfsd_svc()
	NFSD: Rewrite synopsis of nfsd_percpu_counters_init()
	NFSD: Fix frame size warning in svc_export_parse()
	sunrpc: don't change ->sv_stats if it doesn't exist
	nfsd: stop setting ->pg_stats for unused stats
	sunrpc: pass in the sv_stats struct through svc_create_pooled
	sunrpc: remove ->pg_stats from svc_program
	sunrpc: use the struct net as the svc proc private
	nfsd: rename NFSD_NET_* to NFSD_STATS_*
	nfsd: expose /proc/net/sunrpc/nfsd in net namespaces
	nfsd: make all of the nfsd stats per-network namespace
	nfsd: remove nfsd_stats, make th_cnt a global counter
	nfsd: make svc_stat per-network namespace instead of global
	ALSA: hda: Add input value sanity checks to HDMI channel map controls
	smack: unix sockets: fix accept()ed socket label
	irqchip/armada-370-xp: Do not allow mapping IRQ 0 and 1
	af_unix: Remove put_pid()/put_cred() in copy_peercred().
	iommu: sun50i: clear bypass register
	netfilter: nf_conncount: fix wrong variable type
	udf: Avoid excessive partition lengths
	media: vivid: fix wrong sizeimage value for mplane
	leds: spi-byte: Call of_node_put() on error path
	wifi: brcmsmac: advertise MFP_CAPABLE to enable WPA3
	usb: uas: set host status byte on data completion error
	media: vivid: don't set HDMI TX controls if there are no HDMI outputs
	PCI: keystone: Add workaround for Errata #i2037 (AM65x SR 1.0)
	media: qcom: camss: Add check for v4l2_fwnode_endpoint_parse
	pcmcia: Use resource_size function on resource object
	can: bcm: Remove proc entry when dev is unregistered.
	igb: Fix not clearing TimeSync interrupts for 82580
	svcrdma: Catch another Reply chunk overflow case
	platform/x86: dell-smbios: Fix error path in dell_smbios_init()
	tcp_bpf: fix return value of tcp_bpf_sendmsg()
	igc: Unlock on error in igc_io_resume()
	drivers/net/usb: Remove all strcpy() uses
	net: usb: don't write directly to netdev->dev_addr
	usbnet: modern method to get random MAC
	bareudp: Fix device stats updates.
	fou: remove sparse errors
	gro: remove rcu_read_lock/rcu_read_unlock from gro_receive handlers
	gro: remove rcu_read_lock/rcu_read_unlock from gro_complete handlers
	fou: Fix null-ptr-deref in GRO.
	net: bridge: br_fdb_external_learn_add(): always set EXT_LEARN
	net: dsa: vsc73xx: fix possible subblocks range of CAPT block
	ASoC: topology: Properly initialize soc_enum values
	dm init: Handle minors larger than 255
	iommu/vt-d: Handle volatile descriptor status read
	cgroup: Protect css->cgroup write under css_set_lock
	um: line: always fill *error_out in setup_one_line()
	devres: Initialize an uninitialized struct member
	pci/hotplug/pnv_php: Fix hotplug driver crash on Powernv
	hwmon: (adc128d818) Fix underflows seen when writing limit attributes
	hwmon: (lm95234) Fix underflows seen when writing limit attributes
	hwmon: (nct6775-core) Fix underflows seen when writing limit attributes
	hwmon: (w83627ehf) Fix underflows seen when writing limit attributes
	libbpf: Add NULL checks to bpf_object__{prev_map,next_map}
	wifi: mwifiex: Do not return unused priv in mwifiex_get_priv_by_id()
	smp: Add missing destroy_work_on_stack() call in smp_call_on_cpu()
	btrfs: replace BUG_ON with ASSERT in walk_down_proc()
	btrfs: clean up our handling of refs == 0 in snapshot delete
	PCI: Add missing bridge lock to pci_bus_lock()
	net: dpaa: avoid on-stack arrays of NR_CPUS elements
	kselftests: dmabuf-heaps: Ensure the driver name is null-terminated
	btrfs: initialize location to fix -Wmaybe-uninitialized in btrfs_lookup_dentry()
	s390/vmlinux.lds.S: Move ro_after_init section behind rodata section
	HID: cougar: fix slab-out-of-bounds Read in cougar_report_fixup
	Input: uinput - reject requests with unreasonable number of slots
	usbnet: ipheth: race between ipheth_close and error handling
	Squashfs: sanity check symbolic link size
	of/irq: Prevent device address out-of-bounds read in interrupt map walk
	lib/generic-radix-tree.c: Fix rare race in __genradix_ptr_alloc()
	MIPS: cevt-r4k: Don't call get_c0_compare_int if timer irq is installed
	ata: pata_macio: Use WARN instead of BUG
	NFSv4: Add missing rescheduling points in nfs_client_return_marked_delegations
	staging: iio: frequency: ad9834: Validate frequency parameter value
	iio: buffer-dmaengine: fix releasing dma channel on error
	iio: fix scale application in iio_convert_raw_to_processed_unlocked
	iio: adc: ad7124: fix chip ID mismatch
	binder: fix UAF caused by offsets overwrite
	nvmem: Fix return type of devm_nvmem_device_get() in kerneldoc
	uio_hv_generic: Fix kernel NULL pointer dereference in hv_uio_rescind
	Drivers: hv: vmbus: Fix rescind handling in uio_hv_generic
	VMCI: Fix use-after-free when removing resource in vmci_resource_remove()
	clocksource/drivers/imx-tpm: Fix return -ETIME when delta exceeds INT_MAX
	clocksource/drivers/imx-tpm: Fix next event not taking effect sometime
	clocksource/drivers/timer-of: Remove percpu irq related code
	uprobes: Use kzalloc to allocate xol area
	perf/aux: Fix AUX buffer serialization
	nilfs2: replace snprintf in show functions with sysfs_emit
	nilfs2: protect references to superblock parameters exposed in sysfs
	ACPI: processor: Return an error if acpi_processor_get_info() fails in processor_add()
	ACPI: processor: Fix memory leaks in error paths of processor_add()
	arm64: acpi: Move get_cpu_for_acpi_id() to a header
	arm64: acpi: Harden get_cpu_for_acpi_id() against missing CPU entry
	nvmet-tcp: fix kernel crash if commands allocation fails
	drm/i915/fence: Mark debug_fence_init_onstack() with __maybe_unused
	drm/i915/fence: Mark debug_fence_free() with __maybe_unused
	mmc: cqhci: Fix checking of CQHCI_HALT state
	rtmutex: Drop rt_mutex::wait_lock before scheduling
	x86/mm: Fix PTI for i386 some more
	net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket
	memcg: protect concurrent access to mem_cgroup_idr
	Linux 5.10.226

Change-Id: I3c0afd32ba78775f67cde6d73b4dbf931bbc4770
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2024-09-14 20:53:50 +00:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/suspend.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)  ((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
        call_single_data_t      __percpu *csd;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);
int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->csd = alloc_percpu(call_single_data_t);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->csd);
        return 0;
}
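
/*
 * Editor's note, not part of the original source: smpcfd_prepare_cpu(),
 * smpcfd_dead_cpu() and smpcfd_dying_cpu() below are not called from this
 * file (apart from call_function_init()); they are registered with the CPU
 * hotplug state machine in kernel/cpu.c, which runs the prepare/dead pair
 * around CPU bring-up/tear-down and the dying callback on the CPU as it
 * goes offline. That ordering guarantees each CPU's cpumasks and per-CPU
 * csd storage exist before the CPU can receive cross-calls.
 */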
int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        irq_work_run();
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)

static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void csd_lock_record(struct __call_single_data *csd)
{
        if (!csd) {
                smp_mb(); /* NULL cur_csd after unlock. */
                __this_cpu_write(cur_csd, NULL);
                return;
        }
        __this_cpu_write(cur_csd_func, csd->func);
        __this_cpu_write(cur_csd_info, csd->info);
        smp_wmb(); /* func and info before csd. */
        __this_cpu_write(cur_csd, csd);
        smp_mb(); /* Update cur_csd before function call. */
                  /* Or before unlock, as the case may be. */
}

static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
        unsigned int csd_type;

        csd_type = CSD_TYPE(csd);
        if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
                return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
        return -1;
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
        int cpu = -1;
        int cpux;
        bool firsttime;
        u64 ts2, ts_delta;
        call_single_data_t *cpu_cur_csd;
        unsigned int flags = READ_ONCE(csd->flags);

        if (!(flags & CSD_FLAG_LOCK)) {
                if (!unlikely(*bug_id))
                        return true;
                cpu = csd_lock_wait_getcpu(csd);
                pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
                         *bug_id, raw_smp_processor_id(), cpu);
                return true;
        }

        ts2 = sched_clock();
        ts_delta = ts2 - *ts1;
        if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
                return false;

        firsttime = !*bug_id;
        if (firsttime)
                *bug_id = atomic_inc_return(&csd_bug_count);
        cpu = csd_lock_wait_getcpu(csd);
        if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
                cpux = 0;
        else
                cpux = cpu;
        cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
        pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
                 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
                 cpu, csd->func, csd->info);
        if (cpu_cur_csd && csd != cpu_cur_csd) {
                pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
                         *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
                         READ_ONCE(per_cpu(cur_csd_info, cpux)));
        } else {
                pr_alert("\tcsd: CSD lock (#%d) %s.\n",
                         *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
        }
        if (cpu >= 0) {
                if (!trigger_single_cpu_backtrace(cpu))
                        dump_cpu_task(cpu);
                if (!cpu_cur_csd) {
                        pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
                        arch_send_call_function_single_ipi(cpu);
                }
        }
        dump_stack();
        *ts1 = ts2;

        return false;
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
        int bug_id = 0;
        u64 ts0, ts1;

        ts1 = ts0 = sched_clock();
        for (;;) {
                if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
                        break;
                cpu_relax();
        }
        smp_acquire__after_ctrl_dep();
}

#else
static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
#endif
static __always_inline void csd_lock(struct __call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data_t structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
        /*
         * The list addition should be visible before sending the IPI
         * handler locks the list to pull the entry off it because of
         * normal cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(node, &per_cpu(call_single_queue, cpu)))
                send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
        if (cpu == smp_processor_id()) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU..
                 */
                csd_lock_record(csd);
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                csd_lock_record(NULL);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        __smp_call_single_queue(cpu, &csd->llist);

        return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        call_single_data_t *csd, *csd_next;
        struct llist_node *entry, *prev;
        struct llist_head *head;
        static bool warned;

        lockdep_assert_irqs_disabled();

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && entry != NULL)) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist) {
                        switch (CSD_TYPE(csd)) {
                        case CSD_TYPE_ASYNC:
                        case CSD_TYPE_SYNC:
                        case CSD_TYPE_IRQ_WORK:
                                pr_warn("IPI callback %pS sent to offline CPU\n",
                                        csd->func);
                                break;

                        case CSD_TYPE_TTWU:
                                pr_warn("IPI task-wakeup sent to offline CPU\n");
                                break;

                        default:
                                pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
                                        CSD_TYPE(csd));
                                break;
                        }
                }
        }

        /*
         * First; run all SYNC callbacks, people are waiting for us.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                /* Do we wait until *after* callback? */
                if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
                        smp_call_func_t func = csd->func;
                        void *info = csd->info;

                        if (prev) {
                                prev->next = &csd_next->llist;
                        } else {
                                entry = &csd_next->llist;
                        }

                        csd_lock_record(csd);
                        func(info);
                        csd_unlock(csd);
                        csd_lock_record(NULL);
                } else {
                        prev = &csd->llist;
                }
        }

        if (!entry)
                return;

        /*
         * Second; run all !SYNC callbacks.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                int type = CSD_TYPE(csd);

                if (type != CSD_TYPE_TTWU) {
                        if (prev) {
                                prev->next = &csd_next->llist;
                        } else {
                                entry = &csd_next->llist;
                        }

                        if (type == CSD_TYPE_ASYNC) {
                                smp_call_func_t func = csd->func;
                                void *info = csd->info;

                                csd_lock_record(csd);
                                csd_unlock(csd);
                                func(info);
                                csd_lock_record(NULL);
                        } else if (type == CSD_TYPE_IRQ_WORK) {
                                irq_work_single(csd);
                        }
                } else {
                        prev = &csd->llist;
                }
        }

        /*
         * Third; only CSD_TYPE_TTWU is left, issue those.
         */
        if (entry)
                sched_ttwu_pending(entry);
}
void flush_smp_call_function_from_idle(void)
{
        unsigned long flags;

        if (llist_empty(this_cpu_ptr(&call_single_queue)))
                return;

        local_irq_save(flags);
        flush_smp_call_function_queue(true);
        if (local_softirq_pending())
                do_softirq();

        local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        csd->func = func;
        csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
        csd->src = smp_processor_id();
        csd->dst = cpu;
#endif

        err = generic_exec_single(cpu, csd);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
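
/*
 * Editor's illustrative sketch, not part of the original source: a minimal
 * synchronous cross-call that reads the scheduler clock on another CPU.
 * The names remote_clock() and read_cpu_clock() are hypothetical.
 *
 *	static void remote_clock(void *info)
 *	{
 *		u64 *clk = info;
 *
 *		*clk = sched_clock();	// runs on the target CPU, IRQs off
 *	}
 *
 *	static int read_cpu_clock(int cpu, u64 *clk)
 *	{
 *		// wait == 1: returns only after remote_clock() has run on
 *		// @cpu; -ENXIO if @cpu is invalid or offline.
 *		return smp_call_function_single(cpu, remote_clock, clk, 1);
 *	}
 */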
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
        int err = 0;

        preempt_disable();

        if (csd->flags & CSD_FLAG_LOCK) {
                err = -EBUSY;
                goto out;
        }

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd);

out:
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
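
/*
 * Editor's illustrative sketch, not part of the original source: the usual
 * pattern embeds the csd in the caller's own object and reuses it, relying
 * on the -EBUSY return for serialization. struct my_dev and its functions
 * are hypothetical.
 *
 *	struct my_dev {
 *		call_single_data_t csd;		// zero-initialized at alloc
 *	};
 *
 *	static void my_dev_ipi(void *info)	// runs in hardirq context
 *	{
 *		struct my_dev *dev = info;
 *		// fast, non-blocking work on the target CPU
 *	}
 *
 *	static int my_dev_kick(struct my_dev *dev, int cpu)
 *	{
 *		dev->csd.func = my_dev_ipi;
 *		dev->csd.info = dev;
 *		// May be called with IRQs disabled; returns -EBUSY while
 *		// the previous call on this csd is still in flight.
 *		return smp_call_function_single_async(cpu, &dev->csd);
 *	}
 */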
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
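
/*
 * Editor's illustrative sketch, not part of the original source:
 * drain_pkg_cache() and pkg_mask are hypothetical. The call runs the
 * function exactly once, on the cheapest suitable CPU per the preference
 * order above (current CPU, then same node, then any online CPU in @mask):
 *
 *	err = smp_call_function_any(pkg_mask, drain_pkg_cache, NULL, 1);
 */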
static void smp_call_function_many_cond(const struct cpumask *mask,
                                        smp_call_func_t func, void *info,
                                        bool wait, smp_cond_func_t cond_func)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd
         * storage.
         */
        WARN_ON_ONCE(!in_task());

        /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus? We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                if (!cond_func || cond_func(cpu, info))
                        smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        __cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        cpumask_clear(cfd->cpumask_ipi);
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

                if (cond_func && !cond_func(cpu, info))
                        continue;

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_TYPE_SYNC;
                csd->func = func;
                csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
                csd->src = smp_processor_id();
                csd->dst = cpu;
#endif
                if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
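
/*
 * Editor's illustrative sketch, not part of the original source:
 * invalidate_local_caches() is hypothetical. This runs the callback on
 * every online CPU except the calling one and, with wait == 1, returns
 * only after all of them have finished; use on_each_cpu() below when the
 * current CPU must run the callback as well:
 *
 *	smp_call_function(invalidate_local_caches, NULL, 1);
 */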
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();
        return 0;
}
early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}
early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}
early_param("maxcpus", maxcpus);
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        bringup_nonboot_cpus(setup_max_cpus);

        num_nodes = num_online_nodes();
        num_cpus = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus, (num_cpus > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
        unsigned long flags;

        preempt_disable();
        smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
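
/*
 * Editor's illustrative sketch, not part of the original source:
 * apply_freq_limit(), policy_mask and limit are hypothetical. Unlike
 * smp_call_function_many(), the local CPU also runs the callback when it
 * is in the mask:
 *
 *	on_each_cpu_mask(policy_mask, apply_freq_limit, &limit, true);
 */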
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
{
        int cpu = get_cpu();

        smp_call_function_many_cond(mask, func, info, wait, cond_func);
        if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
                      void *info, bool wait)
{
        on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
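
/*
 * Editor's illustrative sketch, not part of the original source: both
 * callbacks and the per-CPU counter are hypothetical. Only CPUs whose
 * predicate returns true are sent the IPI (or called locally):
 *
 *	static DEFINE_PER_CPU(unsigned int, dirty_count);
 *
 *	static bool cpu_is_dirty(int cpu, void *info)
 *	{
 *		return per_cpu(dirty_count, cpu) != 0;
 *	}
 *
 *	static void flush_dirty(void *info)
 *	{
 *		this_cpu_write(dirty_count, 0);
 *	}
 *
 *	on_each_cpu_cond(cpu_is_dirty, flush_dirty, NULL, true);
 */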
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus tries to break all cpus which are in the idle
 * state, including cpus that are idle-polling; non-idle cpus are left
 * alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
#if IS_ENABLED(CONFIG_SUSPEND)
                if (s2idle_state == S2IDLE_STATE_ENTER || cpu_active(cpu))
#endif
                        wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * wake_up_all_online_idle_cpus - break all online cpus out of idle
 * wake_up_all_online_idle_cpus tries to break all online cpus which are in
 * the idle state, including cpus that are idle-polling; non-idle cpus are
 * left alone.
 */
void wake_up_all_online_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_online_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);
        destroy_work_on_stack(&sscs.work);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
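
/*
 * Editor's illustrative sketch, not part of the original source:
 * read_board_sensor() and struct sensor_req are hypothetical. Unlike the
 * IPI-based calls above, the function runs from a workqueue worker on the
 * target CPU and may therefore sleep:
 *
 *	static int read_board_sensor(void *data)
 *	{
 *		struct sensor_req *rq = data;
 *
 *		rq->val = ...;		// may block on I/O
 *		return 0;
 *	}
 *
 *	// phys == true additionally asks the hypervisor to pin the vCPU
 *	// to the matching physical CPU for the duration of the call.
 *	ret = smp_call_on_cpu(cpu, read_board_sensor, &rq, false);
 */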